| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
BorisJeremic/Real-ESSI-Examples | valgrind_test/test_cases/27NodeBrick/circular_plate_simply_support/4layer/side_length_02/compare_txt.py | 202 | 2092 | #!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# analytic_solution = sys.argv[1]
# numeric_result = sys.argv[2]
analytic_solution = 'analytic_solution.txt'
numeric_result = 'numeric_result.txt'
analytic_sol = np.loadtxt(analytic_solution)
numeric_res = np.loadtxt(numeric_result)
abs_error = abs(analytic_sol - numeric_res)
rel_error = abs_error/analytic_sol
analytic_sol = float(analytic_sol)
numeric_res = float(numeric_res)
rel_error = float(rel_error)
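# Worked example (hypothetical values): analytic_sol = 1.000e-3 and
# numeric_res = 1.020e-3 give abs_error = 2.0e-5 and
# rel_error = 2.0e-5 / 1.0e-3 = 0.02, i.e. a 2% relative error.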
# print the results
case_flag=1
print headrun(), "-----------Testing results-----------------"
print headstep(), '{0} {1} {2} '.format('analytic_solution ','numeric_result ','error ')
print headOK(), '{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error)
if(case_flag==1):
print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \;
# find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \;
# find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \;
# find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \;
# find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \;
# sed -i "s/node\.fei/add_node.include/" main.fei
# sed -i "s/add_node\.fei/add_node.include/" main.fei
# sed -i "s/element\.fei/add_element.include/" main.fei
# sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei
# sed -i "s/constraint\.fei/add_constraint.include/" main.fei
# find . -name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
| cc0-1.0 |
exploreodoo/datStruct | odoo/openerp/addons/base/tests/test_menu.py | 501 | 1450 | import openerp.tests.common as common
class test_menu(common.TransactionCase):
def setUp(self):
super(test_menu,self).setUp()
self.Menus = self.registry('ir.ui.menu')
def test_00_menu_deletion(self):
"""Verify that menu deletion works properly when there are child menus, and those
are indeed made orphans"""
cr, uid, Menus = self.cr, self.uid, self.Menus
# Generic trick necessary for search() calls to avoid hidden menus
ctx = {'ir.ui.menu.full_list': True}
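# Illustrative sketch (hypothetical behavior): the flag makes search() also
# return menus that are normally hidden from the user, e.g.
#   Menus.search(cr, uid, [], context={})   -> visible menus only
#   Menus.search(cr, uid, [], context=ctx)  -> every menu, including hidden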
root_id = Menus.create(cr, uid, {'name': 'Test root'})
child1_id = Menus.create(cr, uid, {'name': 'Test child 1', 'parent_id': root_id})
child2_id = Menus.create(cr, uid, {'name': 'Test child 2', 'parent_id': root_id})
child21_id = Menus.create(cr, uid, {'name': 'Test child 2-1', 'parent_id': child2_id})
all_ids = [root_id, child1_id, child2_id, child21_id]
# delete and check that direct children are promoted to top-level
# cfr. explanation in menu.unlink()
Menus.unlink(cr, uid, [root_id])
remaining_ids = Menus.search(cr, uid, [('id', 'in', all_ids)], order="id", context=ctx)
self.assertEqual([child1_id, child2_id, child21_id], remaining_ids)
orphan_ids = Menus.search(cr, uid, [('id', 'in', all_ids), ('parent_id', '=', False)], order="id", context=ctx)
self.assertEqual([child1_id, child2_id], orphan_ids)
| gpl-2.0 |
Dingmatt/AMSA | Plug-ins/Amsa.bundle/Contents/Libraries/Shared/psutil/tests/test_posix.py | 6 | 13728 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""POSIX specific tests."""
import datetime
import errno
import os
import subprocess
import sys
import time
import psutil
from psutil import BSD
from psutil import LINUX
from psutil import OSX
from psutil import POSIX
from psutil import SUNOS
from psutil._compat import callable
from psutil._compat import PY3
from psutil.tests import APPVEYOR
from psutil.tests import get_kernel_version
from psutil.tests import get_test_subprocess
from psutil.tests import mock
from psutil.tests import PYTHON
from psutil.tests import reap_children
from psutil.tests import retry_before_failing
from psutil.tests import run_test_module_by_name
from psutil.tests import sh
from psutil.tests import skip_on_access_denied
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import wait_for_pid
def ps(cmd):
"""Expects a ps command with a -o argument and parse the result
returning only the value of interest.
"""
if not LINUX:
cmd = cmd.replace(" --no-headers ", " ")
if SUNOS:
cmd = cmd.replace("-o command", "-o comm")
cmd = cmd.replace("-o start", "-o stime")
p = subprocess.Popen(cmd, shell=1, stdout=subprocess.PIPE)
output = p.communicate()[0].strip()
if PY3:
output = str(output, sys.stdout.encoding)
if not LINUX:
output = output.split('\n')[1].strip()
try:
return int(output)
except ValueError:
return output
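# Usage sketch (hypothetical pid): numeric columns come back as int, text
# columns as str:
#   ps("ps --no-headers -o ppid -p 1234")  ->  1
#   ps("ps --no-headers -o user -p 1234")  ->  'root'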
@unittest.skipUnless(POSIX, "POSIX only")
class TestProcess(unittest.TestCase):
"""Compare psutil results against 'ps' command line utility (mainly)."""
@classmethod
def setUpClass(cls):
cls.pid = get_test_subprocess([PYTHON, "-E", "-O"],
stdin=subprocess.PIPE).pid
wait_for_pid(cls.pid)
@classmethod
def tearDownClass(cls):
reap_children()
# for ps -o arguments see: http://unixhelp.ed.ac.uk/CGI/man-cgi?ps
def test_ppid(self):
ppid_ps = ps("ps --no-headers -o ppid -p %s" % self.pid)
ppid_psutil = psutil.Process(self.pid).ppid()
self.assertEqual(ppid_ps, ppid_psutil)
def test_uid(self):
uid_ps = ps("ps --no-headers -o uid -p %s" % self.pid)
uid_psutil = psutil.Process(self.pid).uids().real
self.assertEqual(uid_ps, uid_psutil)
def test_gid(self):
gid_ps = ps("ps --no-headers -o rgid -p %s" % self.pid)
gid_psutil = psutil.Process(self.pid).gids().real
self.assertEqual(gid_ps, gid_psutil)
def test_username(self):
username_ps = ps("ps --no-headers -o user -p %s" % self.pid)
username_psutil = psutil.Process(self.pid).username()
self.assertEqual(username_ps, username_psutil)
@skip_on_access_denied()
@retry_before_failing()
def test_rss_memory(self):
# give python interpreter some time to properly initialize
# so that the results are the same
time.sleep(0.1)
rss_ps = ps("ps --no-headers -o rss -p %s" % self.pid)
rss_psutil = psutil.Process(self.pid).memory_info()[0] / 1024
self.assertEqual(rss_ps, rss_psutil)
@skip_on_access_denied()
@retry_before_failing()
def test_vsz_memory(self):
# give python interpreter some time to properly initialize
# so that the results are the same
time.sleep(0.1)
vsz_ps = ps("ps --no-headers -o vsz -p %s" % self.pid)
vsz_psutil = psutil.Process(self.pid).memory_info()[1] / 1024
self.assertEqual(vsz_ps, vsz_psutil)
def test_name(self):
# use command + arg since "comm" keyword not supported on all platforms
name_ps = ps("ps --no-headers -o command -p %s" % (
self.pid)).split(' ')[0]
# remove path if there is any, from the command
name_ps = os.path.basename(name_ps).lower()
name_psutil = psutil.Process(self.pid).name().lower()
self.assertEqual(name_ps, name_psutil)
@unittest.skipIf(OSX or BSD, 'ps -o start not available')
def test_create_time(self):
time_ps = ps("ps --no-headers -o start -p %s" % self.pid).split(' ')[0]
time_psutil = psutil.Process(self.pid).create_time()
time_psutil_tstamp = datetime.datetime.fromtimestamp(
time_psutil).strftime("%H:%M:%S")
# sometimes ps shows the time rounded up instead of down, so we check
# for both possible values
round_time_psutil = round(time_psutil)
round_time_psutil_tstamp = datetime.datetime.fromtimestamp(
round_time_psutil).strftime("%H:%M:%S")
self.assertIn(time_ps, [time_psutil_tstamp, round_time_psutil_tstamp])
def test_exe(self):
ps_pathname = ps("ps --no-headers -o command -p %s" %
self.pid).split(' ')[0]
psutil_pathname = psutil.Process(self.pid).exe()
try:
self.assertEqual(ps_pathname, psutil_pathname)
except AssertionError:
# certain platforms such as BSD are more accurate returning:
# "/usr/local/bin/python2.7"
# ...instead of:
# "/usr/local/bin/python"
# We do not want to consider this difference in accuracy
# an error.
# Trim the (possibly more precise) psutil path down to the length
# reported by ps before comparing.
adjusted_psutil_pathname = psutil_pathname[:len(ps_pathname)]
self.assertEqual(ps_pathname, adjusted_psutil_pathname)
def test_cmdline(self):
ps_cmdline = ps("ps --no-headers -o command -p %s" % self.pid)
psutil_cmdline = " ".join(psutil.Process(self.pid).cmdline())
if SUNOS:
# ps on Solaris only shows the first part of the cmdline
psutil_cmdline = psutil_cmdline.split(" ")[0]
self.assertEqual(ps_cmdline, psutil_cmdline)
def test_nice(self):
ps_nice = ps("ps --no-headers -o nice -p %s" % self.pid)
psutil_nice = psutil.Process().nice()
self.assertEqual(ps_nice, psutil_nice)
def test_num_fds(self):
# Note: this fails from time to time; I'm keen on thinking
# it doesn't mean something is broken
def call(p, name):
args = ()
attr = getattr(p, name, None)
if attr is not None and callable(attr):
if name == 'rlimit':
args = (psutil.RLIMIT_NOFILE,)
attr(*args)
else:
attr
p = psutil.Process(os.getpid())
failures = []
ignored_names = ['terminate', 'kill', 'suspend', 'resume', 'nice',
'send_signal', 'wait', 'children', 'as_dict']
if LINUX and get_kernel_version() < (2, 6, 36):
ignored_names.append('rlimit')
if LINUX and get_kernel_version() < (2, 6, 23):
ignored_names.append('num_ctx_switches')
for name in dir(psutil.Process):
if (name.startswith('_') or name in ignored_names):
continue
else:
try:
num1 = p.num_fds()
for x in range(2):
call(p, name)
num2 = p.num_fds()
except psutil.AccessDenied:
pass
else:
if abs(num2 - num1) > 1:
fail = "failure while processing Process.%s method " \
"(before=%s, after=%s)" % (name, num1, num2)
failures.append(fail)
if failures:
self.fail('\n' + '\n'.join(failures))
@unittest.skipUnless(os.path.islink("/proc/%s/cwd" % os.getpid()),
"/proc fs not available")
def test_cwd(self):
self.assertEqual(os.readlink("/proc/%s/cwd" % os.getpid()),
psutil.Process().cwd())
@unittest.skipUnless(POSIX, "POSIX only")
class TestSystemAPIs(unittest.TestCase):
"""Test some system APIs."""
@retry_before_failing()
def test_pids(self):
# Note: this test might fail if the OS is starting/killing
# other processes in the meantime
if SUNOS:
cmd = ["ps", "-A", "-o", "pid"]
else:
cmd = ["ps", "ax", "-o", "pid"]
p = get_test_subprocess(cmd, stdout=subprocess.PIPE)
output = p.communicate()[0].strip()
assert p.poll() == 0
if PY3:
output = str(output, sys.stdout.encoding)
pids_ps = []
for line in output.split('\n')[1:]:
if line:
pid = int(line.split()[0].strip())
pids_ps.append(pid)
# remove ps subprocess pid which is supposed to be dead in the meantime
pids_ps.remove(p.pid)
pids_psutil = psutil.pids()
pids_ps.sort()
pids_psutil.sort()
# on OSX ps doesn't show pid 0
if OSX and 0 not in pids_ps:
pids_ps.insert(0, 0)
if pids_ps != pids_psutil:
difference = [x for x in pids_psutil if x not in pids_ps] + \
[x for x in pids_ps if x not in pids_psutil]
self.fail("difference: " + str(difference))
# for some reason ifconfig -a does not report all interfaces
# returned by psutil
@unittest.skipIf(SUNOS, "unreliable on SUNOS")
@unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
def test_nic_names(self):
p = subprocess.Popen("ifconfig -a", shell=1, stdout=subprocess.PIPE)
output = p.communicate()[0].strip()
if p.returncode != 0:
raise unittest.SkipTest('ifconfig returned no output')
if PY3:
output = str(output, sys.stdout.encoding)
for nic in psutil.net_io_counters(pernic=True).keys():
for line in output.split():
if line.startswith(nic):
break
else:
self.fail(
"couldn't find %s nic in 'ifconfig -a' output\n%s" % (
nic, output))
# can't find users on APPVEYOR or TRAVIS
@unittest.skipIf(APPVEYOR or TRAVIS and not psutil.users(),
"unreliable on APPVEYOR or TRAVIS")
@retry_before_failing()
def test_users(self):
out = sh("who")
lines = out.split('\n')
users = [x.split()[0] for x in lines]
self.assertEqual(len(users), len(psutil.users()))
terminals = [x.split()[1] for x in lines]
for u in psutil.users():
self.assertTrue(u.name in users, u.name)
self.assertTrue(u.terminal in terminals, u.terminal)
def test_pid_exists_let_raise(self):
# According to "man 2 kill" possible error values for kill
# are (EINVAL, EPERM, ESRCH). Test that any other errno
# results in an exception.
with mock.patch("psutil._psposix.os.kill",
side_effect=OSError(errno.EBADF, "")) as m:
self.assertRaises(OSError, psutil._psposix.pid_exists, os.getpid())
assert m.called
def test_os_waitpid_let_raise(self):
# os.waitpid() is supposed to catch EINTR and ECHILD only.
# Test that any other errno results in an exception.
with mock.patch("psutil._psposix.os.waitpid",
side_effect=OSError(errno.EBADF, "")) as m:
self.assertRaises(OSError, psutil._psposix.wait_pid, os.getpid())
assert m.called
def test_os_waitpid_eintr(self):
# os.waitpid() is supposed to "retry" on EINTR.
with mock.patch("psutil._psposix.os.waitpid",
side_effect=OSError(errno.EINTR, "")) as m:
self.assertRaises(
psutil._psposix.TimeoutExpired,
psutil._psposix.wait_pid, os.getpid(), timeout=0.01)
assert m.called
def test_os_waitpid_bad_ret_status(self):
# Simulate os.waitpid() returning a bad status.
with mock.patch("psutil._psposix.os.waitpid",
return_value=(1, -1)) as m:
self.assertRaises(ValueError,
psutil._psposix.wait_pid, os.getpid())
assert m.called
def test_disk_usage(self):
def df(device):
out = sh("df -k %s" % device).strip()
line = out.split('\n')[1]
fields = line.split()
total = int(fields[1]) * 1024
used = int(fields[2]) * 1024
free = int(fields[3]) * 1024
percent = float(fields[4].replace('%', ''))
return (total, used, free, percent)
tolerance = 4 * 1024 * 1024 # 4MB
for part in psutil.disk_partitions(all=False):
usage = psutil.disk_usage(part.mountpoint)
try:
total, used, free, percent = df(part.device)
except RuntimeError as err:
# see:
# https://travis-ci.org/giampaolo/psutil/jobs/138338464
# https://travis-ci.org/giampaolo/psutil/jobs/138343361
if "no such file or directory" in str(err).lower() or \
"raw devices not supported" in str(err).lower():
continue
else:
raise
else:
self.assertAlmostEqual(usage.total, total, delta=tolerance)
self.assertAlmostEqual(usage.used, used, delta=tolerance)
self.assertAlmostEqual(usage.free, free, delta=tolerance)
self.assertAlmostEqual(usage.percent, percent, delta=1)
if __name__ == '__main__':
run_test_module_by_name(__file__)
| gpl-3.0 |
stwunsch/gnuradio | gr-uhd/examples/python/usrp_tv_rcv_nogui.py | 56 | 9334 | #!/usr/bin/env python
#
# Copyright 2005-2007,2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Reads from a file and generates PAL TV pictures in black and white
which can be displayed using ImageMagick or in realtime using
gr-video-sdl. (To capture the input file, use usrp_rx_file.py, or use
usrp_rx_cfile.py --output-shorts if you have a recent enough
usrp_rx_cfile.py.)
Can also use the USRP directly as the capture source, but then you
need a higher decimation factor (64) and thus get a lower horizontal
resolution. There is no synchronisation yet. The sync blocks are in
development but not yet in cvs.
"""
from gnuradio import gr, eng_notation
from gnuradio import analog
from gnuradio import blocks
from gnuradio import audio
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
from gnuradio import video_sdl
except:
print "FYI: gr-video-sdl is not installed"
print "realtime \"sdl\" video output window will not be available"
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
usage=("%prog: [options] output_filename.\nSpecial output_filename" + \
"\"sdl\" will use video_sink_sdl as realtime output window. " + \
"You then need to have gr-video-sdl installed.\n" +\
"Make sure your input capture file containes interleaved " + \
"shorts not complex floats")
parser = OptionParser(option_class=eng_option, usage=usage)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("-s", "--samp-rate", type="eng_float", default=1e6,
help="set sample rate")
parser.add_option("-c", "--contrast", type="eng_float", default=1.0,
help="set contrast (default is 1.0)")
parser.add_option("-b", "--brightness", type="eng_float", default=0.0,
help="set brightness (default is 0)")
parser.add_option("-i", "--in-filename", type="string", default=None,
help="Use input file as source. samples must be " + \
"interleaved shorts \n Use usrp_rx_file.py or " + \
"usrp_rx_cfile.py --output-shorts.\n Special " + \
"name \"usrp\" results in realtime capturing " + \
"and processing using usrp.\n" + \
"You then probably need a decimation factor of 64 or higher.")
parser.add_option("-f", "--freq", type="eng_float", default=519.25e6,
help="set frequency to FREQ.\nNote that the frequency of the video carrier is not at the middle of the TV channel", metavar="FREQ")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option("-p", "--pal", action="store_true", default=False,
help="PAL video format (this is the default)")
parser.add_option("-n", "--ntsc", action="store_true", default=False,
help="NTSC video format")
parser.add_option("-r", "--repeat", action="store_false", default=True,
help="repeat in_file in a loop")
parser.add_option("-N", "--nframes", type="eng_float", default=None,
help="number of frames to collect [default=+inf]")
parser.add_option("", "--freq-min", type="eng_float", default=50.25e6,
help="Set a minimum frequency [default=%default]")
parser.add_option("", "--freq-max", type="eng_float", default=900.25e6,
help="Set a maximum frequency [default=%default]")
(options, args) = parser.parse_args ()
if not (len(args) == 1):
parser.print_help()
sys.stderr.write('You must specify the output: FILENAME or sdl\n')
sys.exit(1)
filename = args[0]
self.tv_freq_min = options.freq_min
self.tv_freq_max = options.freq_max
if options.in_filename is None:
parser.print_help()
sys.stderr.write('You must specify the input -i FILENAME or -i usrp\n');
raise SystemExit, 1
if not (filename=="sdl"):
options.repeat=False
input_rate = options.samp_rate
print "video sample rate %s" % (eng_notation.num_to_str(input_rate))
if not (options.in_filename=="usrp"):
# file is the data source, captured with usrp_rx_cfile.py
self.filesource = blocks.file_source(gr.sizeof_short,
options.in_filename,
options.repeat)
self.istoc = blocks.interleaved_short_to_complex()
self.connect(self.filesource,self.istoc)
self.src=self.istoc
else:
if options.freq is None:
parser.print_help()
sys.stderr.write('You must specify the frequency with -f FREQ\n');
raise SystemExit, 1
# build the graph
self.u = uhd.usrp_source(device_addr=options.args, stream_args=uhd.stream_args('fc32'))
# Set the subdevice spec
if(options.spec):
self.u.set_subdev_spec(options.spec, 0)
# Set the antenna
if(options.antenna):
self.u.set_antenna(options.antenna, 0)
self.u.set_samp_rate(input_rate)
dev_rate = self.u.get_samp_rate()
self.src=self.u
if options.gain is None:
# if no gain was specified, use the mid-point in dB
g = self.u.get_gain_range()
options.gain = float(g.start()+g.stop())/2.0
self.u.set_gain(options.gain)
r = self.u.set_center_freq(options.freq)
if not r:
sys.stderr.write('Failed to set frequency\n')
raise SystemExit, 1
self.agc = analog.agc_cc(1e-7,1.0,1.0) #1e-7
self.am_demod = blocks.complex_to_mag ()
self.set_blacklevel = blocks.add_const_ff(options.brightness +255.0)
self.invert_and_scale = blocks.multiply_const_ff(-options.contrast *128.0*255.0/(200.0))
self.f2uc = blocks.float_to_uchar()
# sdl window as final sink
if not (options.pal or options.ntsc):
options.pal=True #set default to PAL
if options.pal:
lines_per_frame=625.0
frames_per_sec=25.0
show_width=768
elif options.ntsc:
lines_per_frame=525.0
frames_per_sec=29.97002997
show_width=640
width=int(input_rate/(lines_per_frame*frames_per_sec))
height=int(lines_per_frame)
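# Worked example (sketch): at the default 1e6 samples/s with PAL timing,
# width = int(1e6 / (625.0 * 25.0)) = 64 pixels per line and height = 625
# lines; a higher sample rate yields more horizontal resolution.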
if filename=="sdl":
#Here comes the tv screen, you have to build and install
#gr-video-sdl for this (subproject of gnuradio)
try:
video_sink = video_sdl.sink_uc(frames_per_sec, width, height, 0,
show_width,height)
except:
print "gr-video-sdl is not installed"
print "realtime \"sdl\" video output window is not available"
raise SystemExit, 1
self.dst=video_sink
else:
print "You can use the imagemagick display tool to show the resulting imagesequence"
print "use the following line to show the demodulated TV-signal:"
print "display -depth 8 -size " +str(width)+ "x" + str(height) + " gray:" +filename
print "(Use the spacebar to advance to next frames)"
file_sink = blocks.file_sink(gr.sizeof_char, filename)
self.dst =file_sink
if options.nframes is None:
self.connect(self.src, self.agc)
else:
self.head = blocks.head(gr.sizeof_gr_complex, int(options.nframes*width*height))
self.connect(self.src, self.head, self.agc)
self.connect (self.agc, self.am_demod, self.invert_and_scale,
self.set_blacklevel, self.f2uc, self.dst)
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
| gpl-3.0 |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/chromite/cros/commands/cros_flash.py | 1 | 41020 | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Install/copy the image to the device."""
from __future__ import print_function
import cStringIO
import logging
import os
import shutil
import sys
import tempfile
import time
import urlparse
from chromite import cros
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import dev_server_wrapper as ds_wrapper
from chromite.lib import osutils
from chromite.lib import remote_access
DEVSERVER_PKG_DIR = os.path.join(constants.SOURCE_ROOT, 'src/platform/dev')
DEVSERVER_STATIC_DIR = cros_build_lib.FromChrootPath(
os.path.join(constants.CHROOT_SOURCE_ROOT, 'devserver', 'static'))
IMAGE_NAME_TO_TYPE = {
'chromiumos_test_image.bin': 'test',
'chromiumos_image.bin': 'dev',
'chromiumos_base_image.bin': 'base',
'recovery_image.bin': 'recovery',
}
IMAGE_TYPE_TO_NAME = {
'test': 'chromiumos_test_image.bin',
'dev': 'chromiumos_image.bin',
'base': 'chromiumos_base_image.bin',
'recovery': 'recovery_image.bin',
}
XBUDDY_REMOTE = 'remote'
XBUDDY_LOCAL = 'local'
def ConvertTranslatedPath(original_path, translated_path):
"""Converts a translated xbuddy path to an xbuddy path.
Devserver/xbuddy does not accept requests with translated xbuddy
path (build-id/version/image-name). This function converts such a
translated path to an xbuddy path that is suitable for use in
devserver requests.
Args:
original_path: the xbuddy path before translation.
(e.g., remote/peppy/latest-canary).
translated_path: the translated xbuddy path
(e.g., peppy-release/R36-5760.0.0).
Returns:
An xbuddy path that uniquely identifies a build and can be used in devserver
requests: {local|remote}/build-id/version/image_type
"""
chunks = translated_path.split(os.path.sep)
chunks[-1] = IMAGE_NAME_TO_TYPE[chunks[-1]]
if _GetXbuddyPath(original_path).startswith(XBUDDY_REMOTE):
chunks = [XBUDDY_REMOTE] + chunks
else:
chunks = [XBUDDY_LOCAL] + chunks
return os.path.sep.join(chunks)
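# Example (sketch, hypothetical build): converting a canary alias back to a
# devserver-usable path:
#   ConvertTranslatedPath('remote/peppy/latest-canary',
#                         'peppy-release/R36-5760.0.0/chromiumos_test_image.bin')
#   -> 'remote/peppy-release/R36-5760.0.0/test'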
def _GetXbuddyPath(path):
"""A helper function to parse an xbuddy path.
Args:
path: Either a path with no scheme or an xbuddy://path/for/xbuddy
Returns:
path/for/xbuddy if |path| is xbuddy://path/for/xbuddy; otherwise,
returns |path|.
Raises:
ValueError if |path| uses any scheme other than xbuddy://.
"""
parsed = urlparse.urlparse(path)
# pylint: disable=E1101
if parsed.scheme == 'xbuddy':
return '%s%s' % (parsed.netloc, parsed.path)
elif parsed.scheme == '':
logging.debug('Assuming %s is an xbuddy path.', path)
return path
else:
raise ValueError('Unsupported scheme %s.' % parsed.scheme)
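# Example (sketch):
#   _GetXbuddyPath('xbuddy://remote/peppy/latest')  ->  'remote/peppy/latest'
#   _GetXbuddyPath('remote/peppy/latest')           ->  'remote/peppy/latest'
#   _GetXbuddyPath('http://remote/peppy/latest')    ->  raises ValueError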
def GetImagePathWithXbuddy(path, board, device='<DEVICE>'):
"""Gets image path using xbuddy.
Ask xbuddy to translate |path|, and if necessary, download and stage the
image, then return a translated path to the image.
Args:
path: The xbuddy path.
board: The default board to use if board is not specified in |path|.
device: The device specified by the user.
Returns:
A translated path to the image: build-id/version/image_name.
"""
# Import xbuddy for translating, downloading and staging the image.
if not os.path.exists(DEVSERVER_PKG_DIR):
raise Exception('Cannot find xbuddy module. Devserver package directory '
'does not exist: %s' % DEVSERVER_PKG_DIR)
sys.path.append(DEVSERVER_PKG_DIR)
import xbuddy
xb = xbuddy.XBuddy(static_dir=DEVSERVER_STATIC_DIR, board=board,
log_screen=False)
path_list = _GetXbuddyPath(path).rsplit(os.path.sep)
try:
build_id, file_name = xb.Get(path_list)
return os.path.join(build_id, file_name)
except xbuddy.XBuddyException as e:
logging.error('Locating image "%s" failed. The path might not be valid or '
'the image might not exist. To get the latest remote image, '
'please run:\ncros flash --board=%s %s remote/latest', path,
board, device)
raise ValueError('Cannot locate image %s: %s' % (path, e))
def GenerateXbuddyRequest(path, req_type):
"""Generate an xbuddy request used to retreive payloads.
This function generates a xbuddy request based on |path| and
|req_type|, which can be used to query the devserver. For request
type 'image' ('update'), the devserver will repond with a URL
pointing to the folder where the image (update payloads) is stored.
Args:
path: An xbuddy path (with or without xbuddy://).
req_type: xbuddy request type ('update', 'image', or 'translate').
Returns:
An xbuddy request.
"""
if req_type == 'update':
return 'xbuddy/%s?for_update=true&return_dir=true' % _GetXbuddyPath(path)
elif req_type == 'image':
return 'xbuddy/%s?return_dir=true' % _GetXbuddyPath(path)
elif req_type == 'translate':
return 'xbuddy_translate/%s' % _GetXbuddyPath(path)
else:
raise ValueError('Does not support xbuddy request type %s' % req_type)
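# Example (sketch): GenerateXbuddyRequest('remote/peppy/latest', 'image')
# -> 'xbuddy/remote/peppy/latest?return_dir=true', ready to be appended to a
# devserver URL.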
def TranslatedPathToLocalPath(translated_path, static_dir):
"""Convert the translated path to a local path to the image file.
Args:
translated_path: the translated xbuddy path
(e.g., peppy-release/R36-5760.0.0/chromiumos_image).
static_dir: The static directory used by the devserver.
Returns:
A local path to the image file.
"""
real_path = osutils.ExpandPath(os.path.join(static_dir, translated_path))
if os.path.exists(real_path):
return real_path
else:
return cros_build_lib.FromChrootPath(real_path)
class USBImager(object):
"""Copy image to the target removable device."""
def __init__(self, device, board, image, debug=False, install=False,
yes=False):
"""Initalizes USBImager."""
self.device = device
self.board = board if board else cros_build_lib.GetDefaultBoard()
self.image = image
self.debug = debug
self.debug_level = logging.DEBUG if debug else logging.INFO
self.install = install
self.yes = yes
def DeviceNameToPath(self, device_name):
return '/dev/%s' % device_name
def GetRemovableDeviceDescription(self, device):
"""Returns a informational description of the removable |device|.
Args:
device: the device name (e.g. sdc).
Returns:
A string describing |device| (e.g. Patriot Memory 7918 MB).
"""
desc = [
osutils.GetDeviceInfo(device, keyword='manufacturer'),
osutils.GetDeviceInfo(device, keyword='product'),
osutils.GetDeviceSize(self.DeviceNameToPath(device)),
'(%s)' % self.DeviceNameToPath(device),
]
return ' '.join([x for x in desc if x])
def ListAllRemovableDevices(self):
"""Returns a list of removable devices.
Returns:
A list of device names (e.g. ['sdb', 'sdc']).
"""
devices = osutils.ListBlockDevices()
removable_devices = []
for d in devices:
if d.TYPE == 'disk' and d.RM == '1':
removable_devices.append(d.NAME)
return removable_devices
def ChooseRemovableDevice(self, devices):
"""Lists all removable devices and asks user to select/confirm.
Args:
devices: a list of device names (e.g. ['sda', 'sdb']).
Returns:
The device name chosen by the user.
"""
idx = cros_build_lib.GetChoice(
'Removable device(s) found. Please select/confirm to continue:',
[self.GetRemovableDeviceDescription(x) for x in devices])
return devices[idx]
def InstallImageToDevice(self, image, device):
"""Installs |image| to the removable |device|.
Args:
image: Path to the image to copy.
device: Device to copy to.
"""
if not self.board:
raise Exception('Couldn\'t determine what board to use')
cmd = [
'%s/usr/sbin/chromeos-install' % cros_build_lib.GetSysroot(self.board),
'--yes',
'--skip_src_removable',
'--skip_dst_removable',
'--payload_image=%s' % image,
'--dst=%s' % device,
'--skip_postinstall',
]
cros_build_lib.SudoRunCommand(cmd)
def CopyImageToDevice(self, image, device):
"""Copies |image| to the removable |device|.
Args:
image: Path to the image to copy.
device: Device to copy to.
"""
# Use pv to display progress bar if possible.
cmd_base = 'pv -pretb'
try:
cros_build_lib.RunCommand(['pv', '--version'], print_cmd=False,
capture_output=True)
except cros_build_lib.RunCommandError:
cmd_base = 'cat'
cmd = '%s %s | dd of=%s bs=4M iflag=fullblock oflag=sync' % (
cmd_base, image, device)
cros_build_lib.SudoRunCommand(cmd, shell=True)
cros_build_lib.SudoRunCommand(['sync'], debug_level=self.debug_level)
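# Example (sketch, hypothetical paths): for image='/tmp/chromiumos_image.bin'
# and device='/dev/sdc' this runs, under sudo:
#   pv -pretb /tmp/chromiumos_image.bin | dd of=/dev/sdc bs=4M \
#     iflag=fullblock oflag=sync
# falling back to 'cat' for the left-hand side when pv is not installed.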
def IsFilePathGPTDiskImage(self, file_path):
"""Determines if the file is a valid GPT disk."""
if os.path.isfile(file_path):
with cros_build_lib.Open(file_path) as image_file:
image_file.seek(0x1fe)
if image_file.read(10) == '\x55\xaaEFI PART':
return True
return False
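# Note (sketch): offset 0x1fe (510) holds the last two bytes of the protective
# MBR; a GPT disk image has the 0x55 0xaa boot signature there, immediately
# followed by the 8-byte 'EFI PART' header signature at the start of LBA 1
# (byte 512), hence the single 10-byte read above.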
def ChooseImageFromDirectory(self, dir_path):
"""Lists all image files in |dir_path| and ask user to select one."""
images = [x for x in os.listdir(dir_path) if
self.IsFilePathGPTDiskImage(os.path.join(dir_path, x))]
idx = 0
if len(images) == 0:
raise ValueError('No image found in %s.' % dir_path)
elif len(images) > 1:
idx = cros_build_lib.GetChoice(
'Multiple images found in %s. Please select one to continue:' % (
(dir_path,)),
images)
return os.path.join(dir_path, images[idx])
def _GetImagePath(self):
"""Returns the image path to use."""
image_path = translated_path = None
if os.path.isfile(self.image):
if not self.yes and not self.IsFilePathGPTDiskImage(self.image):
# TODO(wnwen): Open the tarball and if there is just one file in it,
# use that instead. Existing code in upload_symbols.py.
if cros_build_lib.BooleanPrompt(
prolog='The given image file is not a valid disk image. Perhaps '
'you forgot to untar it.',
prompt='Terminate the current flash process?'):
cros_build_lib.Die('Cros Flash terminated by user.')
image_path = self.image
elif os.path.isdir(self.image):
# Ask user which image (*.bin) in the folder to use.
image_path = self.ChooseImageFromDirectory(self.image)
else:
# Translate the xbuddy path to get the exact image to use.
translated_path = GetImagePathWithXbuddy(self.image, self.board,
'usb://%s' % self.device)
image_path = TranslatedPathToLocalPath(translated_path,
DEVSERVER_STATIC_DIR)
logging.info('Using image %s', translated_path or image_path)
return image_path
def Run(self):
"""Image the removable device."""
devices = self.ListAllRemovableDevices()
if self.device:
# If user specified a device path, check if it exists.
if not os.path.exists(self.device):
cros_build_lib.Die('Device path %s does not exist.' % self.device)
# Then check if it is removable.
if self.device not in [self.DeviceNameToPath(x) for x in devices]:
msg = '%s is not a removable device.' % self.device
if not (self.yes or cros_build_lib.BooleanPrompt(
default=False, prolog=msg)):
cros_build_lib.Die('You can specify usb:// to choose from a list of '
'removable devices.')
target = None
if self.device:
# Get device name from path (e.g. sdc in /dev/sdc).
target = self.device.rsplit(os.path.sep, 1)[-1]
elif devices:
# Ask user to choose from the list.
target = self.ChooseRemovableDevice(devices)
else:
cros_build_lib.Die('No removable devices detected.')
image_path = self._GetImagePath()
try:
device = self.DeviceNameToPath(target)
if self.install:
self.InstallImageToDevice(image_path, device)
else:
self.CopyImageToDevice(image_path, device)
except cros_build_lib.RunCommandError:
logging.error('Failed copying image to device %s',
self.DeviceNameToPath(target))
class FileImager(USBImager):
"""Copy image to the target path."""
def Run(self):
"""Copy the image to the path specified by self.device."""
if not os.path.exists(self.device):
cros_build_lib.Die('Path %s does not exist.' % self.device)
image_path = self._GetImagePath()
if os.path.isdir(self.device):
logging.info('Copying to %s',
os.path.join(self.device, os.path.basename(image_path)))
else:
logging.info('Copying to %s', self.device)
try:
shutil.copy(image_path, self.device)
except IOError:
logging.error('Failed to copy image %s to %s', image_path, self.device)
class DeviceUpdateError(Exception):
"""Thrown when there is an error during device update."""
class RemoteDeviceUpdater(object):
"""Performs update on a remote device."""
ROOTFS_FILENAME = 'update.gz'
STATEFUL_FILENAME = 'stateful.tgz'
DEVSERVER_FILENAME = 'devserver.py'
STATEFUL_UPDATE_BIN = '/usr/bin/stateful_update'
UPDATE_ENGINE_BIN = 'update_engine_client'
UPDATE_CHECK_INTERVAL = 10
# Root working directory on the device. This directory is in the
# stateful partition and thus has enough space to store the payloads.
DEVICE_BASE_DIR = '/mnt/stateful_partition/cros-flash'
def __init__(self, ssh_hostname, ssh_port, image, stateful_update=True,
rootfs_update=True, clobber_stateful=False, reboot=True,
board=None, src_image_to_delta=None, wipe=True, debug=False,
yes=False, ping=True, disable_verification=False):
"""Initializes RemoteDeviceUpdater"""
if not stateful_update and not rootfs_update:
cros_build_lib.Die('No update operation to perform. Use -h to see usage.')
self.tempdir = tempfile.mkdtemp(prefix='cros-flash')
self.ssh_hostname = ssh_hostname
self.ssh_port = ssh_port
self.image = image
self.board = board
self.src_image_to_delta = src_image_to_delta
self.do_stateful_update = stateful_update
self.do_rootfs_update = rootfs_update
self.disable_verification = disable_verification
self.clobber_stateful = clobber_stateful
self.reboot = reboot
self.debug = debug
self.ping = ping
# Do not wipe if debug is set.
self.wipe = wipe and not debug
self.yes = yes
# The variables below are set if user passes an local image path.
# Used to store a copy of the local image.
self.image_tempdir = None
# Used to store a symlink in devserver's static_dir.
self.static_tempdir = None
@classmethod
def GetUpdateStatus(cls, device, keys=None):
"""Returns the status of the update engine on the |device|.
Retrieves the status from update engine and confirms all keys are
in the status.
Args:
device: A ChromiumOSDevice object.
keys: the keys to look for in the status result (defaults to
['CURRENT_OP']).
Returns:
A list of values in the order of |keys|.
"""
keys = ['CURRENT_OP'] if not keys else keys
result = device.RunCommand([cls.UPDATE_ENGINE_BIN, '--status'],
capture_output=True)
if not result.output:
raise Exception('Cannot get update status')
try:
status = cros_build_lib.LoadKeyValueFile(
cStringIO.StringIO(result.output))
except ValueError:
raise ValueError('Cannot parse update status')
values = []
for key in keys:
if key not in status:
raise ValueError('Missing %s in the update engine status' % key)
values.append(status.get(key))
return values
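# Usage sketch (hypothetical status values): GetUpdateStatus(device) might
# return ['UPDATE_STATUS_IDLE'], while
# GetUpdateStatus(device, ['CURRENT_OP', 'PROGRESS']) might return
# ['UPDATE_STATUS_DOWNLOADING', '0.5'].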
def UpdateStateful(self, device, payload, clobber=False):
"""Update the stateful partition of the device.
Args:
device: The ChromiumOSDevice object to update.
payload: The path to the update payload.
clobber: Clobber stateful partition (defaults to False).
"""
# Copy latest stateful_update to device.
stateful_update_bin = cros_build_lib.FromChrootPath(
self.STATEFUL_UPDATE_BIN)
device.CopyToWorkDir(stateful_update_bin)
msg = 'Updating stateful partition'
logging.info('Copying stateful payload to device...')
device.CopyToWorkDir(payload)
cmd = ['sh',
os.path.join(device.work_dir,
os.path.basename(self.STATEFUL_UPDATE_BIN)),
os.path.join(device.work_dir, os.path.basename(payload))]
if clobber:
cmd.append('--stateful_change=clean')
msg += ' with clobber enabled'
logging.info('%s...', msg)
try:
device.RunCommand(cmd)
except cros_build_lib.RunCommandError:
logging.error('Failed to perform stateful partition update.')
def _CopyDevServerPackage(self, device, tempdir):
"""Copy devserver package to work directory of device.
Args:
device: The ChromiumOSDevice object to copy the package to.
tempdir: The directory to temporarily store devserver package.
"""
logging.info('Copying devserver package to device...')
src_dir = os.path.join(tempdir, 'src')
osutils.RmDir(src_dir, ignore_missing=True)
shutil.copytree(
DEVSERVER_PKG_DIR, src_dir,
ignore=shutil.ignore_patterns('*.pyc', 'tmp*', '.*', 'static', '*~'))
device.CopyToWorkDir(src_dir)
return os.path.join(device.work_dir, os.path.basename(src_dir))
def SetupRootfsUpdate(self, device):
"""Makes sure |device| is ready for rootfs update."""
logging.info('Checking if update engine is idle...')
status, = self.GetUpdateStatus(device)
if status == 'UPDATE_STATUS_UPDATED_NEED_REBOOT':
logging.info('Device needs to reboot before updating...')
device.Reboot()
status, = self.GetUpdateStatus(device)
if status != 'UPDATE_STATUS_IDLE':
raise DeviceUpdateError('Update engine is not idle. Status: %s' % status)
def UpdateRootfs(self, device, payload, tempdir):
"""Update the rootfs partition of the device.
Args:
device: The ChromiumOSDevice object to update.
payload: The path to the update payload.
tempdir: The directory to store temporary files.
"""
# Setup devserver and payload on the target device.
static_dir = os.path.join(device.work_dir, 'static')
payload_dir = os.path.join(static_dir, 'pregenerated')
src_dir = self._CopyDevServerPackage(device, tempdir)
device.RunCommand(['mkdir', '-p', payload_dir])
logging.info('Copying rootfs payload to device...')
device.CopyToDevice(payload, payload_dir)
devserver_bin = os.path.join(src_dir, self.DEVSERVER_FILENAME)
ds = ds_wrapper.RemoteDevServerWrapper(
device, devserver_bin, static_dir=static_dir, log_dir=device.work_dir)
logging.info('Updating rootfs partition')
try:
ds.Start()
# Use the localhost IP address to ensure that update engine
# client can connect to the devserver.
omaha_url = ds.GetDevServerURL(
ip='127.0.0.1', port=ds.port, sub_dir='update/pregenerated')
cmd = [self.UPDATE_ENGINE_BIN, '-check_for_update',
'-omaha_url=%s' % omaha_url]
device.RunCommand(cmd)
# Loop until update is complete.
while True:
op, progress = self.GetUpdateStatus(device, ['CURRENT_OP', 'PROGRESS'])
logging.info('Waiting for update...status: %s at progress %s',
op, progress)
if op == 'UPDATE_STATUS_UPDATED_NEED_REBOOT':
break
if op == 'UPDATE_STATUS_IDLE':
raise DeviceUpdateError(
'Update failed with unexpected update status: %s' % op)
time.sleep(self.UPDATE_CHECK_INTERVAL)
ds.Stop()
except Exception:
logging.error('Rootfs update failed.')
logging.warning(ds.TailLog() or 'No devserver log is available.')
raise
finally:
ds.Stop()
device.CopyFromDevice(ds.log_file,
os.path.join(tempdir, 'target_devserver.log'),
error_code_ok=True)
device.CopyFromDevice('/var/log/update_engine.log', tempdir,
follow_symlinks=True,
error_code_ok=True)
def ConvertLocalPathToXbuddyPath(self, path):
"""Converts |path| to an xbuddy path.
This function copies the image into a temporary directory in the chroot
and creates a symlink in static_dir for devserver/xbuddy to
access.
Args:
path: Path to an image.
Returns:
The xbuddy path for |path|.
"""
self.image_tempdir = osutils.TempDir(
base_dir=cros_build_lib.FromChrootPath('/tmp'),
prefix='cros_flash_local_image',
sudo_rm=True)
tempdir_path = self.image_tempdir.tempdir
logging.info('Copying image to temporary directory %s', tempdir_path)
# Devserver only knows the image names listed in IMAGE_TYPE_TO_NAME.
# Rename the image to chromiumos_test_image.bin when copying.
TEMP_IMAGE_TYPE = 'test'
shutil.copy(path,
os.path.join(tempdir_path, IMAGE_TYPE_TO_NAME[TEMP_IMAGE_TYPE]))
chroot_path = cros_build_lib.ToChrootPath(tempdir_path)
# Create and link static_dir/local_imagexxxx/link to the image
# folder, so that xbuddy/devserver can understand the path.
# Alternatively, we could pass '--image' at devserver startup,
# but this flag is deprecated.
self.static_tempdir = osutils.TempDir(base_dir=DEVSERVER_STATIC_DIR,
prefix='local_image',
sudo_rm=True)
relative_dir = os.path.join(os.path.basename(self.static_tempdir.tempdir),
'link')
symlink_path = os.path.join(DEVSERVER_STATIC_DIR, relative_dir)
logging.info('Creating a symlink %s -> %s', symlink_path, chroot_path)
os.symlink(chroot_path, symlink_path)
return os.path.join(relative_dir, TEMP_IMAGE_TYPE)
def GetUpdatePayloads(self, path, payload_dir, board=None,
src_image_to_delta=None, timeout=60 * 15):
"""Launch devserver to get the update payloads.
Args:
path: The xbuddy path.
payload_dir: The directory to store the payloads.
board: The default board to use when |path| is None.
src_image_to_delta: Image used as the base to generate the delta payloads.
timeout: Timeout for launching devserver (seconds).
"""
ds = ds_wrapper.DevServerWrapper(static_dir=DEVSERVER_STATIC_DIR,
src_image=src_image_to_delta, board=board)
req = GenerateXbuddyRequest(path, 'update')
logging.info('Starting local devserver to generate/serve payloads...')
try:
ds.Start()
url = ds.OpenURL(ds.GetURL(sub_dir=req), timeout=timeout)
ds.DownloadFile(os.path.join(url, self.ROOTFS_FILENAME), payload_dir)
ds.DownloadFile(os.path.join(url, self.STATEFUL_FILENAME), payload_dir)
except ds_wrapper.DevServerException:
logging.warning(ds.TailLog() or 'No devserver log is available.')
raise
else:
logging.debug(ds.TailLog() or 'No devserver log is available.')
finally:
ds.Stop()
if os.path.exists(ds.log_file):
shutil.copyfile(ds.log_file,
os.path.join(payload_dir, 'local_devserver.log'))
else:
logging.warning('Could not find %s', ds.log_file)
def _CheckPayloads(self, payload_dir):
"""Checks that all update payloads exists in |payload_dir|."""
filenames = []
filenames += [self.ROOTFS_FILENAME] if self.do_rootfs_update else []
filenames += [self.STATEFUL_FILENAME] if self.do_stateful_update else []
for fname in filenames:
payload = os.path.join(payload_dir, fname)
if not os.path.exists(payload):
cros_build_lib.Die('Payload %s does not exist!' % payload)
def Verify(self, old_root_dev, new_root_dev):
"""Verifies that the root deivce changed after reboot."""
assert new_root_dev and old_root_dev
if new_root_dev == old_root_dev:
raise DeviceUpdateError(
'Failed to boot into the new version. Possibly there was a '
'signing problem, or an automated rollback occurred because '
'your new image failed to boot.')
@classmethod
def GetRootDev(cls, device):
"""Get the current root device on |device|."""
rootdev = device.RunCommand(
['rootdev', '-s'], capture_output=True).output.strip()
logging.debug('Current root device is %s', rootdev)
return rootdev
def Cleanup(self):
"""Cleans up the temporary directory."""
if self.image_tempdir:
self.image_tempdir.Cleanup()
if self.static_tempdir:
self.static_tempdir.Cleanup()
if self.wipe:
logging.info('Cleaning up temporary working directory...')
osutils.RmDir(self.tempdir)
else:
logging.info('You can find the log files and/or payloads in %s',
self.tempdir)
def _CanRunDevserver(self, device, tempdir):
"""We can run devserver on |device|.
If the stateful partition is corrupted, Python or other packages
(e.g. cherrypy) that Cros Flash needs for rootfs update may be
missing on |device|.
This will also use `ldconfig` to update library paths on the target
device if it looks like that's causing problems, which is necessary
for base images.
Args:
device: A ChromiumOSDevice object.
tempdir: A temporary directory to store files.
Returns:
True if we can start devserver; False otherwise.
"""
logging.info('Checking if we can run devserver on the device.')
src_dir = self._CopyDevServerPackage(device, tempdir)
devserver_bin = os.path.join(src_dir, self.DEVSERVER_FILENAME)
devserver_check_command = ['python', devserver_bin, '--help']
try:
device.RunCommand(devserver_check_command)
except cros_build_lib.RunCommandError as e:
logging.warning('Cannot start devserver: %s', e)
if 'python: error while loading shared libraries' in str(e):
logging.info('Attempting to correct device library paths...')
try:
device.RunCommand(['ldconfig', '-r', '/'])
device.RunCommand(devserver_check_command)
logging.info('Library path correction successful.')
return True
except cros_build_lib.RunCommandError as e2:
logging.warning('Library path correction failed: %s', e2)
return False
return True
def Run(self):
"""Performs remote device update."""
old_root_dev, new_root_dev = None, None
try:
device_connected = False
with remote_access.ChromiumOSDeviceHandler(
self.ssh_hostname, port=self.ssh_port,
base_dir=self.DEVICE_BASE_DIR, ping=self.ping) as device:
device_connected = True
board = cros_build_lib.GetBoard(device_board=device.board,
override_board=self.board,
force=self.yes)
logging.info('Board is %s', board)
if os.path.isdir(self.image):
# If the given path is a directory, we use the provided
# update payload(s) in the directory.
payload_dir = self.image
logging.info('Using provided payloads in %s', payload_dir)
else:
if os.path.isfile(self.image):
# If the given path is an image, make sure devserver can
# access it and generate payloads.
logging.info('Using image %s', self.image)
image_path = self.ConvertLocalPathToXbuddyPath(self.image)
else:
# For xbuddy paths, we should do a sanity check / confirmation
# when the xbuddy board doesn't match the board on the
# device. Unfortunately this isn't currently possible since we
# don't want to duplicate xbuddy code. TODO(sosa):
# crbug.com/340722 and use it to compare boards.
device_addr = 'ssh://%s' % self.ssh_hostname
if self.ssh_port:
device_addr = '%s:%d' % (device_addr, self.ssh_port)
# Translate the xbuddy path to get the exact image to use.
translated_path = GetImagePathWithXbuddy(self.image, board,
device_addr)
logging.info('Using image %s', translated_path)
# Convert the translated path to be used in the update request.
image_path = ConvertTranslatedPath(self.image, translated_path)
# Launch a local devserver to generate/serve update payloads.
payload_dir = self.tempdir
self.GetUpdatePayloads(image_path, payload_dir,
board=board,
src_image_to_delta=self.src_image_to_delta)
# Verify that all required payloads are in the payload directory.
self._CheckPayloads(payload_dir)
restore_stateful = False
if (not self._CanRunDevserver(device, self.tempdir) and
self.do_rootfs_update):
msg = ('Cannot start devserver! The stateful partition may be '
'corrupted. Cros Flash can try to restore the stateful '
'partition first.')
restore_stateful = self.yes or cros_build_lib.BooleanPrompt(
default=False, prolog=msg)
if not restore_stateful:
cros_build_lib.Die('Cannot continue to perform rootfs update!')
if restore_stateful:
logging.warning('Restoring the stateful partition...')
payload = os.path.join(payload_dir, self.STATEFUL_FILENAME)
self.UpdateStateful(device, payload, clobber=self.clobber_stateful)
device.Reboot()
if self._CanRunDevserver(device, self.tempdir):
logging.info('Stateful partition restored.')
else:
cros_build_lib.Die('Unable to restore stateful partition. Exiting.')
# Perform device updates.
if self.do_rootfs_update:
self.SetupRootfsUpdate(device)
# Record the current root device. This must be done after
# SetupRootfsUpdate because SetupRootfsUpdate may reboot the
# device if there is a pending update, which changes the
# root device.
old_root_dev = self.GetRootDev(device)
payload = os.path.join(payload_dir, self.ROOTFS_FILENAME)
self.UpdateRootfs(device, payload, self.tempdir)
logging.info('Rootfs update completed.')
if self.do_stateful_update and not restore_stateful:
payload = os.path.join(payload_dir, self.STATEFUL_FILENAME)
self.UpdateStateful(device, payload, clobber=self.clobber_stateful)
logging.info('Stateful update completed.')
if self.reboot:
logging.info('Rebooting device..')
device.Reboot()
if self.clobber_stateful:
# --clobber-stateful wipes the stateful partition and the
# working directory on the device no longer exists. To
# remedy this, we recreate the working directory here.
device.BaseRunCommand(['mkdir', '-p', device.work_dir])
if self.do_rootfs_update and self.reboot:
logging.info('Verifying that the device has been updated...')
new_root_dev = self.GetRootDev(device)
self.Verify(old_root_dev, new_root_dev)
if self.disable_verification:
logging.info('Disabling rootfs verification on the device...')
device.DisableRootfsVerification()
except Exception:
logging.error('Device update failed.')
if device_connected and device.lsb_release:
lsb_entries = sorted(device.lsb_release.items())
logging.info('Following are the LSB version details of the device:\n%s',
'\n'.join('%s=%s' % (k, v) for k, v in lsb_entries))
raise
else:
logging.info('Update performed successfully.')
finally:
self.Cleanup()
@cros.CommandDecorator('flash')
class FlashCommand(cros.CrosCommand):
"""Update the device with an image.
This command updates the device with the image
(ssh://<hostname>[:port]), copies an image to a removable device
(usb://<device_path>), or copies an xbuddy path to a local
file path (file://<file_path>).
For device update, it assumes that device is able to accept ssh
connections.
For rootfs partition update, this command may launch a devserver to
generate payloads. As a side effect, it may create symlinks in
static_dir/others used by the devserver.
"""
EPILOG = """
To update/image the device with the latest locally built image:
cros flash device latest
cros flash device
To update/image the device with an xbuddy path:
cros flash device xbuddy://{local, remote}/<board>/<version>
Common xbuddy version aliases are 'latest' (alias for 'latest-stable')
latest-{dev, beta, stable, canary}, and latest-official.
To update/image the device with a local image path:
cros flash device /path/to/image.bin
Examples:
cros flash 192.168.1.7 xbuddy://remote/x86-mario/latest-canary
cros flash 192.168.1.7 xbuddy://remote/x86-mario-paladin/R32-4830.0.0-rc1
cros flash usb:// xbuddy://remote/trybot-x86-mario-paladin/R32-5189.0.0-b100
cros flash usb:///dev/sde xbuddy://peppy/latest
cros flash file:///~/images xbuddy://peppy/latest
For more information and known problems/fixes, please see:
http://dev.chromium.org/chromium-os/build/cros-flash
"""
SSH_MODE = 'ssh'
USB_MODE = 'usb'
FILE_MODE = 'file'
# Override base class property to enable stats upload.
upload_stats = True
@classmethod
def AddParser(cls, parser):
"""Add parser arguments."""
super(FlashCommand, cls).AddParser(parser)
parser.add_argument(
'device', help='ssh://device_hostname[:port] or usb://{device_path}. '
'If no device_path is given (i.e. usb://), user will be prompted to '
'choose from a list of removable devices.')
parser.add_argument(
'image', nargs='?', default='latest', help="A local path or an xbuddy "
"path: xbuddy://{local|remote}/board/version/{image_type} image_type "
"can be: 'test', 'dev', 'base', or 'recovery'. Note any strings that "
"do not map to a real file path will be converted to an xbuddy path "
"i.e., latest, will map to xbuddy://latest.")
parser.add_argument(
'--clear-cache', default=False, action='store_true',
help='Clear the devserver static directory. This deletes all the '
'downloaded images and payloads, and also payloads generated by '
'the devserver. Default is not to clear.')
update = parser.add_argument_group('Advanced device update options')
update.add_argument(
'--board', default=None, help='The board to use. By default it is '
'automatically detected. You can override the detected board with '
'this option')
update.add_argument(
'--yes', default=False, action='store_true',
help='Force yes to any prompt. Use with caution.')
update.add_argument(
'--no-reboot', action='store_false', dest='reboot', default=True,
help='Do not reboot after update. Default is always reboot.')
update.add_argument(
'--no-wipe', action='store_false', dest='wipe', default=True,
help='Do not wipe the temporary working directory. Default '
'is always wipe.')
update.add_argument(
'--no-stateful-update', action='store_false', dest='stateful_update',
help='Do not update the stateful partition on the device. '
'Default is always update.')
update.add_argument(
'--no-rootfs-update', action='store_false', dest='rootfs_update',
help='Do not update the rootfs partition on the device. '
'Default is always update.')
update.add_argument(
'--src-image-to-delta', type='path',
help='Local path to an image to be used as the base to generate '
'delta payloads.')
update.add_argument(
'--clobber-stateful', action='store_true', default=False,
help='Clobber stateful partition when performing update.')
update.add_argument(
'--no-ping', dest='ping', action='store_false', default=True,
help='Do not ping the device before attempting to connect to it.')
update.add_argument(
'--disable-rootfs-verification', default=False, action='store_true',
help='Disable rootfs verification after update is completed.')
usb = parser.add_argument_group('USB specific options')
usb.add_argument(
'--install', default=False, action='store_true',
help='Install to the USB device using the base disk layout.')
def __init__(self, options):
"""Initializes cros flash."""
cros.CrosCommand.__init__(self, options)
self.run_mode = None
self.ssh_hostname = None
self.ssh_port = None
self.usb_dev = None
self.copy_path = None
self.any = False
def _ParseDevice(self, device):
"""Parse |device| and set corresponding variables ."""
# pylint: disable=E1101
if urlparse.urlparse(device).scheme == '':
# For backward compatibility, prepend ssh:// ourselves.
device = 'ssh://%s' % device
parsed = urlparse.urlparse(device)
if parsed.scheme == self.SSH_MODE:
self.run_mode = self.SSH_MODE
self.ssh_hostname = parsed.hostname
self.ssh_port = parsed.port
elif parsed.scheme == self.USB_MODE:
self.run_mode = self.USB_MODE
self.usb_dev = device[len('%s://' % self.USB_MODE):]
elif parsed.scheme == self.FILE_MODE:
self.run_mode = self.FILE_MODE
self.copy_path = device[len('%s://' % self.FILE_MODE):]
else:
cros_build_lib.Die('Does not support device %s' % device)
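# Example (sketch): _ParseDevice('192.168.1.7') is treated as
# 'ssh://192.168.1.7' (run_mode='ssh'), while _ParseDevice('usb:///dev/sde')
# sets run_mode='usb' and usb_dev='/dev/sde'.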
# pylint: disable=E1101
def Run(self):
"""Perfrom the cros flash command."""
self.options.Freeze()
if self.options.clear_cache:
logging.info('Clearing the cache...')
ds_wrapper.DevServerWrapper.WipeStaticDirectory(DEVSERVER_STATIC_DIR)
try:
osutils.SafeMakedirsNonRoot(DEVSERVER_STATIC_DIR)
except OSError:
logging.error('Failed to create %s', DEVSERVER_STATIC_DIR)
self._ParseDevice(self.options.device)
if self.options.install:
if self.run_mode != self.USB_MODE:
logging.error('--install can only be used when writing to a USB device')
return 1
if not cros_build_lib.IsInsideChroot():
logging.error('--install can only be used inside the chroot')
return 1
try:
if self.run_mode == self.SSH_MODE:
logging.info('Preparing to update the remote device %s',
self.options.device)
updater = RemoteDeviceUpdater(
self.ssh_hostname,
self.ssh_port,
self.options.image,
board=self.options.board,
src_image_to_delta=self.options.src_image_to_delta,
rootfs_update=self.options.rootfs_update,
stateful_update=self.options.stateful_update,
clobber_stateful=self.options.clobber_stateful,
reboot=self.options.reboot,
wipe=self.options.wipe,
debug=self.options.debug,
yes=self.options.yes,
ping=self.options.ping,
disable_verification=self.options.disable_rootfs_verification)
# Perform device update.
updater.Run()
elif self.run_mode == self.USB_MODE:
path = osutils.ExpandPath(self.usb_dev) if self.usb_dev else ''
logging.info('Preparing to image the removable device %s', path)
imager = USBImager(path,
self.options.board,
self.options.image,
debug=self.options.debug,
install=self.options.install,
yes=self.options.yes)
imager.Run()
elif self.run_mode == self.FILE_MODE:
path = osutils.ExpandPath(self.copy_path) if self.copy_path else ''
logging.info('Preparing to copy image to %s', path)
imager = FileImager(path,
self.options.board,
self.options.image,
debug=self.options.debug,
yes=self.options.yes)
imager.Run()
except (Exception, KeyboardInterrupt) as e:
logging.error(e)
logging.error('Cros Flash failed before completing.')
if self.options.debug:
raise
else:
logging.info('Cros Flash completed successfully.')
| bsd-3-clause |
aksmas/rl-glue-ext | projects/codecs/Python/src/tests/test_1_environment.py | 8 | 2159 | #
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision$
# $Date$
# $Author$
# $HeadURL$
import random
import sys
from rlglue.environment.Environment import Environment
from rlglue.environment import EnvironmentLoader as EnvironmentLoader
from rlglue.types import Observation
from rlglue.types import Action
from rlglue.types import Reward_observation_terminal
class test_1_environment(Environment):
stepCount=0
o=Observation()
def env_init(self):
return "sample task spec"
def env_start(self):
self.stepCount=0
self.o.intArray=[1]
self.o.doubleArray=[0.0/2.0, 1.0/2.0]
self.o.charArray=['a','b','c']
return self.o
def env_step(self,action):
ro=Reward_observation_terminal()
terminal=False
if self.stepCount < 5:
self.o.doubleArray=[]
self.o.charArray=[]
self.o.intArray=[self.stepCount]
self.stepCount=self.stepCount+1
if self.stepCount==5:
terminal=True
ro.r=1.0
else:
self.o.doubleArray=[0.0078125,-0.0078125,0.0,0.0078125e150,-0.0078125e150]
self.o.charArray=['g','F','?',' ','&']
self.o.intArray=[173,-173,2147483647,0,-2147483648]
ro.r=-2.0
ro.o=self.o
ro.terminal=terminal
return ro
def env_cleanup(self):
pass
def env_message(self,inMessage):
timesToPrint=self.stepCount%3
outMessage=inMessage+"|"
for i in range(0, timesToPrint):
outMessage=outMessage+"%d" % (self.stepCount)
outMessage=outMessage+"."
outMessage=outMessage+"|"+inMessage
return outMessage
if __name__=="__main__":
EnvironmentLoader.loadEnvironment(test_1_environment())
| apache-2.0 |
Jstewcoin/hash | contrib/linearize/linearize-hashes.py | 50 | 3036 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
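# Illustrative batch element as produced by build_request above, e.g. for x=0:
# {'version': '1.1', 'method': 'getblockhash', 'id': 0, 'params': [height]}
# Replies are assumed to arrive in submission order (hence the assert on id).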
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
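# Example CONFIG-FILE contents (illustrative; every key shown has a default
# above, and rpcuser/rpcpassword are required):
# host=127.0.0.1
# port=8332
# rpcuser=someuser
# rpcpassword=somepassword
# min_height=0
# max_height=313000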
| mit |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/pip/req/req_file.py | 343 | 11926 | """
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import sys
import optparse
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils.deprecation import RemovedInPip10Warning
from pip import cmdoptions
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
SUPPORTED_OPTIONS = [
cmdoptions.constraints,
cmdoptions.editable,
cmdoptions.requirements,
cmdoptions.no_index,
cmdoptions.index_url,
cmdoptions.find_links,
cmdoptions.extra_index_url,
cmdoptions.allow_external,
cmdoptions.allow_all_external,
cmdoptions.no_allow_external,
cmdoptions.allow_unsafe,
cmdoptions.no_allow_unsafe,
cmdoptions.use_wheel,
cmdoptions.no_use_wheel,
cmdoptions.always_unzip,
cmdoptions.no_binary,
cmdoptions.only_binary,
cmdoptions.pre,
cmdoptions.process_dependency_links,
cmdoptions.trusted_host,
cmdoptions.require_hashes,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options,
cmdoptions.hash,
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
session=None, constraint=False, wheel_cache=None):
"""Parse a requirements file and yield InstallRequirement instances.
:param filename: Path or url of requirements file.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: cli options.
:param session: Instance of pip.download.PipSession.
:param constraint: If true, parsing a constraint file rather than
requirements file.
:param wheel_cache: Instance of pip.wheel.WheelCache
"""
if session is None:
raise TypeError(
"parse_requirements() missing 1 required keyword argument: "
"'session'"
)
_, content = get_file_content(
filename, comes_from=comes_from, session=session
)
lines_enum = preprocess(content, options)
for line_number, line in lines_enum:
req_iter = process_line(line, filename, line_number, finder,
comes_from, options, session, wheel_cache,
constraint=constraint)
for req in req_iter:
yield req
def preprocess(content, options):
"""Split, filter, and join lines, and return a line iterator
:param content: the content of the requirements file
:param options: cli options
"""
lines_enum = enumerate(content.splitlines(), start=1)
lines_enum = join_lines(lines_enum)
lines_enum = ignore_comments(lines_enum)
lines_enum = skip_regex(lines_enum, options)
return lines_enum
def process_line(line, filename, line_number, finder=None, comes_from=None,
options=None, session=None, wheel_cache=None,
constraint=False):
"""Process a single requirements line; This can result in creating/yielding
requirements, or updating the finder.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
(although our docs imply only one is supported), and all are parsed and
affect the finder.
:param constraint: If True, parsing a constraints file.
:param options: OptionParser options that we may update
"""
parser = build_parser()
defaults = parser.get_default_values()
defaults.index_url = None
if finder:
# `finder.format_control` will be updated during parsing
defaults.format_control = finder.format_control
args_str, options_str = break_args_options(line)
if sys.version_info < (2, 7, 3):
# Prior to 2.7.3, shlex cannot deal with unicode entries
options_str = options_str.encode('utf8')
opts, _ = parser.parse_args(shlex.split(options_str), defaults)
# preserve for the nested code path
line_comes_from = '%s %s (line %s)' % (
'-c' if constraint else '-r', filename, line_number)
# yield a line requirement
if args_str:
isolated = options.isolated_mode if options else False
if options:
cmdoptions.check_install_build_global(options, opts)
# get the options that apply to requirements
req_options = {}
for dest in SUPPORTED_OPTIONS_REQ_DEST:
if dest in opts.__dict__ and opts.__dict__[dest]:
req_options[dest] = opts.__dict__[dest]
yield InstallRequirement.from_line(
args_str, line_comes_from, constraint=constraint,
isolated=isolated, options=req_options, wheel_cache=wheel_cache
)
# yield an editable requirement
elif opts.editables:
isolated = options.isolated_mode if options else False
default_vcs = options.default_vcs if options else None
yield InstallRequirement.from_editable(
opts.editables[0], comes_from=line_comes_from,
constraint=constraint, default_vcs=default_vcs, isolated=isolated,
wheel_cache=wheel_cache
)
# parse a nested requirements file
elif opts.requirements or opts.constraints:
if opts.requirements:
req_path = opts.requirements[0]
nested_constraint = False
else:
req_path = opts.constraints[0]
nested_constraint = True
# original file is over http
if SCHEME_RE.search(filename):
# do a url join so relative paths work
req_path = urllib_parse.urljoin(filename, req_path)
# original file and nested file are paths
elif not SCHEME_RE.search(req_path):
# do a join so relative paths work
req_path = os.path.join(os.path.dirname(filename), req_path)
# TODO: Why not use `comes_from='-r {} (line {})'` here as well?
parser = parse_requirements(
req_path, finder, comes_from, options, session,
constraint=nested_constraint, wheel_cache=wheel_cache
)
for req in parser:
yield req
# percolate hash-checking option upward
elif opts.require_hashes:
options.require_hashes = opts.require_hashes
# set finder options
elif finder:
if opts.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if opts.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if opts.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if opts.index_url:
finder.index_urls = [opts.index_url]
if opts.use_wheel is False:
finder.use_wheel = False
pip.index.fmt_ctl_no_use_wheel(finder.format_control)
if opts.no_index is True:
finder.index_urls = []
if opts.extra_index_urls:
finder.index_urls.extend(opts.extra_index_urls)
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
finder.find_links.append(value)
if opts.pre:
finder.allow_all_prereleases = True
if opts.process_dependency_links:
finder.process_dependency_links = True
if opts.trusted_hosts:
finder.secure_origins.extend(
("*", host, "*") for host in opts.trusted_hosts)
def break_args_options(line):
"""Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
"""
tokens = line.split(' ')
args = []
options = tokens[:]
for token in tokens:
if token.startswith('-') or token.startswith('--'):
break
else:
args.append(token)
options.pop(0)
return ' '.join(args), ' '.join(options)
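# Illustrative split (a sketch): for the line
# 'SomeProject==1.0 --install-option="--prefix=/opt"'
# this returns ('SomeProject==1.0', '--install-option="--prefix=/opt"');
# everything before the first token starting with '-' is kept as args.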
def build_parser():
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self, msg):
raise RequirementsFileParseError(msg)
parser.exit = parser_exit
return parser
def join_lines(lines_enum):
"""Joins a line ending in '\' with the previous line (except when following
comments). The joined line takes on the index of the first line.
"""
primary_line_number = None
new_line = []
for line_number, line in lines_enum:
if not line.endswith('\\') or COMMENT_RE.match(line):
if COMMENT_RE.match(line):
# this ensures comments are always matched later
line = ' ' + line
if new_line:
new_line.append(line)
yield primary_line_number, ''.join(new_line)
new_line = []
else:
yield line_number, line
else:
if not new_line:
primary_line_number = line_number
new_line.append(line.strip('\\'))
# last line contains \
if new_line:
yield primary_line_number, ''.join(new_line)
# TODO: handle space after '\'.
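# Illustrative behavior (a sketch): given enumerated lines
# (1, 'requests \') and (2, '  >=2.0'), this yields
# (1, 'requests   >=2.0'): the continuation keeps the first line's
# number and only the trailing backslash is stripped.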
def ignore_comments(lines_enum):
"""
Strips comments and filter empty lines.
"""
for line_number, line in lines_enum:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line_number, line
def skip_regex(lines_enum, options):
"""
Skip lines that match '--skip-requirements-regex' pattern
Note: the regex pattern is only built once
"""
skip_regex = options.skip_requirements_regex if options else None
if skip_regex:
pattern = re.compile(skip_regex)
lines_enum = filterfalse(
lambda e: pattern.search(e[1]),
lines_enum)
return lines_enum
| apache-2.0 |
mavenlin/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/softmax_centered_impl.py | 64 | 9980 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SoftmaxCentered bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import bijector
__all__ = [
"SoftmaxCentered",
]
class SoftmaxCentered(bijector.Bijector):
"""Bijector which computes `Y = g(X) = exp([X 0]) / sum(exp([X 0]))`.
To implement [softmax](https://en.wikipedia.org/wiki/Softmax_function) as a
bijection, the forward transformation appends a value to the input and the
inverse removes this coordinate. The appended coordinate represents a pivot,
e.g., `softmax(x) = exp(x-c) / sum(exp(x-c))` where `c` is the implicit last
coordinate.
Because we append a coordinate, this bijector only supports `event_ndims in [0,
1]`, i.e., scalars and vectors.
Example Use:
```python
bijector.SoftmaxCentered(event_ndims=1).forward(tf.log([2, 3, 4]))
# Result: [0.2, 0.3, 0.4, 0.1]
# Extra result: 0.1
bijector.SoftmaxCentered(event_ndims=1).inverse([0.2, 0.3, 0.4, 0.1])
# Result: tf.log([2, 3, 4])
# Extra coordinate removed.
```
At first blush it may seem like the [Invariance of domain](
https://en.wikipedia.org/wiki/Invariance_of_domain) theorem implies this
implementation is not a bijection. However, the appended dimension
makes the (forward) image non-open and the theorem does not directly apply.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="softmax_centered"):
self._graph_parents = []
self._name = name
with self._name_scope("init", values=[event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims = tensor_util.constant_value(event_ndims)
if event_ndims is None or event_ndims not in [0, 1]:
raise ValueError("`event_ndims` must be a TF constant which is 0 or 1")
self._static_event_ndims = event_ndims
super(SoftmaxCentered, self).__init__(
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward_event_shape(self, input_shape):
if input_shape.ndims is None:
return input_shape
if input_shape.ndims != self._static_event_ndims:
raise ValueError("input_shape.dims = %d != %d" %
(input_shape.ndims, self._static_event_ndims))
if input_shape.ndims == 0:
return tensor_shape.TensorShape([2])
if input_shape.ndims == 1:
return tensor_shape.TensorShape(input_shape[0] + 1)
# Unreachable code:
raise ValueError("event_ndims = %d must be 0 or 1" % input_shape.ndims)
def _forward_event_shape_tensor(self, input_shape):
ndims = array_ops.shape(input_shape)
if self.validate_args:
# It is not possible for a negative shape so we need only check <= 1.
is_zero_or_one = check_ops.assert_equal(
ndims, 0 if self._static_event_ndims == 0 else 1,
message="event_ndims must be 0 or 1")
ndims = control_flow_ops.with_dependencies([is_zero_or_one], ndims)
if self._static_event_ndims == 0:
return ops.convert_to_tensor(
[2], dtype=dtypes.int32, name="output_shape")
return input_shape + 1
def _inverse_event_shape(self, output_shape):
if output_shape.ndims is None:
return output_shape
if output_shape.ndims != 1:
raise ValueError("output_shape.ndims = %d != 1" % output_shape.ndims)
if self._static_event_ndims == 0:
return tensor_shape.TensorShape([])
return tensor_shape.TensorShape(output_shape[0] - 1)
def _inverse_event_shape_tensor(self, output_shape):
ndims = array_ops.shape(output_shape)[0]
if self.validate_args:
# It is not possible for a negative shape so we need only check <= 1.
is_one = check_ops.assert_equal(
ndims, 1, message="event_ndims must be 1")
ndims = control_flow_ops.with_dependencies([is_one], ndims)
if self._static_event_ndims == 0:
return ops.convert_to_tensor([], dtype=dtypes.int32, name="output_shape")
return array_ops.expand_dims(output_shape[0] - 1, dim=0)
def _forward(self, x):
# Pad the last dim with a zeros vector. We need this because it lets us
# infer the scale in the inverse function.
y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
else array_ops.rank(y))
y = array_ops.pad(y,
paddings=array_ops.concat(
(array_ops.zeros(
(ndims - 1, 2), dtype=dtypes.int32), [[0, 1]]),
0))
# Set shape hints.
if x.get_shape().ndims is not None:
shape = x.get_shape().as_list()
if self._static_event_ndims == 0:
shape += [2]
elif shape[-1] is not None:
shape[-1] += 1
shape = tensor_shape.TensorShape(shape)
y.get_shape().assert_is_compatible_with(shape)
y.set_shape(shape)
# Since we only support event_ndims in [0, 1] and we do padding, we always
# reduce over the last dimension, i.e., dim=-1 (which is the default).
return nn_ops.softmax(y)
def _inverse(self, y):
# To derive the inverse mapping note that:
# y[i] = exp(x[i]) / normalization
# and
# y[end] = 1 / normalization.
# Thus:
# x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
# = log(exp(x[i])/normalization) - log(y[end])
# = log(y[i]) - log(y[end])
shape = (np.asarray(y.get_shape().as_list(), dtype=np.int32)
if y.get_shape().is_fully_defined()
else array_ops.shape(y, name="shape"))
ndims = y.get_shape().ndims or math_ops.rank(y, name="ndims")
# Do this first to make sure CSE catches that it'll happen again in
# _inverse_log_det_jacobian.
x = math_ops.log(y)
# We now extract the last coordinate of the rightmost dimension.
# Our trick is to slice from [0,0,...,shape[-1]-1] to shape[:-1]+[1].
begin = array_ops.one_hot(indices=ndims-1,
depth=ndims,
on_value=shape[-1]-np.array(1, dtype=shape.dtype),
dtype=shape.dtype)
size = array_ops.concat([shape[:-1], np.asarray([1], dtype=shape.dtype)], 0)
log_normalization = -array_ops.strided_slice(x, begin, begin + size)
# Here we slice out all but the last coordinate; see above for idea.
begin = array_ops.zeros_like(shape)
size = array_ops.concat([shape[:-1], [shape[-1] - 1]], 0)
x = array_ops.strided_slice(x, begin, begin + size)
x += log_normalization
if self._static_event_ndims == 0:
x = array_ops.squeeze(x, squeeze_dims=[ndims-1])
# Set shape hints.
if y.get_shape().ndims is not None:
shape = y.get_shape().as_list()
if self._static_event_ndims == 0:
shape = shape[:-1]
elif shape[-1] is not None:
shape[-1] -= 1
shape = tensor_shape.TensorShape(shape)
x.get_shape().assert_is_compatible_with(shape)
x.set_shape(shape)
return x
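# Worked example, matching the class docstring: for y = [0.2, 0.3, 0.4, 0.1],
# x[i] = log(y[i]) - log(y[-1]) = log(y[i] / 0.1), i.e. x = log([2., 3., 4.]);
# the pivot coordinate y[-1] is dropped.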
def _inverse_log_det_jacobian(self, y):
# WLOG, consider the vector case:
# x = log(y[:-1]) - log(y[-1])
# where,
# y[-1] = 1 - sum(y[:-1]).
# We have:
# det{ dX/dY } = det{ diag(1 ./ y[:-1]) + 1 / y[-1] }
# = det{ inv{ diag(y[:-1]) - y[:-1]' y[:-1] } } (1)
# = 1 / det{ diag(y[:-1]) - y[:-1]' y[:-1] }
# = 1 / { (1 + y[:-1]' inv(diag(y[:-1])) y[:-1]) *
# det(diag(y[:-1])) } (2)
# = 1 / { y[-1] prod(y[:-1]) }
# = 1 / prod(y)
# (1) - https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
# or by noting that det{ dX/dY } = 1 / det{ dY/dX } from Bijector
# docstring "Tip".
# (2) - https://en.wikipedia.org/wiki/Matrix_determinant_lemma
return -math_ops.reduce_sum(math_ops.log(y), axis=-1)
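# Numeric check for y = [0.2, 0.3, 0.4, 0.1]: prod(y) = 0.0024, so
# -sum(log(y)) = -log(0.0024) ~= 6.03, consistent with 1 / prod(y) above.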
def _forward_log_det_jacobian(self, x):
if self._static_event_ndims == 0:
return x - 2. * nn_ops.softplus(x)
else:
# This code is similar to nn_ops.log_softmax but different because we have
# an implicit zero column to handle. I.e., instead of:
# reduce_sum(logits - reduce_sum(exp(logits), dim))
# we must do:
# log_normalization = 1 + reduce_sum(exp(logits))
# -log_normalization + reduce_sum(logits - log_normalization)
log_normalization = nn_ops.softplus(
math_ops.reduce_logsumexp(x, axis=-1, keep_dims=True))
fldj = (-log_normalization +
math_ops.reduce_sum(x - log_normalization,
axis=-1,
keep_dims=True))
return array_ops.squeeze(fldj, squeeze_dims=-1)
| apache-2.0 |
NINAnor/QGIS | python/plugins/processing/algs/qgis/QGISAlgorithmProvider.py | 1 | 10160 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QGISAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
try:
import matplotlib.pyplot
hasMatplotlib = True
except:
hasMatplotlib = False
from PyQt4.QtGui import QIcon
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.script.ScriptUtils import ScriptUtils
from RegularPoints import RegularPoints
from SymmetricalDifference import SymmetricalDifference
from VectorSplit import VectorSplit
from VectorGrid import VectorGrid
from RandomExtract import RandomExtract
from RandomExtractWithinSubsets import RandomExtractWithinSubsets
from ExtractByLocation import ExtractByLocation
from PointsInPolygon import PointsInPolygon
from PointsInPolygonUnique import PointsInPolygonUnique
from PointsInPolygonWeighted import PointsInPolygonWeighted
from SumLines import SumLines
from BasicStatisticsNumbers import BasicStatisticsNumbers
from BasicStatisticsStrings import BasicStatisticsStrings
from NearestNeighbourAnalysis import NearestNeighbourAnalysis
from LinesIntersection import LinesIntersection
from MeanCoords import MeanCoords
from PointDistance import PointDistance
from UniqueValues import UniqueValues
from ReprojectLayer import ReprojectLayer
from ExportGeometryInfo import ExportGeometryInfo
from Centroids import Centroids
from Delaunay import Delaunay
from VoronoiPolygons import VoronoiPolygons
from DensifyGeometries import DensifyGeometries
from MultipartToSingleparts import MultipartToSingleparts
from SimplifyGeometries import SimplifyGeometries
from LinesToPolygons import LinesToPolygons
from PolygonsToLines import PolygonsToLines
from SinglePartsToMultiparts import SinglePartsToMultiparts
from ExtractNodes import ExtractNodes
from ConvexHull import ConvexHull
from FixedDistanceBuffer import FixedDistanceBuffer
from VariableDistanceBuffer import VariableDistanceBuffer
from Clip import Clip
from Difference import Difference
from Dissolve import Dissolve
from Intersection import Intersection
from ExtentFromLayer import ExtentFromLayer
from RandomSelection import RandomSelection
from RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from SelectByLocation import SelectByLocation
from Union import Union
from DensifyGeometriesInterval import DensifyGeometriesInterval
from Eliminate import Eliminate
from SpatialJoin import SpatialJoin
from DeleteColumn import DeleteColumn
from DeleteHoles import DeleteHoles
from DeleteDuplicateGeometries import DeleteDuplicateGeometries
from TextToFloat import TextToFloat
from ExtractByAttribute import ExtractByAttribute
from SelectByAttribute import SelectByAttribute
from Grid import Grid
from Gridify import Gridify
from HubDistance import HubDistance
from HubLines import HubLines
from Merge import Merge
from GeometryConvert import GeometryConvert
from ConcaveHull import ConcaveHull
from Polygonize import Polygonize
from RasterLayerStatistics import RasterLayerStatistics
from StatisticsByCategories import StatisticsByCategories
from EquivalentNumField import EquivalentNumField
from AddTableField import AddTableField
from FieldsCalculator import FieldsCalculator
from SaveSelectedFeatures import SaveSelectedFeatures
from Explode import Explode
from AutoincrementalField import AutoincrementalField
from FieldPyculator import FieldsPyculator
from JoinAttributes import JoinAttributes
from CreateConstantRaster import CreateConstantRaster
from PointsLayerFromTable import PointsLayerFromTable
from PointsDisplacement import PointsDisplacement
from ZonalStatistics import ZonalStatistics
from PointsFromPolygons import PointsFromPolygons
from PointsFromLines import PointsFromLines
from RandomPointsExtent import RandomPointsExtent
from RandomPointsLayer import RandomPointsLayer
from RandomPointsPolygonsFixed import RandomPointsPolygonsFixed
from RandomPointsPolygonsVariable import RandomPointsPolygonsVariable
from RandomPointsAlongLines import RandomPointsAlongLines
from PointsToPaths import PointsToPaths
from PostGISExecuteSQL import PostGISExecuteSQL
from ImportIntoPostGIS import ImportIntoPostGIS
from SetVectorStyle import SetVectorStyle
from SetRasterStyle import SetRasterStyle
from SelectByExpression import SelectByExpression
from SelectByAttributeSum import SelectByAttributeSum
from HypsometricCurves import HypsometricCurves
from SplitLinesWithLines import SplitLinesWithLines
from FieldsMapper import FieldsMapper
from Datasources2Vrt import Datasources2Vrt
from CheckValidity import CheckValidity
from OrientedMinimumBoundingBox import OrientedMinimumBoundingBox
from Smooth import Smooth
from ReverseLineDirection import ReverseLineDirection
from ExecuteSQL import ExecuteSQL
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class QGISAlgorithmProvider(AlgorithmProvider):
def __init__(self):
AlgorithmProvider.__init__(self)
self._icon = QIcon(os.path.join(pluginPath, 'images', 'qgis.png'))
self.alglist = [SumLines(), PointsInPolygon(),
PointsInPolygonWeighted(), PointsInPolygonUnique(),
BasicStatisticsStrings(), BasicStatisticsNumbers(),
NearestNeighbourAnalysis(), MeanCoords(),
LinesIntersection(), UniqueValues(), PointDistance(),
ReprojectLayer(), ExportGeometryInfo(), Centroids(),
Delaunay(), VoronoiPolygons(), SimplifyGeometries(),
DensifyGeometries(), DensifyGeometriesInterval(),
MultipartToSingleparts(), SinglePartsToMultiparts(),
PolygonsToLines(), LinesToPolygons(), ExtractNodes(),
Eliminate(), ConvexHull(), FixedDistanceBuffer(),
VariableDistanceBuffer(), Dissolve(), Difference(),
Intersection(), Union(), Clip(), ExtentFromLayer(),
RandomSelection(), RandomSelectionWithinSubsets(),
SelectByLocation(), RandomExtract(), DeleteHoles(),
RandomExtractWithinSubsets(), ExtractByLocation(),
SpatialJoin(), RegularPoints(), SymmetricalDifference(),
VectorSplit(), VectorGrid(), DeleteColumn(),
DeleteDuplicateGeometries(), TextToFloat(),
ExtractByAttribute(), SelectByAttribute(), Grid(),
Gridify(), HubDistance(), HubLines(), Merge(),
GeometryConvert(), AddTableField(), FieldsCalculator(),
SaveSelectedFeatures(), JoinAttributes(),
AutoincrementalField(), Explode(), FieldsPyculator(),
EquivalentNumField(), PointsLayerFromTable(),
StatisticsByCategories(), ConcaveHull(), Polygonize(),
RasterLayerStatistics(), PointsDisplacement(),
ZonalStatistics(), PointsFromPolygons(),
PointsFromLines(), RandomPointsExtent(),
RandomPointsLayer(), RandomPointsPolygonsFixed(),
RandomPointsPolygonsVariable(),
RandomPointsAlongLines(), PointsToPaths(),
PostGISExecuteSQL(), ImportIntoPostGIS(),
SetVectorStyle(), SetRasterStyle(),
SelectByExpression(), HypsometricCurves(),
SplitLinesWithLines(), CreateConstantRaster(),
FieldsMapper(), SelectByAttributeSum(), Datasources2Vrt(),
CheckValidity(), OrientedMinimumBoundingBox(), Smooth(),
ReverseLineDirection(), ExecuteSQL()
]
if hasMatplotlib:
from VectorLayerHistogram import VectorLayerHistogram
from RasterLayerHistogram import RasterLayerHistogram
from VectorLayerScatterplot import VectorLayerScatterplot
from MeanAndStdDevPlot import MeanAndStdDevPlot
from BarPlot import BarPlot
from PolarPlot import PolarPlot
self.alglist.extend([
VectorLayerHistogram(), RasterLayerHistogram(),
VectorLayerScatterplot(), MeanAndStdDevPlot(), BarPlot(),
PolarPlot(),
])
folder = os.path.join(os.path.dirname(__file__), 'scripts')
scripts = ScriptUtils.loadFromFolder(folder)
for script in scripts:
script.allowEdit = False
self.alglist.extend(scripts)
for alg in self.alglist:
alg._icon = self._icon
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
def unload(self):
AlgorithmProvider.unload(self)
def getName(self):
return 'qgis'
def getDescription(self):
return self.tr('QGIS geoalgorithms')
def getIcon(self):
return self._icon
def _loadAlgorithms(self):
self.algs = self.alglist
def supportsNonFileBasedOutput(self):
return True
| gpl-2.0 |
etkirsch/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
ryfeus/lambda-packs | Selenium_PhantomJS/source/pip/_vendor/retrying.py | 934 | 9972 | ## Copyright 2013-2014 Ray Holder
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import random
from pip._vendor import six
import sys
import time
import traceback
# sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint...
MAX_WAIT = 1073741823
def retry(*dargs, **dkw):
"""
Decorator function that instantiates the Retrying object
@param *dargs: positional arguments passed to Retrying object
@param **dkw: keyword arguments passed to the Retrying object
"""
# support both @retry and @retry() as valid syntax
if len(dargs) == 1 and callable(dargs[0]):
def wrap_simple(f):
@six.wraps(f)
def wrapped_f(*args, **kw):
return Retrying().call(f, *args, **kw)
return wrapped_f
return wrap_simple(dargs[0])
else:
def wrap(f):
@six.wraps(f)
def wrapped_f(*args, **kw):
return Retrying(*dargs, **dkw).call(f, *args, **kw)
return wrapped_f
return wrap
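# Illustrative usage (a sketch, not part of the original module):
#
# @retry(stop_max_attempt_number=3, wait_fixed=500)
# def fetch():
#     return flaky_call()  # hypothetical callable that may raise
#
# fetch() is then retried up to 3 attempts total, sleeping 500 ms between
# attempts (see stop_after_attempt and fixed_sleep below).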
class Retrying(object):
def __init__(self,
stop=None, wait=None,
stop_max_attempt_number=None,
stop_max_delay=None,
wait_fixed=None,
wait_random_min=None, wait_random_max=None,
wait_incrementing_start=None, wait_incrementing_increment=None,
wait_exponential_multiplier=None, wait_exponential_max=None,
retry_on_exception=None,
retry_on_result=None,
wrap_exception=False,
stop_func=None,
wait_func=None,
wait_jitter_max=None):
self._stop_max_attempt_number = 5 if stop_max_attempt_number is None else stop_max_attempt_number
self._stop_max_delay = 100 if stop_max_delay is None else stop_max_delay
self._wait_fixed = 1000 if wait_fixed is None else wait_fixed
self._wait_random_min = 0 if wait_random_min is None else wait_random_min
self._wait_random_max = 1000 if wait_random_max is None else wait_random_max
self._wait_incrementing_start = 0 if wait_incrementing_start is None else wait_incrementing_start
self._wait_incrementing_increment = 100 if wait_incrementing_increment is None else wait_incrementing_increment
self._wait_exponential_multiplier = 1 if wait_exponential_multiplier is None else wait_exponential_multiplier
self._wait_exponential_max = MAX_WAIT if wait_exponential_max is None else wait_exponential_max
self._wait_jitter_max = 0 if wait_jitter_max is None else wait_jitter_max
# TODO add chaining of stop behaviors
# stop behavior
stop_funcs = []
if stop_max_attempt_number is not None:
stop_funcs.append(self.stop_after_attempt)
if stop_max_delay is not None:
stop_funcs.append(self.stop_after_delay)
if stop_func is not None:
self.stop = stop_func
elif stop is None:
self.stop = lambda attempts, delay: any(f(attempts, delay) for f in stop_funcs)
else:
self.stop = getattr(self, stop)
# TODO add chaining of wait behaviors
# wait behavior
wait_funcs = [lambda *args, **kwargs: 0]
if wait_fixed is not None:
wait_funcs.append(self.fixed_sleep)
if wait_random_min is not None or wait_random_max is not None:
wait_funcs.append(self.random_sleep)
if wait_incrementing_start is not None or wait_incrementing_increment is not None:
wait_funcs.append(self.incrementing_sleep)
if wait_exponential_multiplier is not None or wait_exponential_max is not None:
wait_funcs.append(self.exponential_sleep)
if wait_func is not None:
self.wait = wait_func
elif wait is None:
self.wait = lambda attempts, delay: max(f(attempts, delay) for f in wait_funcs)
else:
self.wait = getattr(self, wait)
# retry on exception filter
if retry_on_exception is None:
self._retry_on_exception = self.always_reject
else:
self._retry_on_exception = retry_on_exception
# TODO simplify retrying by Exception types
# retry on result filter
if retry_on_result is None:
self._retry_on_result = self.never_reject
else:
self._retry_on_result = retry_on_result
self._wrap_exception = wrap_exception
def stop_after_attempt(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Stop after the previous attempt >= stop_max_attempt_number."""
return previous_attempt_number >= self._stop_max_attempt_number
def stop_after_delay(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Stop after the time from the first attempt >= stop_max_delay."""
return delay_since_first_attempt_ms >= self._stop_max_delay
def no_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Don't sleep at all before retrying."""
return 0
def fixed_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Sleep a fixed amount of time between each retry."""
return self._wait_fixed
def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Sleep a random amount of time between wait_random_min and wait_random_max"""
return random.randint(self._wait_random_min, self._wait_random_max)
def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""
Sleep an incremental amount of time after each attempt, starting at
wait_incrementing_start and incrementing by wait_incrementing_increment
"""
result = self._wait_incrementing_start + (self._wait_incrementing_increment * (previous_attempt_number - 1))
if result < 0:
result = 0
return result
def exponential_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
exp = 2 ** previous_attempt_number
result = self._wait_exponential_multiplier * exp
if result > self._wait_exponential_max:
result = self._wait_exponential_max
if result < 0:
result = 0
return result
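# Worked example with the defaults above (multiplier=1, max=MAX_WAIT):
# the sleep after attempt n is 2**n ms, i.e. 2, 4, 8, 16, ... capped at
# wait_exponential_max and floored at 0.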
def never_reject(self, result):
return False
def always_reject(self, result):
return True
def should_reject(self, attempt):
reject = False
if attempt.has_exception:
reject |= self._retry_on_exception(attempt.value[1])
else:
reject |= self._retry_on_result(attempt.value)
return reject
def call(self, fn, *args, **kwargs):
start_time = int(round(time.time() * 1000))
attempt_number = 1
while True:
try:
attempt = Attempt(fn(*args, **kwargs), attempt_number, False)
except:
tb = sys.exc_info()
attempt = Attempt(tb, attempt_number, True)
if not self.should_reject(attempt):
return attempt.get(self._wrap_exception)
delay_since_first_attempt_ms = int(round(time.time() * 1000)) - start_time
if self.stop(attempt_number, delay_since_first_attempt_ms):
if not self._wrap_exception and attempt.has_exception:
# get() on an attempt with an exception should cause it to be raised, but raise just in case
raise attempt.get()
else:
raise RetryError(attempt)
else:
sleep = self.wait(attempt_number, delay_since_first_attempt_ms)
if self._wait_jitter_max:
jitter = random.random() * self._wait_jitter_max
sleep = sleep + max(0, jitter)
time.sleep(sleep / 1000.0)
attempt_number += 1
class Attempt(object):
"""
An Attempt encapsulates a call to a target function that may end as a
normal return value from the function or an Exception depending on what
occurred during the execution.
"""
def __init__(self, value, attempt_number, has_exception):
self.value = value
self.attempt_number = attempt_number
self.has_exception = has_exception
def get(self, wrap_exception=False):
"""
Return the return value of this Attempt instance or raise an Exception.
If wrap_exception is true, this Attempt is wrapped inside of a
RetryError before being raised.
"""
if self.has_exception:
if wrap_exception:
raise RetryError(self)
else:
six.reraise(self.value[0], self.value[1], self.value[2])
else:
return self.value
def __repr__(self):
if self.has_exception:
return "Attempts: {0}, Error:\n{1}".format(self.attempt_number, "".join(traceback.format_tb(self.value[2])))
else:
return "Attempts: {0}, Value: {1}".format(self.attempt_number, self.value)
class RetryError(Exception):
"""
A RetryError encapsulates the last Attempt instance right before giving up.
"""
def __init__(self, last_attempt):
self.last_attempt = last_attempt
def __str__(self):
return "RetryError[{0}]".format(self.last_attempt)
| mit |
gquirozbogner/contentbox-master | third_party/unidecode/x05a.py | 4 | 4894 | data = (
'Song ', # 0x00
'Wei ', # 0x01
'Hong ', # 0x02
'Wa ', # 0x03
'Lou ', # 0x04
'Ya ', # 0x05
'Rao ', # 0x06
'Jiao ', # 0x07
'Luan ', # 0x08
'Ping ', # 0x09
'Xian ', # 0x0a
'Shao ', # 0x0b
'Li ', # 0x0c
'Cheng ', # 0x0d
'Xiao ', # 0x0e
'Mang ', # 0x0f
'Fu ', # 0x10
'Suo ', # 0x11
'Wu ', # 0x12
'Wei ', # 0x13
'Ke ', # 0x14
'Lai ', # 0x15
'Chuo ', # 0x16
'Ding ', # 0x17
'Niang ', # 0x18
'Xing ', # 0x19
'Nan ', # 0x1a
'Yu ', # 0x1b
'Nuo ', # 0x1c
'Pei ', # 0x1d
'Nei ', # 0x1e
'Juan ', # 0x1f
'Shen ', # 0x20
'Zhi ', # 0x21
'Han ', # 0x22
'Di ', # 0x23
'Zhuang ', # 0x24
'E ', # 0x25
'Pin ', # 0x26
'Tui ', # 0x27
'Han ', # 0x28
'Mian ', # 0x29
'Wu ', # 0x2a
'Yan ', # 0x2b
'Wu ', # 0x2c
'Xi ', # 0x2d
'Yan ', # 0x2e
'Yu ', # 0x2f
'Si ', # 0x30
'Yu ', # 0x31
'Wa ', # 0x32
'[?] ', # 0x33
'Xian ', # 0x34
'Ju ', # 0x35
'Qu ', # 0x36
'Shui ', # 0x37
'Qi ', # 0x38
'Xian ', # 0x39
'Zhui ', # 0x3a
'Dong ', # 0x3b
'Chang ', # 0x3c
'Lu ', # 0x3d
'Ai ', # 0x3e
'E ', # 0x3f
'E ', # 0x40
'Lou ', # 0x41
'Mian ', # 0x42
'Cong ', # 0x43
'Pou ', # 0x44
'Ju ', # 0x45
'Po ', # 0x46
'Cai ', # 0x47
'Ding ', # 0x48
'Wan ', # 0x49
'Biao ', # 0x4a
'Xiao ', # 0x4b
'Shu ', # 0x4c
'Qi ', # 0x4d
'Hui ', # 0x4e
'Fu ', # 0x4f
'E ', # 0x50
'Wo ', # 0x51
'Tan ', # 0x52
'Fei ', # 0x53
'Wei ', # 0x54
'Jie ', # 0x55
'Tian ', # 0x56
'Ni ', # 0x57
'Quan ', # 0x58
'Jing ', # 0x59
'Hun ', # 0x5a
'Jing ', # 0x5b
'Qian ', # 0x5c
'Dian ', # 0x5d
'Xing ', # 0x5e
'Hu ', # 0x5f
'Wa ', # 0x60
'Lai ', # 0x61
'Bi ', # 0x62
'Yin ', # 0x63
'Chou ', # 0x64
'Chuo ', # 0x65
'Fu ', # 0x66
'Jing ', # 0x67
'Lun ', # 0x68
'Yan ', # 0x69
'Lan ', # 0x6a
'Kun ', # 0x6b
'Yin ', # 0x6c
'Ya ', # 0x6d
'Ju ', # 0x6e
'Li ', # 0x6f
'Dian ', # 0x70
'Xian ', # 0x71
'Hwa ', # 0x72
'Hua ', # 0x73
'Ying ', # 0x74
'Chan ', # 0x75
'Shen ', # 0x76
'Ting ', # 0x77
'Dang ', # 0x78
'Yao ', # 0x79
'Wu ', # 0x7a
'Nan ', # 0x7b
'Ruo ', # 0x7c
'Jia ', # 0x7d
'Tou ', # 0x7e
'Xu ', # 0x7f
'Yu ', # 0x80
'Wei ', # 0x81
'Ti ', # 0x82
'Rou ', # 0x83
'Mei ', # 0x84
'Dan ', # 0x85
'Ruan ', # 0x86
'Qin ', # 0x87
'Hui ', # 0x88
'Wu ', # 0x89
'Qian ', # 0x8a
'Chun ', # 0x8b
'Mao ', # 0x8c
'Fu ', # 0x8d
'Jie ', # 0x8e
'Duan ', # 0x8f
'Xi ', # 0x90
'Zhong ', # 0x91
'Mei ', # 0x92
'Huang ', # 0x93
'Mian ', # 0x94
'An ', # 0x95
'Ying ', # 0x96
'Xuan ', # 0x97
'Jie ', # 0x98
'Wei ', # 0x99
'Mei ', # 0x9a
'Yuan ', # 0x9b
'Zhen ', # 0x9c
'Qiu ', # 0x9d
'Ti ', # 0x9e
'Xie ', # 0x9f
'Tuo ', # 0xa0
'Lian ', # 0xa1
'Mao ', # 0xa2
'Ran ', # 0xa3
'Si ', # 0xa4
'Pian ', # 0xa5
'Wei ', # 0xa6
'Wa ', # 0xa7
'Jiu ', # 0xa8
'Hu ', # 0xa9
'Ao ', # 0xaa
'[?] ', # 0xab
'Bou ', # 0xac
'Xu ', # 0xad
'Tou ', # 0xae
'Gui ', # 0xaf
'Zou ', # 0xb0
'Yao ', # 0xb1
'Pi ', # 0xb2
'Xi ', # 0xb3
'Yuan ', # 0xb4
'Ying ', # 0xb5
'Rong ', # 0xb6
'Ru ', # 0xb7
'Chi ', # 0xb8
'Liu ', # 0xb9
'Mei ', # 0xba
'Pan ', # 0xbb
'Ao ', # 0xbc
'Ma ', # 0xbd
'Gou ', # 0xbe
'Kui ', # 0xbf
'Qin ', # 0xc0
'Jia ', # 0xc1
'Sao ', # 0xc2
'Zhen ', # 0xc3
'Yuan ', # 0xc4
'Cha ', # 0xc5
'Yong ', # 0xc6
'Ming ', # 0xc7
'Ying ', # 0xc8
'Ji ', # 0xc9
'Su ', # 0xca
'Niao ', # 0xcb
'Xian ', # 0xcc
'Tao ', # 0xcd
'Pang ', # 0xce
'Lang ', # 0xcf
'Nao ', # 0xd0
'Bao ', # 0xd1
'Ai ', # 0xd2
'Pi ', # 0xd3
'Pin ', # 0xd4
'Yi ', # 0xd5
'Piao ', # 0xd6
'Yu ', # 0xd7
'Lei ', # 0xd8
'Xuan ', # 0xd9
'Man ', # 0xda
'Yi ', # 0xdb
'Zhang ', # 0xdc
'Kang ', # 0xdd
'Yong ', # 0xde
'Ni ', # 0xdf
'Li ', # 0xe0
'Di ', # 0xe1
'Gui ', # 0xe2
'Yan ', # 0xe3
'Jin ', # 0xe4
'Zhuan ', # 0xe5
'Chang ', # 0xe6
'Ce ', # 0xe7
'Han ', # 0xe8
'Nen ', # 0xe9
'Lao ', # 0xea
'Mo ', # 0xeb
'Zhe ', # 0xec
'Hu ', # 0xed
'Hu ', # 0xee
'Ao ', # 0xef
'Nen ', # 0xf0
'Qiang ', # 0xf1
'Ma ', # 0xf2
'Pie ', # 0xf3
'Gu ', # 0xf4
'Wu ', # 0xf5
'Jiao ', # 0xf6
'Tuo ', # 0xf7
'Zhan ', # 0xf8
'Mao ', # 0xf9
'Xian ', # 0xfa
'Xian ', # 0xfb
'Mo ', # 0xfc
'Liao ', # 0xfd
'Lian ', # 0xfe
'Hua ', # 0xff
)
| apache-2.0 |
python-provy/provy | tests/unit/more/centos/package/test_yum.py | 1 | 8675 | from datetime import datetime, timedelta
import sys
from mock import patch, MagicMock
from nose.tools import istest
from provy.more.centos import YumRole, PackageNotFound
from provy.more.centos.package import yum
from tests.unit.tools.helpers import ProvyTestCase
class YumRoleTest(ProvyTestCase):
def setUp(self):
super(YumRoleTest, self).setUp()
self.role = YumRole(prov=None, context={})
@istest
def installs_necessary_packages_to_provision(self):
with self.mock_role_methods('ensure_up_to_date', 'ensure_package_installed'):
self.role.provision()
self.role.ensure_up_to_date.assert_called_once_with()
self.role.ensure_package_installed.assert_called_once_with('curl')
@istest
def ensures_gpg_key_is_added(self):
with self.execute_mock():
self.role.ensure_gpg_key('http://some.repo')
self.role.execute.assert_called_once_with('curl http://some.repo | rpm --import -', sudo=True, stdout=False)
@istest
def checks_that_repository_exists_in_yum_repos(self):
with self.execute_mock() as execute:
execute.return_value = '''
some
repo
foo-bar
'''
result = self.role.has_source('foo-bar')
self.assertTrue(result)
execute.assert_called_once_with("cat /etc/yum.repos.d/CentOS-Base.repo", sudo=True, stdout=False)
@istest
def checks_that_repository_doesnt_exist_in_apt_source(self):
with self.execute_mock() as execute:
execute.return_value = 'some repo'
result = self.role.has_source('foo-bar')
self.assertFalse(result)
@istest
def ensures_a_source_string_is_added_to_the_repos(self):
source_line = 'foo-bar-repo'
with self.execute_mock() as execute, self.mock_role_method('has_source') as has_source:
has_source.return_value = False
self.assertTrue(self.role.ensure_yum_source(source_line))
self.assertTrue(has_source.called)
execute.assert_called_once_with('echo "{}" >> /etc/yum.repos.d/CentOS-Base.repo'.format(source_line), sudo=True, stdout=False)
@istest
def doesnt_add_source_if_it_already_exists(self):
source_line = 'foo-bar-repo'
with self.execute_mock() as execute, self.mock_role_method('has_source') as has_source:
has_source.return_value = True
self.assertFalse(self.role.ensure_yum_source(source_line))
self.assertFalse(execute.called)
@istest
def gets_update_date_file_as_a_property(self):
with self.mock_role_method('remote_temp_dir'):
self.role.remote_temp_dir.return_value = '/foo/bar'
self.assertEqual(self.role.update_date_file, '/foo/bar/last_yum_update')
@istest
def stores_update_date(self):
with self.mock_role_methods('update_date_file', 'execute'), patch.object(yum, 'datetime') as mock_datetime:
self.role.update_date_file = '/foo/bar'
when = datetime.strptime('2013-01-01', '%Y-%m-%d')
mock_datetime.now.return_value = when
self.role.store_update_date()
self.role.execute.assert_called_once_with('echo "01-01-13 00:00:00" > /foo/bar', stdout=False)
@istest
def gets_last_update_date(self):
with self.mock_role_methods('remote_exists', 'update_date_file', 'read_remote_file'):
self.role.update_date_file = '/foo/bar'
self.role.remote_exists.return_value = True
self.role.read_remote_file.return_value = '01-01-13 00:00:00'
result = self.role.get_last_update_date()
self.assertEqual(result, datetime.strptime('2013-01-01', '%Y-%m-%d'))
self.role.remote_exists.assert_called_once_with(self.role.update_date_file)
self.role.read_remote_file.assert_called_once_with(self.role.update_date_file)
@istest
def gets_none_as_last_update_if_there_was_no_update_yet(self):
with self.mock_role_methods('remote_exists', 'update_date_file', 'read_remote_file'):
self.role.update_date_file = '/foo/bar'
self.role.remote_exists.return_value = False
result = self.role.get_last_update_date()
self.assertIsNone(result)
self.assertFalse(self.role.read_remote_file.called)
@istest
def updates_yum_when_passed_time_limit(self):
with patch.object(yum, 'datetime') as mock_datetime, self.mock_role_methods('get_last_update_date', 'force_update'):
now = datetime.strptime('2013-01-01', '%Y-%m-%d')
then = now - timedelta(minutes=31)
mock_datetime.now.return_value = now
self.role.get_last_update_date.return_value = then
self.role.ensure_up_to_date()
self.role.get_last_update_date.assert_called_once_with()
self.role.force_update.assert_called_once_with()
@istest
def doesnt_update_if_not_passed_from_time_limit(self):
with patch.object(yum, 'datetime') as mock_datetime, self.mock_role_methods('get_last_update_date', 'force_update'):
now = datetime.strptime('2013-01-01', '%Y-%m-%d')
then = now - timedelta(minutes=29)
mock_datetime.now.return_value = now
self.role.get_last_update_date.return_value = then
self.role.ensure_up_to_date()
self.assertFalse(self.role.force_update.called)
@istest
def forces_an_update(self):
with self.mock_role_methods('execute', 'store_update_date'):
self.role.force_update()
self.assertTrue(self.role.context['yum-up-to-date'])
self.role.execute.assert_called_once_with('yum clean all', stdout=False, sudo=True)
self.role.store_update_date.assert_called_once_with()
@istest
def checks_that_a_package_is_installed(self):
with self.execute_mock() as execute:
execute.return_value = '''yes'''
self.assertTrue(self.role.is_package_installed('foo'))
execute.assert_called_once_with('rpm -qa foo', sudo=True, stdout=False)
@istest
def checks_that_a_package_is_not_installed(self):
with self.execute_mock() as execute:
execute.return_value = ''''''
self.assertFalse(self.role.is_package_installed('baz'))
execute.assert_called_once_with('rpm -qa baz', sudo=True, stdout=False)
@istest
def checks_that_a_package_exists(self):
with self.execute_mock() as execute:
self.assertTrue(self.role.package_exists('python'))
execute.assert_called_with('yum info -q python', stdout=False)
@istest
def checks_that_a_package_doesnt_exist(self):
with self.execute_mock() as execute:
execute.return_value = False
self.assertFalse(self.role.package_exists('phyton'))
execute.assert_called_with('yum info -q phyton', stdout=False)
@istest
def traps_sys_exit_when_checking_if_a_package_exists(self):
def exit(*args, **kwargs):
sys.exit(1)
execute = MagicMock(side_effect=exit)
with patch('provy.core.roles.Role.execute', execute):
self.assertFalse(self.role.package_exists('phyton'))
@istest
def checks_if_a_package_exists_before_installing(self):
with self.execute_mock() as execute, self.mock_role_methods('package_exists', 'is_package_installed') as (package_exists, is_package_installed):
is_package_installed.return_value = False
package_exists.return_value = True
result = self.role.ensure_package_installed('python')
self.assertTrue(result)
self.assertTrue(package_exists.called)
execute.assert_called_with('yum install -y python', stdout=False, sudo=True)
@istest
def fails_to_install_package_if_it_doesnt_exist(self):
with self.execute_mock(), self.mock_role_methods('package_exists', 'is_package_installed') as (package_exists, is_package_installed):
is_package_installed.return_value = False
package_exists.return_value = False
self.assertRaises(PackageNotFound, self.role.ensure_package_installed, 'phyton')
self.assertTrue(package_exists.called)
@istest
def doesnt_install_package_if_already_installed(self):
with self.mock_role_method('is_package_installed'):
self.role.is_package_installed.return_value = True
result = self.role.ensure_package_installed('python')
self.assertFalse(result)
| mit |
nash-x/hws | nova/api/openstack/compute/plugins/v3/keypairs.py | 15 | 7040 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keypair management extension."""
import webob
import webob.exc
from nova.api.openstack.compute.schemas.v3 import keypairs
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
from nova.i18n import _
ALIAS = 'os-keypairs'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
soft_authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
class KeypairController(object):
"""Keypair API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.KeypairAPI()
def _filter_keypair(self, keypair, **attrs):
clean = {
'name': keypair.name,
'public_key': keypair.public_key,
'fingerprint': keypair.fingerprint,
}
for attr in attrs:
clean[attr] = keypair[attr]
return clean
# TODO(oomichi): Here should be 201(Created) instead of 200 by v2.1
# +microversions because the keypair creation finishes when returning
# a response.
@extensions.expected_errors((400, 403, 409))
@validation.schema(keypairs.create)
def create(self, req, body):
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
You can send a public_key to add an existing ssh key.
params: keypair object with:
name (required) - string
public_key (optional) - string
"""
context = req.environ['nova.context']
authorize(context, action='create')
params = body['keypair']
name = params['name']
try:
if 'public_key' in params:
keypair = self.api.import_key_pair(context,
context.user_id, name,
params['public_key'])
keypair = self._filter_keypair(keypair, user_id=True)
else:
keypair, private_key = self.api.create_key_pair(
context, context.user_id, name)
keypair = self._filter_keypair(keypair, user_id=True)
keypair['private_key'] = private_key
return {'keypair': keypair}
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
raise webob.exc.HTTPConflict(explanation=exc.format_message())
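    # A minimal sketch of the two request bodies this handler accepts (the
    # keys follow the code above; the example values are illustrative only):
    #
    #     generate a new keypair:
    #         {"keypair": {"name": "my-key"}}
    #     import an existing public key:
    #         {"keypair": {"name": "my-key", "public_key": "ssh-rsa AAAA..."}}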
# TODO(oomichi): Here should be 204(No Content) instead of 202 by v2.1
# +microversions because the resource keypair has been deleted completely
# when returning a response.
@wsgi.response(202)
@extensions.expected_errors(404)
def delete(self, req, id):
"""Delete a keypair with a given name."""
context = req.environ['nova.context']
authorize(context, action='delete')
try:
self.api.delete_key_pair(context, context.user_id, id)
except exception.KeypairNotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data for the given key name."""
context = req.environ['nova.context']
authorize(context, action='show')
try:
keypair = self.api.get_key_pair(context, context.user_id, id)
except exception.KeypairNotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
# TODO(oomichi): It is necessary to filter a response of keypair with
# _filter_keypair() when v2.1+microversions for implementing consistent
# behaviors in this keypair resource.
return {'keypair': keypair}
@extensions.expected_errors(())
def index(self, req):
"""List of keypairs for a user."""
context = req.environ['nova.context']
authorize(context, action='index')
key_pairs = self.api.get_key_pairs(context, context.user_id)
rval = []
for key_pair in key_pairs:
rval.append({'keypair': self._filter_keypair(key_pair)})
return {'keypairs': rval}
class Controller(wsgi.Controller):
def _add_key_name(self, req, servers):
for server in servers:
db_server = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show'/'detail' methods.
server['key_name'] = db_server['key_name']
def _show(self, req, resp_obj):
if 'server' in resp_obj.obj:
server = resp_obj.obj['server']
self._add_key_name(req, [server])
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if soft_authorize(context):
self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if 'servers' in resp_obj.obj and soft_authorize(context):
servers = resp_obj.obj['servers']
self._add_key_name(req, servers)
class Keypairs(extensions.V3APIExtensionBase):
"""Keypair Support."""
name = "Keypairs"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(ALIAS,
KeypairController())]
return resources
def get_controller_extensions(self):
controller = Controller()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
create_kwargs['key_name'] = server_dict.get('key_name')
def get_server_create_schema(self):
return keypairs.server_create
| apache-2.0 |
Dhrumil1808/phpmyadmin | doc/_ext/configext.py | 141 | 6618 | from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.domains.std import GenericObject, StandardDomain
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import clean_astext, make_refnode
from sphinx.util import ws_re
from sphinx import addnodes
from sphinx.util.docfields import Field
from docutils import nodes
def get_id_from_cfg(text):
'''
Formats anchor ID from config option.
'''
if text[:6] == '$cfg[\'':
text = text[6:]
if text[-2:] == '\']':
text = text[:-2]
text = text.replace('[$i]', '')
parts = text.split("']['")
return parts
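# Example (illustrative inputs; the results follow the logic above):
#     get_id_from_cfg("$cfg['Servers'][$i]['host']")  ->  ['Servers', 'host']
#     get_id_from_cfg("$cfg['MaxRows']")              ->  ['MaxRows']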
class ConfigOption(ObjectDescription):
indextemplate = 'configuration option; %s'
parse_node = None
has_arguments = True
doc_field_types = [
Field('default', label='Default value', has_arg=False,
names=('default', )),
Field('type', label='Type', has_arg=False,
names=('type',)),
]
def handle_signature(self, sig, signode):
signode.clear()
signode += addnodes.desc_name(sig, sig)
# normalize whitespace like XRefRole does
name = ws_re.sub('', sig)
return name
def add_target_and_index(self, name, sig, signode):
targetparts = get_id_from_cfg(name)
targetname = 'cfg_%s' % '_'.join(targetparts)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
indextype = 'single'
# Generic index entries
indexentry = self.indextemplate % (name,)
self.indexnode['entries'].append((indextype, indexentry,
targetname, targetname))
self.indexnode['entries'].append((indextype, name,
targetname, targetname))
# Server section
if targetparts[0] == 'Servers' and len(targetparts) > 1:
indexname = ', '.join(targetparts[1:])
self.indexnode['entries'].append((indextype, 'server configuration; %s' % indexname,
targetname, targetname))
self.indexnode['entries'].append((indextype, indexname,
targetname, targetname))
else:
indexname = ', '.join(targetparts)
self.indexnode['entries'].append((indextype, indexname,
targetname, targetname))
self.env.domaindata['config']['objects'][self.objtype, name] = \
self.env.docname, targetname
class ConfigSectionXRefRole(XRefRole):
"""
Cross-referencing role for configuration sections (adds an index entry).
"""
def result_nodes(self, document, env, node, is_ref):
if not is_ref:
return [node], []
varname = node['reftarget']
tgtid = 'index-%s' % env.new_serialno('index')
indexnode = addnodes.index()
indexnode['entries'] = [
('single', varname, tgtid, varname),
('single', 'configuration section; %s' % varname, tgtid, varname)
]
targetnode = nodes.target('', '', ids=[tgtid])
document.note_explicit_target(targetnode)
return [indexnode, targetnode, node], []
class ConfigSection(ObjectDescription):
indextemplate = 'configuration section; %s'
parse_node = None
def handle_signature(self, sig, signode):
if self.parse_node:
name = self.parse_node(self.env, sig, signode)
else:
signode.clear()
signode += addnodes.desc_name(sig, sig)
# normalize whitespace like XRefRole does
name = ws_re.sub('', sig)
return name
def add_target_and_index(self, name, sig, signode):
targetname = '%s-%s' % (self.objtype, name)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
if self.indextemplate:
colon = self.indextemplate.find(':')
if colon != -1:
indextype = self.indextemplate[:colon].strip()
indexentry = self.indextemplate[colon+1:].strip() % (name,)
else:
indextype = 'single'
indexentry = self.indextemplate % (name,)
self.indexnode['entries'].append((indextype, indexentry,
targetname, targetname))
self.env.domaindata['config']['objects'][self.objtype, name] = \
self.env.docname, targetname
class ConfigOptionXRefRole(XRefRole):
"""
Cross-referencing role for configuration options (adds an index entry).
"""
def result_nodes(self, document, env, node, is_ref):
if not is_ref:
return [node], []
varname = node['reftarget']
tgtid = 'index-%s' % env.new_serialno('index')
indexnode = addnodes.index()
indexnode['entries'] = [
('single', varname, tgtid, varname),
('single', 'configuration option; %s' % varname, tgtid, varname)
]
targetnode = nodes.target('', '', ids=[tgtid])
document.note_explicit_target(targetnode)
return [indexnode, targetnode, node], []
class ConfigFileDomain(Domain):
name = 'config'
label = 'Config'
object_types = {
'option': ObjType('config option', 'option'),
'section': ObjType('config section', 'section'),
}
directives = {
'option': ConfigOption,
'section': ConfigSection,
}
roles = {
'option': ConfigOptionXRefRole(),
'section': ConfigSectionXRefRole(),
}
initial_data = {
'objects': {}, # (type, name) -> docname, labelid
}
def clear_doc(self, docname):
for key, (fn, _) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][key]
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
docname, labelid = self.data['objects'].get((typ, target), ('', ''))
if not docname:
return None
else:
return make_refnode(builder, fromdocname, docname,
labelid, contnode)
def get_objects(self):
for (type, name), info in self.data['objects'].items():
yield (name, name, type, info[0], info[1],
self.object_types[type].attrs['searchprio'])
def setup(app):
app.add_domain(ConfigFileDomain)
| gpl-2.0 |
drcapulet/sentry | src/sentry/db/models/utils.py | 27 | 2213 | """
sentry.db.utils
~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import operator
from uuid import uuid4
from django.db.models import F
from django.db.models.expressions import ExpressionNode
from django.template.defaultfilters import slugify
from sentry.db.exceptions import CannotResolveExpression
EXPRESSION_NODE_CALLBACKS = {
ExpressionNode.ADD: operator.add,
ExpressionNode.SUB: operator.sub,
ExpressionNode.MUL: operator.mul,
ExpressionNode.DIV: operator.div,
ExpressionNode.MOD: operator.mod,
}
try:
EXPRESSION_NODE_CALLBACKS[ExpressionNode.AND] = operator.and_
except AttributeError:
EXPRESSION_NODE_CALLBACKS[ExpressionNode.BITAND] = operator.and_
try:
EXPRESSION_NODE_CALLBACKS[ExpressionNode.OR] = operator.or_
except AttributeError:
EXPRESSION_NODE_CALLBACKS[ExpressionNode.BITOR] = operator.or_
def resolve_expression_node(instance, node):
def _resolve(instance, node):
if isinstance(node, F):
return getattr(instance, node.name)
elif isinstance(node, ExpressionNode):
return resolve_expression_node(instance, node)
return node
op = EXPRESSION_NODE_CALLBACKS.get(node.connector, None)
if not op:
raise CannotResolveExpression
runner = _resolve(instance, node.children[0])
for n in node.children[1:]:
runner = op(runner, _resolve(instance, n))
return runner
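# Example (a sketch; ``instance`` and the field name are hypothetical, and
# this relies on the pre-1.8 Django ExpressionNode API imported above):
#     # given instance.times_seen == 5:
#     resolve_expression_node(instance, F('times_seen') + 1)  # -> 6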
def slugify_instance(inst, label, reserved=(), max_length=30, *args, **kwargs):
base_slug = slugify(label)[:max_length]
if base_slug in reserved:
base_slug = None
elif base_slug is not None:
base_slug = base_slug.strip()
if not base_slug:
base_slug = uuid4().hex[:12]
base_qs = type(inst).objects.all()
if inst.id:
base_qs = base_qs.exclude(id=inst.id)
if args or kwargs:
base_qs = base_qs.filter(*args, **kwargs)
inst.slug = base_slug
n = 0
while base_qs.filter(slug__iexact=inst.slug).exists():
n += 1
inst.slug = base_slug[:max_length - len(str(n)) - 1] + '-' + str(n)
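# Example usage (a sketch with a hypothetical model instance):
#     slugify_instance(team, team.name, reserved=('api', 'admin'),
#                      organization=team.organization)
#     # team.slug is now e.g. 'my-team', or 'my-team-1' on collision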
| bsd-3-clause |
republic-analytics/luigi | test/contrib/hdfs/webhdfs_client_test.py | 27 | 1650 | # -*- coding: utf-8 -*-
#
# Copyright 2015 VNG Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nose.plugins.attrib import attr
from helpers import with_config
from webhdfs_minicluster import WebHdfsMiniClusterTestCase
from contrib.hdfs_test import HdfsTargetTestMixin
from luigi.contrib.hdfs import WebHdfsClient
@attr('minicluster')
class WebHdfsTargetTest(WebHdfsMiniClusterTestCase, HdfsTargetTestMixin):
def run(self, result=None):
conf = {'hdfs': {'client': 'webhdfs'},
'webhdfs': {'port': str(self.cluster.webhdfs_port)},
}
with_config(conf)(super(WebHdfsTargetTest, self).run)(result)
def test_actually_using_webhdfs(self):
self.assertTrue(isinstance(self.create_target().fs, WebHdfsClient))
# Here is a bunch of tests that are currently failing. As should be
# mentioned in the WebHdfsClient docs, it is not yet feature complete.
test_slow_exists = None
test_glob_exists = None
test_with_close = None
test_with_exception = None
# This one fails when run together with the whole test suite
test_write_cleanup_no_close = None
| apache-2.0 |
wunderlins/learning | python/django/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
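# Example: iterating over a byte string yields 1-character strings on
# Python 2 but ints on Python 3; wrap_ord normalizes both to an int:
#     [wrap_ord(c) for c in b'ab']  ->  [97, 98] on either version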
| gpl-2.0 |
hirokiky/oauthlib | tests/oauth2/rfc6749/test_tokens.py | 27 | 2823 | from __future__ import absolute_import, unicode_literals
from ...unittest import TestCase
from oauthlib.oauth2.rfc6749.tokens import *
class TokenTest(TestCase):
# MAC without body/payload or extension
mac_plain = {
'token': 'h480djs93hd8',
'uri': 'http://example.com/resource/1?b=1&a=2',
'key': '489dks293j39',
'http_method': 'GET',
'nonce': '264095:dj83hs9s',
'hash_algorithm': 'hmac-sha-1'
}
auth_plain = {
'Authorization': 'MAC id="h480djs93hd8", nonce="264095:dj83hs9s",'
' mac="SLDJd4mg43cjQfElUs3Qub4L6xE="'
}
# MAC with body/payload, no extension
mac_body = {
'token': 'jd93dh9dh39D',
'uri': 'http://example.com/request',
'key': '8yfrufh348h',
'http_method': 'POST',
'nonce': '273156:di3hvdf8',
'hash_algorithm': 'hmac-sha-1',
'body': 'hello=world%21'
}
auth_body = {
'Authorization': 'MAC id="jd93dh9dh39D", nonce="273156:di3hvdf8",'
' bodyhash="k9kbtCIy0CkI3/FEfpS/oIDjk6k=", mac="W7bdMZbv9UWOTadASIQHagZyirA="'
}
# MAC with body/payload and extension
mac_both = {
'token': 'h480djs93hd8',
'uri': 'http://example.com/request?b5=%3D%253D&a3=a&c%40=&a2=r%20b&c2&a3=2+q',
'key': '489dks293j39',
'http_method': 'GET',
'nonce': '264095:7d8f3e4a',
'hash_algorithm': 'hmac-sha-1',
'body': 'Hello World!',
'ext': 'a,b,c'
}
auth_both = {
'Authorization': 'MAC id="h480djs93hd8", nonce="264095:7d8f3e4a",'
' bodyhash="Lve95gjOVATpfV8EL5X4nxwjKHE=", ext="a,b,c",'
' mac="Z3C2DojEopRDIC88/imW8Ez853g="'
}
# Bearer
token = 'vF9dft4qmT'
uri = 'http://server.example.com/resource'
bearer_headers = {
'Authorization': 'Bearer vF9dft4qmT'
}
bearer_body = 'access_token=vF9dft4qmT'
bearer_uri = 'http://server.example.com/resource?access_token=vF9dft4qmT'
def test_prepare_mac_header(self):
"""Verify mac signatures correctness
TODO: verify hmac-sha-256
"""
self.assertEqual(prepare_mac_header(**self.mac_plain), self.auth_plain)
self.assertEqual(prepare_mac_header(**self.mac_body), self.auth_body)
self.assertEqual(prepare_mac_header(**self.mac_both), self.auth_both)
def test_prepare_bearer_request(self):
"""Verify proper addition of bearer tokens to requests.
They may be represented as query components in body or URI or
in a Bearer authorization header.
"""
self.assertEqual(prepare_bearer_headers(self.token), self.bearer_headers)
self.assertEqual(prepare_bearer_body(self.token), self.bearer_body)
self.assertEqual(prepare_bearer_uri(self.token, uri=self.uri), self.bearer_uri)
| bsd-3-clause |
Livit/Livit.Learn.EdX | lms/djangoapps/courseware/tests/test_masquerade.py | 13 | 17662 | """
Unit tests for masquerade.
"""
import json
import pickle
from mock import patch
from nose.plugins.attrib import attr
from datetime import datetime
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.timezone import UTC
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from courseware.masquerade import (
CourseMasquerade,
MasqueradingKeyValueStore,
handle_ajax,
setup_masquerade,
get_masquerading_group_info
)
from courseware.tests.factories import StaffFactory
from courseware.tests.helpers import LoginEnrollmentTestCase, get_request_for_user
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from student.tests.factories import UserFactory
from xblock.runtime import DictKeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory
from xmodule.partitions.partitions import Group, UserPartition
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
class MasqueradeTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Base class for masquerade tests that sets up a test course and enrolls a user in the course.
"""
@classmethod
def setUpClass(cls):
super(MasqueradeTestCase, cls).setUpClass()
cls.course = CourseFactory.create(number='masquerade-test', metadata={'start': datetime.now(UTC())})
cls.info_page = ItemFactory.create(
category="course_info", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="updates"
)
cls.chapter = ItemFactory.create(
parent_location=cls.course.location,
category="chapter",
display_name="Test Section",
)
cls.sequential_display_name = "Test Masquerade Subsection"
cls.sequential = ItemFactory.create(
parent_location=cls.chapter.location,
category="sequential",
display_name=cls.sequential_display_name,
)
cls.vertical = ItemFactory.create(
parent_location=cls.sequential.location,
category="vertical",
display_name="Test Unit",
)
problem_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=2,
weight=2,
options=['Correct', 'Incorrect'],
correct_option='Correct'
)
cls.problem_display_name = "TestMasqueradeProblem"
cls.problem = ItemFactory.create(
parent_location=cls.vertical.location,
category='problem',
data=problem_xml,
display_name=cls.problem_display_name
)
def setUp(self):
super(MasqueradeTestCase, self).setUp()
self.test_user = self.create_user()
self.login(self.test_user.email, 'test')
self.enroll(self.course, True)
def get_courseware_page(self):
"""
Returns the server response for the courseware page.
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.name,
'section': self.sequential.location.name,
}
)
return self.client.get(url)
def get_course_info_page(self):
"""
Returns the server response for course info page.
"""
url = reverse(
'info',
kwargs={
'course_id': unicode(self.course.id),
}
)
return self.client.get(url)
def _create_mock_json_request(self, user, body, method='POST', session=None):
"""
Returns a mock JSON request for the specified user
"""
request = get_request_for_user(user)
request.method = method
request.META = {'CONTENT_TYPE': ['application/json']}
request.body = body
request.session = session or {}
return request
def verify_staff_debug_present(self, staff_debug_expected):
"""
Verifies that the staff debug control visibility is as expected (for staff only).
"""
content = self.get_courseware_page().content
self.assertTrue(self.sequential_display_name in content, "Subsection should be visible")
self.assertEqual(staff_debug_expected, 'Staff Debug Info' in content)
def get_problem(self):
"""
Returns the JSON content for the problem in the course.
"""
problem_url = reverse(
'xblock_handler',
kwargs={
'course_id': unicode(self.course.id),
'usage_id': unicode(self.problem.location),
'handler': 'xmodule_handler',
'suffix': 'problem_get'
}
)
return self.client.get(problem_url)
def verify_show_answer_present(self, show_answer_expected):
"""
Verifies that "Show Answer" is only present when expected (for staff only).
"""
problem_html = json.loads(self.get_problem().content)['html']
self.assertTrue(self.problem_display_name in problem_html)
self.assertEqual(show_answer_expected, "Show Answer" in problem_html)
@attr('shard_1')
class NormalStudentVisibilityTest(MasqueradeTestCase):
"""
Verify the course displays as expected for a "normal" student (to ensure test setup is correct).
"""
def create_user(self):
"""
Creates a normal student user.
"""
return UserFactory()
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_not_visible(self):
"""
Tests that staff debug control is not present for a student.
"""
self.verify_staff_debug_present(False)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_not_visible(self):
"""
Tests that "Show Answer" is not visible for a student.
"""
self.verify_show_answer_present(False)
class StaffMasqueradeTestCase(MasqueradeTestCase):
"""
Base class for tests of the masquerade behavior for a staff member.
"""
def create_user(self):
"""
Creates a staff user.
"""
return StaffFactory(course_key=self.course.id)
def update_masquerade(self, role, group_id=None, user_name=None):
"""
Toggle masquerade state.
"""
masquerade_url = reverse(
'masquerade_update',
kwargs={
'course_key_string': unicode(self.course.id),
}
)
response = self.client.post(
masquerade_url,
json.dumps({"role": role, "group_id": group_id, "user_name": user_name}),
"application/json"
)
self.assertEqual(response.status_code, 200)
return response
@attr('shard_1')
class TestStaffMasqueradeAsStudent(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as student.
"""
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_with_masquerade(self):
"""
Tests that staff debug control is not visible when masquerading as a student.
"""
# Verify staff initially can see staff debug
self.verify_staff_debug_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_staff_debug_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_staff_debug_present(True)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_for_staff(self):
"""
Tests that "Show Answer" is not visible when masquerading as a student.
"""
# Verify that staff initially can see "Show Answer".
self.verify_show_answer_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_show_answer_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_show_answer_present(True)
@attr('shard_1')
class TestStaffMasqueradeAsSpecificStudent(StaffMasqueradeTestCase, ProblemSubmissionTestMixin):
"""
Check for staff being able to masquerade as a specific student.
"""
def setUp(self):
super(TestStaffMasqueradeAsSpecificStudent, self).setUp()
self.student_user = self.create_user()
self.login_student()
self.enroll(self.course, True)
def login_staff(self):
""" Login as a staff user """
self.logout()
self.login(self.test_user.email, 'test')
def login_student(self):
""" Login as a student """
self.logout()
self.login(self.student_user.email, 'test')
def submit_answer(self, response1, response2):
"""
Submit an answer to the single problem in our test course.
"""
return self.submit_question_answer(
self.problem_display_name,
{'2_1': response1, '2_2': response2}
)
def get_progress_detail(self):
"""
Return the reported progress detail for the problem in our test course.
The return value is a string like u'1/2'.
"""
return json.loads(self.look_at_question(self.problem_display_name).content)['progress_detail']
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_user_on_self_paced(self):
"""
        Test masquerading as a specific user on the course info page when the
        self-paced configuration flag "enable_course_home_improvements" is set.
        Log in as a staff user and visit the course info page, then set
        masquerade to view the same page as a specific student and revisit it.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
        # Masquerade as the student, enable the self-paced configuration, and check we can see the info page.
SelfPacedConfiguration(enable_course_home_improvements=True).save()
self.update_masquerade(role='student', user_name=self.student_user.username)
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student(self):
"""
Test masquerading as a specific user.
        We answer the problem in our test course as the student and as the
        staff user, and use the progress as a proxy to determine whose state
        we currently see.
"""
# Answer correctly as the student, and check progress.
self.login_student()
self.submit_answer('Correct', 'Correct')
self.assertEqual(self.get_progress_detail(), u'2/2')
# Log in as staff, and check the problem is unanswered.
self.login_staff()
self.assertEqual(self.get_progress_detail(), u'0/2')
# Masquerade as the student, and check we can see the student state.
self.update_masquerade(role='student', user_name=self.student_user.username)
self.assertEqual(self.get_progress_detail(), u'2/2')
# Temporarily override the student state.
self.submit_answer('Correct', 'Incorrect')
self.assertEqual(self.get_progress_detail(), u'1/2')
# Reload the page and check we see the student state again.
self.get_courseware_page()
self.assertEqual(self.get_progress_detail(), u'2/2')
# Become the staff user again, and check the problem is still unanswered.
self.update_masquerade(role='staff')
self.assertEqual(self.get_progress_detail(), u'0/2')
# Verify the student state did not change.
self.login_student()
self.assertEqual(self.get_progress_detail(), u'2/2')
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student_course_info(self):
"""
        Test masquerading as a specific user on the course info page.
        We log in via login_staff and check that the course info page renders,
        then set masquerade to view the same page as a specific student and
        verify it still renders.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
# Masquerade as the student, and check we can see the info page.
self.update_masquerade(role='student', user_name=self.student_user.username)
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
@attr('shard_1')
class TestGetMasqueradingGroupId(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as belonging to a group.
"""
def setUp(self):
super(TestGetMasqueradingGroupId, self).setUp()
self.user_partition = UserPartition(
0, 'Test User Partition', '',
[Group(0, 'Group 1'), Group(1, 'Group 2')],
scheme_id='cohort'
)
self.course.user_partitions.append(self.user_partition)
modulestore().update_item(self.course, self.test_user.id)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_group_masquerade(self):
"""
Tests that a staff member can masquerade as being in a particular group.
"""
# Verify that there is no masquerading group initially
group_id, user_partition_id = get_masquerading_group_info(self.test_user, self.course.id)
self.assertIsNone(group_id)
self.assertIsNone(user_partition_id)
# Install a masquerading group
request = self._create_mock_json_request(
self.test_user,
body='{"role": "student", "user_partition_id": 0, "group_id": 1}'
)
handle_ajax(request, unicode(self.course.id))
setup_masquerade(request, self.test_user, True)
# Verify that the masquerading group is returned
group_id, user_partition_id = get_masquerading_group_info(self.test_user, self.course.id)
self.assertEqual(group_id, 1)
self.assertEqual(user_partition_id, 0)
class ReadOnlyKeyValueStore(DictKeyValueStore):
"""
A KeyValueStore that raises an exception on attempts to modify it.
Used to make sure MasqueradingKeyValueStore does not try to modify the underlying KeyValueStore.
"""
def set(self, key, value):
assert False, "ReadOnlyKeyValueStore may not be modified."
def delete(self, key):
assert False, "ReadOnlyKeyValueStore may not be modified."
def set_many(self, update_dict): # pylint: disable=unused-argument
assert False, "ReadOnlyKeyValueStore may not be modified."
class FakeSession(dict):
""" Mock for Django session object. """
modified = False # We need dict semantics with a writable 'modified' property
class MasqueradingKeyValueStoreTest(TestCase):
"""
Unit tests for the MasqueradingKeyValueStore class.
"""
def setUp(self):
super(MasqueradingKeyValueStoreTest, self).setUp()
self.ro_kvs = ReadOnlyKeyValueStore({'a': 42, 'b': None, 'c': 'OpenCraft'})
self.session = FakeSession()
self.kvs = MasqueradingKeyValueStore(self.ro_kvs, self.session)
def test_all(self):
self.assertEqual(self.kvs.get('a'), 42)
self.assertEqual(self.kvs.get('b'), None)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
with self.assertRaises(KeyError):
self.kvs.get('d')
self.assertTrue(self.kvs.has('a'))
self.assertTrue(self.kvs.has('b'))
self.assertTrue(self.kvs.has('c'))
self.assertFalse(self.kvs.has('d'))
self.kvs.set_many({'a': 'Norwegian Blue', 'd': 'Giraffe'})
self.kvs.set('b', 7)
self.assertEqual(self.kvs.get('a'), 'Norwegian Blue')
self.assertEqual(self.kvs.get('b'), 7)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
self.assertEqual(self.kvs.get('d'), 'Giraffe')
for key in 'abd':
self.assertTrue(self.kvs.has(key))
self.kvs.delete(key)
with self.assertRaises(KeyError):
self.kvs.get(key)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
class CourseMasqueradeTest(TestCase):
"""
Unit tests for the CourseMasquerade class.
"""
def test_unpickling_sets_all_attributes(self):
"""
Make sure that old CourseMasquerade objects receive missing attributes when unpickled from
the session.
"""
cmasq = CourseMasquerade(7)
del cmasq.user_name
pickled_cmasq = pickle.dumps(cmasq)
unpickled_cmasq = pickle.loads(pickled_cmasq)
self.assertEqual(unpickled_cmasq.user_name, None)
| agpl-3.0 |
louiskun/flaskGIT | venv/lib/python2.7/site-packages/alembic/templates/multidb/env.py | 40 | 4146 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
import re
USE_TWOPHASE = False
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# gather section names referring to different
# databases. These are named "engine1", "engine2"
# in the sample .ini file.
db_names = config.get_main_option('databases')
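# For example, the corresponding alembic.ini might contain (a sketch; the
# section names and URLs are illustrative):
#
#     [alembic]
#     databases = engine1, engine2
#
#     [engine1]
#     sqlalchemy.url = driver://user:pass@localhost/dbname1
#
#     [engine2]
#     sqlalchemy.url = driver://user:pass@localhost/dbname2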
# add your model's MetaData objects here
# for 'autogenerate' support. These must be set
# up to hold just those tables targeting a
# particular database. table.tometadata() may be
# helpful here in case a "copy" of
# a MetaData is needed.
# from myapp import mymodel
# target_metadata = {
#     'engine1': mymodel.metadata1,
#     'engine2': mymodel.metadata2,
# }
target_metadata = {}
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
# for the --sql use case, run migrations for each URL into
# individual files.
engines = {}
for name in re.split(r',\s*', db_names):
engines[name] = rec = {}
rec['url'] = context.config.get_section_option(name,
"sqlalchemy.url")
for name, rec in engines.items():
logger.info("Migrating database %s" % name)
file_ = "%s.sql" % name
logger.info("Writing output to %s" % file_)
with open(file_, 'w') as buffer:
context.configure(url=rec['url'], output_buffer=buffer,
target_metadata=target_metadata.get(name),
literal_binds=True)
with context.begin_transaction():
context.run_migrations(engine_name=name)
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# for the direct-to-DB use case, start a transaction on all
# engines, then run all migrations, then commit all transactions.
engines = {}
for name in re.split(r',\s*', db_names):
engines[name] = rec = {}
rec['engine'] = engine_from_config(
context.config.get_section(name),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
for name, rec in engines.items():
engine = rec['engine']
rec['connection'] = conn = engine.connect()
if USE_TWOPHASE:
rec['transaction'] = conn.begin_twophase()
else:
rec['transaction'] = conn.begin()
try:
for name, rec in engines.items():
logger.info("Migrating database %s" % name)
context.configure(
connection=rec['connection'],
upgrade_token="%s_upgrades" % name,
downgrade_token="%s_downgrades" % name,
target_metadata=target_metadata.get(name)
)
context.run_migrations(engine_name=name)
if USE_TWOPHASE:
for rec in engines.values():
rec['transaction'].prepare()
for rec in engines.values():
rec['transaction'].commit()
except:
for rec in engines.values():
rec['transaction'].rollback()
raise
finally:
for rec in engines.values():
rec['connection'].close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| mit |
verycumbersome/the-blue-alliance | helpers/event_team_manipulator.py | 8 | 1150 | from helpers.cache_clearer import CacheClearer
from helpers.manipulator_base import ManipulatorBase
class EventTeamManipulator(ManipulatorBase):
"""
Handle EventTeam database writes.
"""
@classmethod
def getCacheKeysAndControllers(cls, affected_refs):
return CacheClearer.get_eventteam_cache_keys_and_controllers(affected_refs)
@classmethod
def updateMerge(self, new_event_team, old_event_team, auto_union=True):
"""
Update and return EventTeams.
"""
immutable_attrs = [
"event",
"team",
] # These build key_name, and cannot be changed without deleting the model.
attrs = [
"year", # technically immutable, but corruptable and needs repair. See github issue #409
"status",
]
for attr in attrs:
if getattr(new_event_team, attr) is not None:
if getattr(new_event_team, attr) != getattr(old_event_team, attr):
setattr(old_event_team, attr, getattr(new_event_team, attr))
old_event_team.dirty = True
return old_event_team
| mit |
Spartronics4915/2016-Stronghold | src/org/usfirst/frc/team4915/stronghold/vision/jetson/imgExplore2/networktables/networktable.py | 6 | 29684 |
import threading
from networktables2 import (
DefaultEntryTypes,
NetworkTableClient,
NetworkTableServer,
SocketStreamFactory,
SocketServerStreamProvider
)
__all__ = ["NetworkTable"]
class NetworkTableConnectionListenerAdapter:
"""An adapter that changes the source of a connection event
"""
def __init__(self, targetSource, targetListener):
"""
:param targetSource: the source where the event will appear to come
from
:param targetListener: the listener where events will be forwarded
"""
self.targetSource = targetSource
self.targetListener = targetListener
assert callable(self.targetListener.connected)
assert callable(self.targetListener.disconnected)
def connected(self, remote):
self.targetListener.connected(self.targetSource)
def disconnected(self, remote):
self.targetListener.disconnected(self.targetSource)
class NetworkTableGlobalListenerAdapter:
def __init__(self, listener):
self.listener = listener
assert callable(self.listener)
def valueChanged(self, source, key, value, isNew):
self.listener(key, value, isNew)
class NetworkTableKeyListenerAdapter:
"""An adapter that is used to filter value change notifications for a
specific key
"""
def __init__(self, relativeKey, fullKey, targetSource, targetListener):
"""Create a new adapter
:param relativeKey: the name of the key relative to the table (this
            is what the listener will receive as the key)
:param fullKey: the full name of the key in the NetworkTableNode
:param targetSource: the source that events passed to the target
listener will appear to come from
:param targetListener: the callable where events are forwarded to
"""
assert callable(targetListener)
self.relativeKey = relativeKey
self.fullKey = fullKey
self.targetSource = targetSource
self.targetListener = targetListener
def valueChanged(self, source, key, value, isNew):
if key == self.fullKey:
self.targetListener(self.targetSource,
self.relativeKey, value, isNew)
class NetworkTableListenerAdapter:
"""An adapter that is used to filter value change notifications and make
the path relative to the NetworkTable
"""
def __init__(self, prefix, targetSource, targetListener):
"""Create a new adapter
:param prefix: the prefix that will be filtered/removed from the
beginning of the key
:param targetSource: the source that events passed to the target
listener will appear to come from
:param targetListener: the callable where events are forwarded to
"""
assert callable(targetListener)
self.prefix = prefix
self.targetSource = targetSource
self.targetListener = targetListener
def valueChanged(self, source, key, value, isNew):
#TODO use string cache
if key.startswith(self.prefix):
relativeKey = key[len(self.prefix):]
if NetworkTable.PATH_SEPARATOR in relativeKey:
return
self.targetListener(self.targetSource, relativeKey,
value, isNew)
class NetworkTableSubListenerAdapter:
"""An adapter that is used to filter sub table change notifications and
make the path relative to the NetworkTable
"""
def __init__(self, prefix, targetSource, targetListener):
"""Create a new adapter
:param prefix: the prefix of the current table
:param targetSource: the source that events passed to the target
listener will appear to come from
:param targetListener: the callable where events are forwarded to
"""
assert callable(targetListener)
self.prefix = prefix
self.targetSource = targetSource
self.targetListener = targetListener
self.notifiedTables = set()
def valueChanged(self, source, key, value, isNew):
#TODO use string cache
if not key.startswith(self.prefix):
return
key = key[len(self.prefix):]
if key.startswith(NetworkTable.PATH_SEPARATOR):
key = key[len(NetworkTable.PATH_SEPARATOR):]
#TODO implement sub table listening better
keysplit = key.split(NetworkTable.PATH_SEPARATOR)
if len(keysplit) < 2:
return
subTableKey = keysplit[0]
if subTableKey in self.notifiedTables:
return
self.notifiedTables.add(subTableKey)
self.targetListener(self.targetSource, subTableKey,
self.targetSource.getSubTable(subTableKey), True)
class AutoUpdateValue:
"""Holds a value from NetworkTables, and changes it as new entries
come in. Updates to this value are NOT passed on to NetworkTables.
Do not create this object directly, as it only holds the value.
Use :meth:`.NetworkTable.getAutoUpdateValue` to obtain an instance
of this.
"""
__slots__ = ['__value']
def __init__(self, default):
self.__value = default
def get(self):
'''Returns the value held by this object'''
return self.__value
@property
def value(self):
return self.__value
# Comparison operators et al
def __lt__(self, other):
raise TypeError("< not allowed on AutoUpdateValue objects. Use the .value attribute instead")
def __le__(self, other):
raise TypeError("<= not allowed on AutoUpdateValue objects. Use the .value attribute instead")
def __eq__(self, other):
raise TypeError("== not allowed on AutoUpdateValue objects. Use the .value attribute instead")
def __ne__(self, other):
raise TypeError("!= not allowed on AutoUpdateValue objects. Use the .value attribute instead")
def __gt__(self, other):
raise TypeError("> not allowed on AutoUpdateValue objects. Use the .value attribute instead")
def __ge__(self, other):
raise TypeError(">= not allowed on AutoUpdateValue objects. Use the .value attribute instead")
def __bool__(self):
raise TypeError("< not allowed on AutoUpdateValue objects. Use the .value attribute instead")
def __hash__(self):
raise TypeError("__hash__ not allowed on AutoUpdateValue objects")
def __repr__(self):
return '<AutoUpdateValue: %s>' % (self.__value.__repr__(), )
class AutoUpdateListener:
def __init__(self, table):
# no lock required if we use atomic operations (setdefault, get) on it
self.keys = {}
table.addTableListener(self._valueChanged)
def createAutoValue(self, key, default):
new_value = AutoUpdateValue(default)
return self.keys.setdefault(key, new_value)
def _valueChanged(self, table, key, value, isNew):
auto_value = self.keys.get(key)
if auto_value is not None:
auto_value._AutoUpdateValue__value = value
class NetworkTableProvider:
"""Provides a NetworkTable for a given NetworkTableNode
"""
def __init__(self, node):
"""Create a new NetworkTableProvider for a given NetworkTableNode
:param node: the node that handles the actual network table
"""
self.node = node
self.tables = {}
self.global_listeners = {}
def getRootTable(self):
return self.getTable("")
def getTable(self, key):
table = self.tables.get(key)
if table is None:
table = NetworkTable(key, self)
self.tables[key] = table
return table
def getNode(self):
""":returns: the Network Table node that backs the Tables returned by
this provider
"""
return self.node
def close(self):
"""close the backing network table node
"""
self.node.stop()
def addGlobalListener(self, listener, immediateNotify):
adapter = self.global_listeners.get(listener)
if adapter is None:
adapter = NetworkTableGlobalListenerAdapter(listener)
self.global_listeners[listener] = adapter
self.node.addTableListener(adapter, immediateNotify)
def removeGlobalListener(self, listener):
adapter = self.global_listeners.get(listener)
if adapter is not None:
self.node.removeTableListener(adapter)
del self.global_listeners[listener]
def _create_server_node(ipAddress, port):
"""Creates a network tables server node
:param ipAddress: the IP address configured by the user
:param port: the port configured by the user
:returns: a new node that can back a network table
"""
return NetworkTableServer(SocketServerStreamProvider(port))
def _create_client_node(ipAddress, port):
"""Creates a network tables client node
:param ipAddress: the IP address configured by the user
:param port: the port configured by the user
:returns: a new node that can back a network table
"""
if ipAddress is None:
raise ValueError("IP address cannot be None when in client mode")
client = NetworkTableClient(SocketStreamFactory(ipAddress, port))
return client
def _create_test_node(ipAddress, port):
class NullStreamFactory:
def createStream(self):
return None
return NetworkTableClient(NullStreamFactory())
class NetworkTable:
"""
This is the primary object that you will use when interacting with
NetworkTables. You should not directly create a NetworkTable object,
but instead use the :meth:`getTable` method to create an appropriate
object instead.
For example, to interact with the SmartDashboard::
from networktables import NetworkTable
sd = NetworkTable.getTable('SmartDashboard')
sd.putNumber('someNumber', 1234)
...
"""
#: The path separator for sub-tables and keys
PATH_SEPARATOR = '/'
#: The default port that network tables operates on
DEFAULT_PORT = 1735
_staticProvider = None
_mode_fn = staticmethod(_create_server_node)
port = DEFAULT_PORT
ipAddress = None
_staticMutex = threading.RLock()
class _defaultValueSentry:
pass
@staticmethod
def checkInit():
with NetworkTable._staticMutex:
if NetworkTable._staticProvider is not None:
raise RuntimeError("Network tables has already been initialized")
@staticmethod
def initialize():
with NetworkTable._staticMutex:
NetworkTable.checkInit()
NetworkTable._staticProvider = NetworkTableProvider(
NetworkTable._mode_fn(NetworkTable.ipAddress,
NetworkTable.port))
@staticmethod
def setTableProvider(provider):
"""set the table provider for static network tables methods
        .. warning:: This must be called before :meth:`initialize` or :meth:`getTable`
"""
with NetworkTable._staticMutex:
NetworkTable.checkInit()
NetworkTable._staticProvider = provider
@staticmethod
def setServerMode():
"""set that network tables should be a server (this is the default)
        .. warning:: This must be called before :meth:`initialize` or :meth:`getTable`
"""
with NetworkTable._staticMutex:
NetworkTable.checkInit()
NetworkTable._mode_fn = staticmethod(_create_server_node)
@staticmethod
def setClientMode():
"""set that network tables should be a client
        .. warning:: This must be called before :meth:`initialize` or :meth:`getTable`
"""
with NetworkTable._staticMutex:
NetworkTable.checkInit()
NetworkTable._mode_fn = staticmethod(_create_client_node)
@staticmethod
def setTestMode():
"""Setup network tables to run in unit test mode
.. warning:: This must be called before :meth:`initalize` or :meth:`getTable`
"""
with NetworkTable._staticMutex:
NetworkTable.checkInit()
NetworkTable._mode_fn = staticmethod(_create_test_node)
@staticmethod
def setTeam(team):
"""set the team the robot is configured for (this will set the ip
address that network tables will connect to in client mode)
:param team: the team number
        .. warning:: This must be called before :meth:`initialize` or :meth:`getTable`
"""
NetworkTable.setIPAddress("10.%d.%d.2" % divmod(team, 100))
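        # e.g. setTeam(4915): divmod(4915, 100) == (49, 15) -> "10.49.15.2"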
@staticmethod
def setIPAddress(address):
""":param address: the adress that network tables will connect to in
client mode
        .. warning:: This must be called before :meth:`initialize` or :meth:`getTable`
"""
with NetworkTable._staticMutex:
NetworkTable.checkInit()
NetworkTable.ipAddress = address
@staticmethod
def setWriteFlushPeriod(flushPeriod):
"""Sets the period of time between writes to the network.
        WPILib's networktables and SmartDashboard default to 100ms; we have
        set it to 50ms instead for quicker response time. You should not set
this value too low, as it could potentially increase the volume of
data sent over the network.
.. warning:: If you don't know what this setting affects, don't mess
with it!
        :param flushPeriod: Write flush period in seconds (default is 0.050,
or 50ms)
"""
from networktables2.client import WriteManager
WriteManager.SLEEP_TIME = flushPeriod
@staticmethod
def getTable(key):
"""Gets the table with the specified key. If the table does not exist,
a new table will be created.
This will automatically initialize network tables if it has not been
already
:param key: the key name
:returns: the network table requested
:rtype: :class:`NetworkTable`
"""
with NetworkTable._staticMutex:
if NetworkTable._staticProvider is None:
NetworkTable.initialize()
if not key.startswith(NetworkTable.PATH_SEPARATOR):
key = NetworkTable.PATH_SEPARATOR + key
return NetworkTable._staticProvider.getTable(key)
@staticmethod
def getGlobalTable():
"""Returns an object that allows you to write values to raw network table
keys (which are paths with / separators).
This will automatically initialize network tables if it has not been
already.
.. warning:: Generally, you should not use this object. Prefer to use
:meth:`getTable` instead and do operations on individual
NetworkTables.
.. versionadded:: 2015.2.0
:rtype: :class:`.NetworkTableNode`
"""
with NetworkTable._staticMutex:
if NetworkTable._staticProvider is None:
NetworkTable.initialize()
return NetworkTable._staticProvider.getNode()
@staticmethod
def addGlobalListener(listener, immediateNotify=True):
'''Adds a listener that will be notified when any key in any
NetworkTable is changed. The keys that are received using this
listener will be full NetworkTable keys. Most users will not
want to use this listener type.
The listener is called from the NetworkTables I/O thread, and should
return as quickly as possible.
This will automatically initialize network tables if it has not been
already.
:param listener: A callable that has this signature: `callable(key, value, isNew)`
:param immediateNotify: If True, the listener will be called immediately with the current values of the table
.. versionadded:: 2015.2.0
.. warning:: You may call the NetworkTables API from within the
listener, but it is not recommended as we are not
currently sure if deadlocks will occur
'''
with NetworkTable._staticMutex:
if NetworkTable._staticProvider is None:
NetworkTable.initialize()
NetworkTable._staticProvider.addGlobalListener(listener, immediateNotify)
@staticmethod
def removeGlobalListener(listener):
'''Removes a global listener
.. versionadded:: 2015.2.0
'''
with NetworkTable._staticMutex:
NetworkTable._staticProvider.removeGlobalListener(listener)
def __init__(self, path, provider):
self.path = path
self.absoluteKeyCache = NetworkTable._KeyCache(path)
self.provider = provider
self.node = provider.getNode()
self.connectionListenerMap = {}
self.listenerMap = {}
self.mutex = threading.RLock()
self.autoListener = None
def __str__(self):
return "NetworkTable: "+self.path
def isConnected(self):
return self.node.isConnected()
def isServer(self):
return self.node.isServer()
class _KeyCache:
def __init__(self, path):
if path[-len(NetworkTable.PATH_SEPARATOR):] == NetworkTable.PATH_SEPARATOR:
path = path[:-len(NetworkTable.PATH_SEPARATOR)]
self.path = path
self.cache = {}
def get(self, key):
cachedValue = self.cache.get(key)
if cachedValue is None:
cachedValue = self.path + NetworkTable.PATH_SEPARATOR + key
self.cache[key] = cachedValue
return cachedValue
def addConnectionListener(self, listener, immediateNotify=False):
'''Adds a listener that will be notified when a new connection to a
NetworkTables client/server is established.
The listener is called from the NetworkTables I/O thread, and should
return as quickly as possible.
:param listener: An object that has a 'connected' function and a
'disconnected' function. Each function will be called
with this NetworkTable object as the first parameter
:param immediateNotify: If True, the listener will be called immediately
with the current values of the table
.. warning:: You may call the NetworkTables API from within the
listener, but it is not recommended as we are not
currently sure if deadlocks will occur
'''
adapter = self.connectionListenerMap.get(listener)
if adapter is not None:
raise ValueError("Cannot add the same listener twice")
adapter = NetworkTableConnectionListenerAdapter(self, listener)
self.connectionListenerMap[listener] = adapter
self.node.addConnectionListener(adapter, immediateNotify)
def removeConnectionListener(self, listener):
'''Removes a connection listener
:param listener: The object registered for connection notifications
'''
adapter = self.connectionListenerMap.get(listener)
if adapter is not None:
self.node.removeConnectionListener(adapter)
del self.connectionListenerMap[listener]
def addTableListener(self, listener, immediateNotify=False, key=None):
'''Adds a listener that will be notified when any key in this
NetworkTable is changed, or when a specified key changes.
The listener is called from the NetworkTables I/O thread, and should
return as quickly as possible.
:param listener: A callable that has this signature: `callable(source, key, value, isNew)`
:param immediateNotify: If True, the listener will be called immediately with the current values of the table
:param key: If specified, the listener will only be called when this key is changed
.. warning:: You may call the NetworkTables API from within the
listener, but it is not recommended as we are not
currently sure if deadlocks will occur
'''
adapters = self.listenerMap.setdefault(listener, [])
if key is not None:
adapter = NetworkTableKeyListenerAdapter(
key, self.absoluteKeyCache.get(key), self, listener)
else:
adapter = NetworkTableListenerAdapter(
self.path+self.PATH_SEPARATOR, self, listener)
adapters.append(adapter)
self.node.addTableListener(adapter, immediateNotify)
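    # Example listener (a sketch; the signature follows the docstring above):
    #     def value_changed(source, key, value, isNew):
    #         print('%s/%s changed to %s' % (source.path, key, value))
    #     table.addTableListener(value_changed, immediateNotify=True)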
def addSubTableListener(self, listener):
'''Adds a listener that will be notified when any key in a subtable of
this NetworkTable is changed.
The listener is called from the NetworkTables I/O thread, and should
return as quickly as possible.
:param listener: A callable that has this signature: `callable(source, key, value, isNew)`
.. warning:: You may call the NetworkTables API from within the
listener, but it is not recommended as we are not
currently sure if deadlocks will occur
'''
adapters = self.listenerMap.setdefault(listener, [])
adapter = NetworkTableSubListenerAdapter(self.path, self, listener)
adapters.append(adapter)
self.node.addTableListener(adapter, True)
def removeTableListener(self, listener):
'''Removes a table listener
:param listener: callable that was passed to :meth:`addTableListener`
or :meth:`addSubTableListener`
'''
adapters = self.listenerMap.get(listener)
if adapters is not None:
for adapter in adapters:
self.node.removeTableListener(adapter)
del adapters[:]
def getSubTable(self, key):
"""Returns the table at the specified key. If there is no table at the
specified key, it will create a new table
:param key: the key name
:returns: the networktable to be returned
:rtype: :class:`NetworkTable`
"""
with self.mutex:
return self.provider.getTable(self.absoluteKeyCache.get(key))
def containsKey(self, key):
"""Checks the table and tells if it contains the specified key
:param key: the key to be checked
"""
with self.mutex:
return self.node.containsKey(self.absoluteKeyCache.get(key))
def __contains__(self, key):
return self.containsKey(key)
def containsSubTable(self, key):
subtablePrefix = self.absoluteKeyCache.get(key)+self.PATH_SEPARATOR
for key in self.node.getEntryStore().keys():
if key.startswith(subtablePrefix):
return True
return False
def putNumber(self, key, value):
"""Maps the specified key to the specified value in this table. The key
can not be None. The value can be retrieved by calling the get method
with a key that is equal to the original key.
:param key: the key
:param value: the value
"""
self.node.putValue(self.absoluteKeyCache.get(key), float(value), DefaultEntryTypes.DOUBLE)
def getNumber(self, key, defaultValue=_defaultValueSentry):
"""Returns the key that the name maps to. If the key is None, it will
return the default value (or raise KeyError if a default value is not
provided).
:param key: the key name
:param defaultValue: the default value if the key is None. If not
specified, raises KeyError if the key is None.
:returns: the key
"""
try:
return self.node.getDouble(self.absoluteKeyCache.get(key))
except KeyError:
if defaultValue is NetworkTable._defaultValueSentry:
raise
return defaultValue
def putString(self, key, value):
"""Maps the specified key to the specified value in this table. The key
can not be None. The value can be retrieved by calling the get method
with a key that is equal to the original key.
:param key: the key
:param value: the value
"""
self.node.putValue(self.absoluteKeyCache.get(key), str(value), DefaultEntryTypes.STRING)
def getString(self, key, defaultValue=_defaultValueSentry):
"""Returns the key that the name maps to. If the key is None, it will
return the default value (or raise KeyError if a default value is not
provided).
:param key: the key name
:param defaultValue: the default value if the key is None. If not
specified, raises KeyError if the key is None.
:returns: the key
"""
try:
return self.node.getString(self.absoluteKeyCache.get(key))
except KeyError:
if defaultValue is NetworkTable._defaultValueSentry:
raise
return defaultValue
def putBoolean(self, key, value):
"""Maps the specified key to the specified value in this table. The key
can not be None. The value can be retrieved by calling the get method
with a key that is equal to the original key.
:param key: the key
:param value: the value
"""
self.node.putValue(self.absoluteKeyCache.get(key), bool(value), DefaultEntryTypes.BOOLEAN)
def getBoolean(self, key, defaultValue=_defaultValueSentry):
"""Returns the key that the name maps to. If the key is None, it will
return the default value (or raise KeyError if a default value is not
provided).
:param key: the key name
:param defaultValue: the default value if the key is None. If not
specified, raises KeyError if the key is None.
:returns: the key
"""
try:
return self.node.getBoolean(self.absoluteKeyCache.get(key))
except KeyError:
if defaultValue is NetworkTable._defaultValueSentry:
raise
return defaultValue
def retrieveValue(self, key, externalValue):
"""Retrieves the data associated with a complex data type (such as
arrays) and stores it.
For example, to retrieve a type which is an array of strings::
val = networktables.StringArray()
nt.retrieveValue('some key', val)
:param key: the key name
:param externalValue: The complex data member
"""
self.node.retrieveValue(self.absoluteKeyCache.get(key), externalValue)
def putValue(self, key, value):
"""Maps the specified key to the specified value in this table. The key
can not be None. The value can be retrieved by calling the get method
with a key that is equal to the original key.
:param key: the key name
:param value: the value to be put
"""
self.node.putValue(self.absoluteKeyCache.get(key), value)
def getValue(self, key, defaultValue=_defaultValueSentry):
"""Returns the key that the name maps to. If the key is None, it will
return the default value (or raise KeyError if a default value is not
provided).
:param key: the key name
:param defaultValue: the default value if the key is None. If not
specified, raises KeyError if the key is None.
:returns: the key
"""
try:
return self.node.getValue(self.absoluteKeyCache.get(key))
except KeyError:
if defaultValue is NetworkTable._defaultValueSentry:
raise
return defaultValue
def getAutoUpdateValue(self, key, defaultValue, writeDefault=True):
'''Returns an object that will be automatically updated when the
value is updated by networktables.
Does not work with complex types. If you modify the returned type,
the value will NOT be written back to NetworkTables.
:param key: the key name
:type key: str
:param defaultValue: Default value to use if not in the table
:type defaultValue: any
:param writeDefault: If True, put the default value to the table,
overwriting existing values
:type writeDefault: bool
:rtype: :class:`.AutoUpdateValue`
.. versionadded:: 2015.1.3
'''
value = defaultValue
if writeDefault:
self.putValue(key, value)
else:
try:
value = self.getValue(key)
except KeyError:
self.putValue(key, value)
with self.mutex:
if self.autoListener is None:
self.autoListener = AutoUpdateListener(self)
return self.autoListener.createAutoValue(key, value)
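    # Illustrative usage sketch (not from the original docs; it assumes
    # ``nt`` is an already-obtained NetworkTable instance, the key name is
    # made up, and AutoUpdateValue exposes its current value via .value):
    #
    #   speed = nt.getAutoUpdateValue('robotSpeed', 0.0)
    #   ...                    # updates arrive from networktables in the background
    #   current = speed.value  # read the latest value without another fetch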
# Deprecated Methods
putInt = putNumber
getInt = getNumber
putDouble = putNumber
getDouble = getNumber
| mit |
ycasg/PyNLO | src/validation/Old and Partial Tests/Dudley_SSFM.py | 2 | 2731 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 15 15:39:12 2014
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
@author: dim1
"""
import numpy as np
import matplotlib.pyplot as plt
from pynlo.interactions.FourWaveMixing import SSFM
from pynlo.media.fibers import fiber
from pynlo.light.DerivedPulses import SechPulse
#plt.close('all')
dz = 1e-3
steps = 100
range1 = np.arange(steps)
centerwl = 835.0
fiber_length = 0.04
pump_power = 1.0e4
pump_pulse_length = 28.4e-3
npoints = 2**13
init = SechPulse(pump_power, pump_pulse_length, centerwl, time_window = 10.0,
GDD = 0, TOD = 0.0, NPTS = npoints, frep_MHz = 100, power_is_avg = False)
fiber1 = fiber.FiberInstance()
fiber1.load_from_db( fiber_length, 'dudley')
evol = SSFM.SSFM(dz = 1e-6, local_error = 0.001, USE_SIMPLE_RAMAN = True)
y = np.zeros(steps)
AW = np.zeros((init.NPTS, steps))
AT = np.copy(AW)
y, AW, AT, pulse1 = evol.propagate(pulse_in = init, fiber = fiber1,
n_steps = steps)
wl = init.wl_nm
loWL = 400
hiWL = 1400
iis = np.logical_and(wl>loWL,wl<hiWL)
iisT = np.logical_and(init.T_ps>-1,init.T_ps<5)
xW = wl[iis]
xT = init.T_ps[iisT]
zW_in = np.transpose(AW)[:,iis]
zT_in = np.transpose(AT)[:,iisT]
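# Convert the complex field amplitudes to intensity in dB, 10*log10(|A|^2),
# for the spectral (zW) and temporal (zT) evolution plots below.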
zW = 10*np.log10(np.abs(zW_in)**2)
zT = 10*np.log10(np.abs(zT_in)**2)
mlIW = np.max(zW)
mlIT = np.max(zT)
D = fiber1.Beta2_to_D(init)
beta = fiber1.Beta2(init)
#
#plt.figure()
#plt.subplot(121)
#plt.plot(wl,D,'x')
#plt.xlim(400,1600)
#plt.ylim(-400,300)
#plt.xlabel('Wavelength (nm)')
#plt.ylabel('D (ps/nm/km)')
#plt.subplot(122)
#plt.plot(wl,beta*1000,'x')
#plt.xlim(400,1600)
#plt.ylim(-350,200)
#plt.xlabel('Wavelength (nm)')
#plt.ylabel(r'$\beta_2$ (ps$^2$/km)')
plt.figure()
plt.subplot(121)
plt.pcolormesh(xW, y, zW, vmin = mlIW - 40.0, vmax = mlIW)
plt.autoscale(tight=True)
plt.xlim([loWL, hiWL])
plt.xlabel('Wavelength (nm)')
plt.ylabel('Distance (m)')
plt.subplot(122)
plt.pcolormesh(xT, y, zT, vmin = mlIT - 40.0, vmax = mlIT)
plt.autoscale(tight=True)
plt.xlabel('Delay (ps)')
plt.ylabel('Distance (m)')
plt.show() | gpl-3.0 |
tgroh/beam | sdks/python/apache_beam/runners/portability/fn_api_runner_test.py | 2 | 16344 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import functools
import logging
import os
import tempfile
import time
import traceback
import unittest
import apache_beam as beam
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metricbase import MetricName
from apache_beam.runners.portability import fn_api_runner
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker import statesampler
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import window
if statesampler.FAST_SAMPLER:
DEFAULT_SAMPLING_PERIOD_MS = statesampler.DEFAULT_SAMPLING_PERIOD_MS
else:
DEFAULT_SAMPLING_PERIOD_MS = 0
class FnApiRunnerTest(unittest.TestCase):
def create_pipeline(self):
return beam.Pipeline(
runner=fn_api_runner.FnApiRunner(use_grpc=False))
def test_assert_that(self):
# TODO: figure out a way for fn_api_runner to parse and raise the
# underlying exception.
with self.assertRaisesRegexp(Exception, 'Failed assert'):
with self.create_pipeline() as p:
assert_that(p | beam.Create(['a', 'b']), equal_to(['a']))
def test_create(self):
with self.create_pipeline() as p:
assert_that(p | beam.Create(['a', 'b']), equal_to(['a', 'b']))
def test_pardo(self):
with self.create_pipeline() as p:
res = (p
| beam.Create(['a', 'bc'])
| beam.Map(lambda e: e * 2)
| beam.Map(lambda e: e + 'x'))
assert_that(res, equal_to(['aax', 'bcbcx']))
def test_pardo_metrics(self):
class MyDoFn(beam.DoFn):
def start_bundle(self):
self.count = beam.metrics.Metrics.counter('ns1', 'elements')
def process(self, element):
self.count.inc(element)
return [element]
class MyOtherDoFn(beam.DoFn):
def start_bundle(self):
self.count = beam.metrics.Metrics.counter('ns2', 'elementsplusone')
def process(self, element):
self.count.inc(element + 1)
return [element]
with self.create_pipeline() as p:
res = (p | beam.Create([1, 2, 3])
| 'mydofn' >> beam.ParDo(MyDoFn())
| 'myotherdofn' >> beam.ParDo(MyOtherDoFn()))
p.run()
if not MetricsEnvironment.METRICS_SUPPORTED:
self.skipTest('Metrics are not supported.')
counter_updates = [{'key': key, 'value': val}
for container in p.runner.metrics_containers()
for key, val in
container.get_updates().counters.items()]
counter_values = [update['value'] for update in counter_updates]
counter_keys = [update['key'] for update in counter_updates]
assert_that(res, equal_to([1, 2, 3]))
self.assertEqual(counter_values, [6, 9])
self.assertEqual(counter_keys, [
MetricKey('mydofn',
MetricName('ns1', 'elements')),
MetricKey('myotherdofn',
MetricName('ns2', 'elementsplusone'))])
def test_pardo_side_outputs(self):
def tee(elem, *tags):
for tag in tags:
if tag in elem:
yield beam.pvalue.TaggedOutput(tag, elem)
with self.create_pipeline() as p:
xy = (p
| 'Create' >> beam.Create(['x', 'y', 'xy'])
| beam.FlatMap(tee, 'x', 'y').with_outputs())
assert_that(xy.x, equal_to(['x', 'xy']), label='x')
assert_that(xy.y, equal_to(['y', 'xy']), label='y')
def test_pardo_side_and_main_outputs(self):
def even_odd(elem):
yield elem
yield beam.pvalue.TaggedOutput('odd' if elem % 2 else 'even', elem)
with self.create_pipeline() as p:
ints = p | beam.Create([1, 2, 3])
named = ints | 'named' >> beam.FlatMap(
even_odd).with_outputs('even', 'odd', main='all')
assert_that(named.all, equal_to([1, 2, 3]), label='named.all')
assert_that(named.even, equal_to([2]), label='named.even')
assert_that(named.odd, equal_to([1, 3]), label='named.odd')
unnamed = ints | 'unnamed' >> beam.FlatMap(even_odd).with_outputs()
unnamed[None] | beam.Map(id) # pylint: disable=expression-not-assigned
assert_that(unnamed[None], equal_to([1, 2, 3]), label='unnamed.all')
assert_that(unnamed.even, equal_to([2]), label='unnamed.even')
assert_that(unnamed.odd, equal_to([1, 3]), label='unnamed.odd')
def test_pardo_side_inputs(self):
def cross_product(elem, sides):
for side in sides:
yield elem, side
with self.create_pipeline() as p:
main = p | 'main' >> beam.Create(['a', 'b', 'c'])
side = p | 'side' >> beam.Create(['x', 'y'])
assert_that(main | beam.FlatMap(cross_product, beam.pvalue.AsList(side)),
equal_to([('a', 'x'), ('b', 'x'), ('c', 'x'),
('a', 'y'), ('b', 'y'), ('c', 'y')]))
def test_pardo_windowed_side_inputs(self):
with self.create_pipeline() as p:
# Now with some windowing.
pcoll = p | beam.Create(range(10)) | beam.Map(
lambda t: window.TimestampedValue(t, t))
# Intentionally choosing non-aligned windows to highlight the transition.
main = pcoll | 'WindowMain' >> beam.WindowInto(window.FixedWindows(5))
side = pcoll | 'WindowSide' >> beam.WindowInto(window.FixedWindows(7))
res = main | beam.Map(lambda x, s: (x, sorted(s)),
beam.pvalue.AsList(side))
assert_that(
res,
equal_to([
# The window [0, 5) maps to the window [0, 7).
(0, range(7)),
(1, range(7)),
(2, range(7)),
(3, range(7)),
(4, range(7)),
# The window [5, 10) maps to the window [7, 14).
(5, range(7, 10)),
(6, range(7, 10)),
(7, range(7, 10)),
(8, range(7, 10)),
(9, range(7, 10))]),
label='windowed')
def test_flattened_side_input(self):
with self.create_pipeline() as p:
main = p | 'main' >> beam.Create([None])
side1 = p | 'side1' >> beam.Create([('a', 1)])
side2 = p | 'side2' >> beam.Create([('b', 2)])
side = (side1, side2) | beam.Flatten()
assert_that(
main | beam.Map(lambda a, b: (a, b), beam.pvalue.AsDict(side)),
equal_to([(None, {'a': 1, 'b': 2})]))
def test_gbk_side_input(self):
with self.create_pipeline() as p:
main = p | 'main' >> beam.Create([None])
side = p | 'side' >> beam.Create([('a', 1)]) | beam.GroupByKey()
assert_that(
main | beam.Map(lambda a, b: (a, b), beam.pvalue.AsDict(side)),
equal_to([(None, {'a': [1]})]))
def test_multimap_side_input(self):
with self.create_pipeline() as p:
main = p | 'main' >> beam.Create(['a', 'b'])
side = p | 'side' >> beam.Create([('a', 1), ('b', 2), ('a', 3)])
assert_that(
main | beam.Map(lambda k, d: (k, sorted(d[k])),
beam.pvalue.AsMultiMap(side)),
equal_to([('a', [1, 3]), ('b', [2])]))
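  def test_pardo_singleton_side_input(self):
    # Companion check sketched here as a hedged addition (not in the
    # original suite): beam.pvalue.AsSingleton presents a one-element
    # PCollection as a scalar side input.
    with self.create_pipeline() as p:
      main = p | 'main' >> beam.Create([1, 2, 3])
      side = p | 'side' >> beam.Create([10])
      assert_that(
          main | beam.Map(lambda x, s: x + s, beam.pvalue.AsSingleton(side)),
          equal_to([11, 12, 13]))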
def test_pardo_unfusable_side_inputs(self):
def cross_product(elem, sides):
for side in sides:
yield elem, side
with self.create_pipeline() as p:
pcoll = p | beam.Create(['a', 'b'])
assert_that(
pcoll | beam.FlatMap(cross_product, beam.pvalue.AsList(pcoll)),
equal_to([('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]))
with self.create_pipeline() as p:
pcoll = p | beam.Create(['a', 'b'])
derived = ((pcoll,) | beam.Flatten()
| beam.Map(lambda x: (x, x))
| beam.GroupByKey()
| 'Unkey' >> beam.Map(lambda kv: kv[0]))
assert_that(
pcoll | beam.FlatMap(cross_product, beam.pvalue.AsList(derived)),
equal_to([('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]))
def test_group_by_key(self):
with self.create_pipeline() as p:
res = (p
| beam.Create([('a', 1), ('a', 2), ('b', 3)])
| beam.GroupByKey()
| beam.Map(lambda k_vs: (k_vs[0], sorted(k_vs[1]))))
assert_that(res, equal_to([('a', [1, 2]), ('b', [3])]))
def test_flatten(self):
with self.create_pipeline() as p:
res = (p | 'a' >> beam.Create(['a']),
p | 'bc' >> beam.Create(['b', 'c']),
p | 'd' >> beam.Create(['d'])) | beam.Flatten()
assert_that(res, equal_to(['a', 'b', 'c', 'd']))
def test_combine_per_key(self):
with self.create_pipeline() as p:
res = (p
| beam.Create([('a', 1), ('a', 2), ('b', 3)])
| beam.CombinePerKey(beam.combiners.MeanCombineFn()))
assert_that(res, equal_to([('a', 1.5), ('b', 3.0)]))
def test_read(self):
# Can't use NamedTemporaryFile as a context
# due to https://bugs.python.org/issue14243
temp_file = tempfile.NamedTemporaryFile(delete=False)
try:
temp_file.write('a\nb\nc')
temp_file.close()
with self.create_pipeline() as p:
assert_that(p | beam.io.ReadFromText(temp_file.name),
equal_to(['a', 'b', 'c']))
finally:
os.unlink(temp_file.name)
def test_windowing(self):
with self.create_pipeline() as p:
res = (p
| beam.Create([1, 2, 100, 101, 102])
| beam.Map(lambda t: window.TimestampedValue(('k', t), t))
| beam.WindowInto(beam.transforms.window.Sessions(10))
| beam.GroupByKey()
| beam.Map(lambda k_vs1: (k_vs1[0], sorted(k_vs1[1]))))
assert_that(res, equal_to([('k', [1, 2]), ('k', [100, 101, 102])]))
def test_error_message_includes_stage(self):
with self.assertRaises(BaseException) as e_cm:
with self.create_pipeline() as p:
def raise_error(x):
raise RuntimeError('x')
# pylint: disable=expression-not-assigned
(p
| beam.Create(['a', 'b'])
| 'StageA' >> beam.Map(lambda x: x)
| 'StageB' >> beam.Map(lambda x: x)
| 'StageC' >> beam.Map(raise_error)
| 'StageD' >> beam.Map(lambda x: x))
self.assertIn('StageC', e_cm.exception.args[0])
self.assertNotIn('StageB', e_cm.exception.args[0])
def test_error_traceback_includes_user_code(self):
def first(x):
return second(x)
def second(x):
return third(x)
def third(x):
raise ValueError('x')
try:
with self.create_pipeline() as p:
p | beam.Create([0]) | beam.Map(first) # pylint: disable=expression-not-assigned
except Exception: # pylint: disable=broad-except
message = traceback.format_exc()
else:
raise AssertionError('expected exception not raised')
self.assertIn('first', message)
self.assertIn('second', message)
self.assertIn('third', message)
def test_no_subtransform_composite(self):
class First(beam.PTransform):
def expand(self, pcolls):
return pcolls[0]
with self.create_pipeline() as p:
pcoll_a = p | 'a' >> beam.Create(['a'])
pcoll_b = p | 'b' >> beam.Create(['b'])
assert_that((pcoll_a, pcoll_b) | First(), equal_to(['a']))
def test_metrics(self):
p = self.create_pipeline()
if not isinstance(p.runner, fn_api_runner.FnApiRunner):
# This test is inherited by others that may not support the same
# internal way of accessing progress metrics.
self.skipTest('Metrics not supported.')
counter = beam.metrics.Metrics.counter('ns', 'counter')
distribution = beam.metrics.Metrics.distribution('ns', 'distribution')
gauge = beam.metrics.Metrics.gauge('ns', 'gauge')
pcoll = p | beam.Create(['a', 'zzz'])
# pylint: disable=expression-not-assigned
pcoll | 'count1' >> beam.FlatMap(lambda x: counter.inc())
pcoll | 'count2' >> beam.FlatMap(lambda x: counter.inc(len(x)))
pcoll | 'dist' >> beam.FlatMap(lambda x: distribution.update(len(x)))
pcoll | 'gauge' >> beam.FlatMap(lambda x: gauge.set(len(x)))
res = p.run()
res.wait_until_finish()
c1, = res.metrics().query(beam.metrics.MetricsFilter().with_step('count1'))[
'counters']
self.assertEqual(c1.committed, 2)
c2, = res.metrics().query(beam.metrics.MetricsFilter().with_step('count2'))[
'counters']
self.assertEqual(c2.committed, 4)
dist, = res.metrics().query(beam.metrics.MetricsFilter().with_step('dist'))[
'distributions']
gaug, = res.metrics().query(
beam.metrics.MetricsFilter().with_step('gauge'))['gauges']
self.assertEqual(
dist.committed.data, beam.metrics.cells.DistributionData(4, 2, 1, 3))
self.assertEqual(dist.committed.mean, 2.0)
self.assertEqual(gaug.committed.value, 3)
def test_progress_metrics(self):
p = self.create_pipeline()
if not isinstance(p.runner, fn_api_runner.FnApiRunner):
# This test is inherited by others that may not support the same
# internal way of accessing progress metrics.
self.skipTest('Progress metrics not supported.')
_ = (p
| beam.Create([0, 0, 0, 5e-3 * DEFAULT_SAMPLING_PERIOD_MS])
| beam.Map(time.sleep)
| beam.Map(lambda x: ('key', x))
| beam.GroupByKey()
| 'm_out' >> beam.FlatMap(lambda x: [
1, 2, 3, 4, 5,
beam.pvalue.TaggedOutput('once', x),
beam.pvalue.TaggedOutput('twice', x),
beam.pvalue.TaggedOutput('twice', x)]))
res = p.run()
res.wait_until_finish()
try:
self.assertEqual(2, len(res._metrics_by_stage))
pregbk_metrics, postgbk_metrics = res._metrics_by_stage.values()
if 'Create/Read' not in pregbk_metrics.ptransforms:
# The metrics above are actually unordered. Swap.
pregbk_metrics, postgbk_metrics = postgbk_metrics, pregbk_metrics
self.assertEqual(
4,
pregbk_metrics.ptransforms['Create/Read']
.processed_elements.measured.output_element_counts['None'])
self.assertEqual(
4,
pregbk_metrics.ptransforms['Map(sleep)']
.processed_elements.measured.output_element_counts['None'])
self.assertLessEqual(
4e-3 * DEFAULT_SAMPLING_PERIOD_MS,
pregbk_metrics.ptransforms['Map(sleep)']
.processed_elements.measured.total_time_spent)
self.assertEqual(
1,
postgbk_metrics.ptransforms['GroupByKey/Read']
.processed_elements.measured.output_element_counts['None'])
      # The actual stage name ends up being something like 'm_out/lambda...'
m_out, = [
metrics for name, metrics in postgbk_metrics.ptransforms.items()
if name.startswith('m_out')]
self.assertEqual(
5,
m_out.processed_elements.measured.output_element_counts['None'])
self.assertEqual(
1,
m_out.processed_elements.measured.output_element_counts['once'])
self.assertEqual(
2,
m_out.processed_elements.measured.output_element_counts['twice'])
except:
print(res._metrics_by_stage)
raise
class FnApiRunnerTestWithGrpc(FnApiRunnerTest):
def create_pipeline(self):
return beam.Pipeline(
runner=fn_api_runner.FnApiRunner(use_grpc=True))
class FnApiRunnerTestWithGrpcMultiThreaded(FnApiRunnerTest):
def create_pipeline(self):
return beam.Pipeline(
runner=fn_api_runner.FnApiRunner(
use_grpc=True,
sdk_harness_factory=functools.partial(
sdk_worker.SdkHarness, worker_count=2)))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 |
cliftbar/FlaPyDisaster | FlaPyDisaster/explosion/asteroid/asteroid_math.py | 1 | 7422 | import math
#############
# Constants #
#############
# Joules per megaton tnt
JoulesPerMegatonTNT = 4184000000000000
# Surface atmospheric density, kg/m^3
RhoZero = 1
# Scale height, assumed to be 8000m on average
H = 8000
# Drag coefficient for pre-breakup phase
CD = 2
# "Pancake Factor", represents the ratio of diameter to debris dispersion after airburst
FP = 7
# Pi
PI = math.pi
#############
# Functions #
#############
def hello():
ret_string = "This is the asteroid math package! Some help info is below."
print(ret_string)
def kenetic_energy(impactor_density_kgpm3, diameter_m, initial_velocity_mps):
"""
    Impact (kinetic) energy in Joules.
    :param impactor_density_kgpm3: impactor density in kg/m^3
    :param diameter_m: impactor diameter in meters
    :param initial_velocity_mps: initial velocity in m/s
    :returns: Kinetic energy in Joules (kg-m^2/s^2)
:Reference: EarthImpactEffect.pdf, Equation 1*
"""
return (PI / 12) * impactor_density_kgpm3 * (diameter_m ** 3) * (initial_velocity_mps * initial_velocity_mps)
def joules_to_megaton_tnt(energy_j):
"""
Convert Joules to Megatons TNT.
:param energy_j: Energy in Joules(kg-m^2/s^2)
:returns: Energy in Megatons TNT
"""
return energy_j / JoulesPerMegatonTNT
def return_period_earth(energy_mttnt):
"""
Return period of Asteroid/Comet of a given energy (Megatons TNT) in Years.
:param energy_mttnt: Energy in Megatons TNT
:returns: Return period of given energy level in years
:Reference: EarthImpactEffect.pdf, Equation 3*
"""
return 109 * (energy_mttnt ** 0.78)
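def example_impact_summary(diameter_m=100.0, density_kgpm3=3000.0, velocity_mps=2.0e4):
    """
    Worked example (illustrative sketch, not part of the original module):
    chains kenetic_energy -> joules_to_megaton_tnt -> return_period_earth.
    The default parameters (a 100 m stony body at 20 km/s) are assumptions.
    :returns: tuple of (energy in Joules, energy in Mt TNT, return period in years)
    """
    energy_j = kenetic_energy(density_kgpm3, diameter_m, velocity_mps)
    energy_mt = joules_to_megaton_tnt(energy_j)
    return energy_j, energy_mt, return_period_earth(energy_mt)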
def breakup_altitude(impactor_density_kgpm3, diameter_m, velocity_mps, angle_rad):
"""
    Altitude where the air stagnation pressure exceeds the asteroid's strength. Precursor to airburst.
:param impactor_density_kgpm3: Impactor Density in kg/m^3
:param diameter_m: Impactor diameter in meters
:param velocity_mps: impactor velocity in m/s
:param angle_rad: impactor approach angle above tangent plane in radians. 90 deg, PI/2 is straight down
:returns: breakup altitude in m.
:Reference: EarthImpactEffect.pdf, Equation 11*
"""
yi = yield_strength(impactor_density_kgpm3)
if_term = max(calc_if_term(impactor_density_kgpm3, diameter_m, velocity_mps, angle_rad), 0)
z_star = 0
if if_term <= 1:
z_star = -H * (math.log(yi / (velocity_mps * velocity_mps)) + 1.308 - (0.314 * if_term) - (1.303 * math.sqrt(1 - if_term)))
return z_star
def yield_strength(impactor_density_kgpm3):
"""
Yield strength equation for breakup altitude calculation. Only valid for density range 1000 to 8000.
:param impactor_density_kgpm3: Impactor density in kg/m^3
:returns: Yield Strength in Pascals.
:Reference: EarthImpactEffect.pdf, Equation 10
"""
return 10 ** (2.107 + (0.0624 * math.sqrt(impactor_density_kgpm3)))
def calc_if_term(impactor_density_kgpm3, diameter_m, velocity_mps, angle_rad):
"""
If term for breakup altitude equation.
:param impactor_density_kgpm3: Impactor density in kg/m^3
:param diameter_m: Impactor diameter in meters
    :param velocity_mps: Impactor velocity in m/s
:param angle_rad: impactor approach angle above tangent plane in radians. 90 deg, PI/2 is straight down
:returns: If term for breakup altitude equation
:Reference: EarthImpactEffect.pdf, Equation 12*
"""
numerator = 4.07 * CD * H * yield_strength(impactor_density_kgpm3)
denominator = impactor_density_kgpm3 * diameter_m * (velocity_mps * velocity_mps) * math.sin(angle_rad)
return numerator / denominator
def atmospheric_density(altitude_m):
"""
Returns the atmospheric density at a given height in kg/m^3
:param altitude_m: input height in meters
    :returns: density of atmosphere in kg/m^3
:Reference: EarthImpactEffect.pdf, Equation 5
"""
return RhoZero * math.exp(-1 * altitude_m / H)
def airburst_altitude(breakup_altitude_m, diameter_m, impactor_density_kgpm3, angle_rad):
"""
Altitude of Airburst, occurs after Breakup.
:param breakup_altitude_m: Breakup Altitude in meters
:param diameter_m: Impactor diameter in meters
:param impactor_density_kgpm3: Impactor diameter in kg/m^3
:param angle_rad: impactor approach angle above tangent plane in radians. 90 deg, PI/2 is straight down
:returns: Airbust height in meters. If zero, there is no airburst
"""
air_density_at_breakup = atmospheric_density(breakup_altitude_m)
l = diameter_m * math.sin(angle_rad) * math.sqrt(impactor_density_kgpm3 / (CD * air_density_at_breakup))
second_term = 2 * H * math.log(1 + ((l / (2 * H)) * math.sqrt((FP * FP) - 1)))
if breakup_altitude_m > second_term:
return breakup_altitude_m - second_term
else:
return 0
def velocity_at_altitude_pre_breakup(altitude_m, init_velocity_mps, diameter_m, impactor_density_kgpm3, angle_rad):
"""
Calculates the velocity at a given height, as the object experiences drag, valid before the impactor breaks up.
:param altitude_m: input height in meters
:param init_velocity_mps: Velocity when first impacting the atmosphere.
:param diameter_m: diameter of impactor
:param impactor_density_kgpm3: impactor density in kg/m^3
:param angle_rad: impactor angle in radians
:returns: velocity at the given height in m/s
:Reference: EarthImpactEffect.pdf, Equation 8
"""
return init_velocity_mps * math.exp((-3 * atmospheric_density(altitude_m) * CD * H) / (4 * impactor_density_kgpm3 * diameter_m * math.sin(angle_rad)))
def post_breakup_velocity(breakup_altitude_m, breakup_velocity_mps, diameter_m, impactor_density_kgpm3, angle_rad, is_airburst):
"""
Calculates the velocity directly after a breakup or airburst occurs.
:param breakup_altitude_m: Height that the breakup occurs in meters
:param breakup_velocity_mps: Velocity when breakup occurs in m/s
:param diameter_m: Impactor diameter in meters
:param impactor_density_kgpm3: Impactor density in kg/m^3
    :param angle_rad: Impactor approach angle in radians
    :param is_airburst: Whether the event is an airburst. If an airburst occurs, the velocity given is the velocity directly after the airburst.
    If no airburst occurs, the velocity given is the velocity at impact.
:Reference: EarthImpactEffect.pdf, Equation 17*, 19*, 20; Fig. 2 a, b
"""
second_term = 0
atmo_density_at_breakup = atmospheric_density(breakup_altitude_m)
l_term = diameter_m * math.sin(angle_rad) * math.sqrt(impactor_density_kgpm3 / (CD * atmo_density_at_breakup))
if is_airburst:
alpha_term = math.sqrt((FP * FP) - 1)
second_term = (l_term * diameter_m * diameter_m * alpha_term / 24) * ((8 * (3 + (alpha_term * alpha_term))) + ((3 * alpha_term * l_term / H) * (2 + (alpha_term * alpha_term))))
else:
second_term = ((H ** 3) * diameter_m * diameter_m) / (3 * l_term * l_term) * ((3 * (4 + ((l_term / H) ** 2)) * math.exp(breakup_altitude_m / H)) + (6 * math.exp(2 * breakup_altitude_m / H)) - (16 * math.exp(2 * breakup_altitude_m / H)) - (3 * ((l_term / H) ** 2)) - 2)
airburst_velocity_mps = breakup_velocity_mps * math.exp((-3 * atmo_density_at_breakup * CD) / (4 * impactor_density_kgpm3 * (diameter_m ** 3) * math.sin(angle_rad)) * second_term)
return airburst_velocity_mps
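if __name__ == '__main__':
    # Minimal end-to-end sketch (illustrative only, not part of the original
    # module): the impactor below is an assumed 50 m stony body arriving at
    # 20 km/s under a 45 degree approach angle.
    demo_diameter_m = 50.0
    demo_density_kgpm3 = 3000.0
    demo_velocity_mps = 2.0e4
    demo_angle_rad = math.radians(45)
    demo_breakup_m = breakup_altitude(demo_density_kgpm3, demo_diameter_m,
                                      demo_velocity_mps, demo_angle_rad)
    demo_airburst_m = airburst_altitude(demo_breakup_m, demo_diameter_m,
                                        demo_density_kgpm3, demo_angle_rad)
    print('breakup altitude: %.0f m, airburst altitude: %.0f m'
          % (demo_breakup_m, demo_airburst_m))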
| mit |
heke123/chromium-crosswalk | third_party/cython/src/Cython/Compiler/Tests/TestTreeFragment.py | 131 | 2192 | from Cython.TestUtils import CythonTest
from Cython.Compiler.TreeFragment import *
from Cython.Compiler.Nodes import *
from Cython.Compiler.UtilNodes import *
import Cython.Compiler.Naming as Naming
class TestTreeFragments(CythonTest):
def test_basic(self):
F = self.fragment(u"x = 4")
T = F.copy()
self.assertCode(u"x = 4", T)
def test_copy_is_taken(self):
F = self.fragment(u"if True: x = 4")
T1 = F.root
T2 = F.copy()
self.assertEqual("x", T2.stats[0].if_clauses[0].body.lhs.name)
T2.stats[0].if_clauses[0].body.lhs.name = "other"
self.assertEqual("x", T1.stats[0].if_clauses[0].body.lhs.name)
def test_substitutions_are_copied(self):
T = self.fragment(u"y + y").substitute({"y": NameNode(pos=None, name="x")})
self.assertEqual("x", T.stats[0].expr.operand1.name)
self.assertEqual("x", T.stats[0].expr.operand2.name)
self.assert_(T.stats[0].expr.operand1 is not T.stats[0].expr.operand2)
def test_substitution(self):
F = self.fragment(u"x = 4")
y = NameNode(pos=None, name=u"y")
T = F.substitute({"x" : y})
self.assertCode(u"y = 4", T)
def test_exprstat(self):
F = self.fragment(u"PASS")
pass_stat = PassStatNode(pos=None)
T = F.substitute({"PASS" : pass_stat})
self.assert_(isinstance(T.stats[0], PassStatNode), T)
def test_pos_is_transferred(self):
F = self.fragment(u"""
x = y
x = u * v ** w
""")
T = F.substitute({"v" : NameNode(pos=None, name="a")})
v = F.root.stats[1].rhs.operand2.operand1
a = T.stats[1].rhs.operand2.operand1
        self.assertEqual(v.pos, a.pos)
def test_temps(self):
TemplateTransform.temp_name_counter = 0
F = self.fragment(u"""
TMP
x = TMP
""")
T = F.substitute(temps=[u"TMP"])
s = T.body.stats
self.assert_(isinstance(s[0].expr, TempRefNode))
self.assert_(isinstance(s[1].rhs, TempRefNode))
self.assert_(s[0].expr.handle is s[1].rhs.handle)
if __name__ == "__main__":
import unittest
unittest.main()
| bsd-3-clause |
OpenFacetracker/facetracker-core | lib/youtube-dl/youtube_dl/extractor/dump.py | 34 | 1053 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class DumpIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?dump\.com/(?P<id>[a-zA-Z0-9]+)/'
_TEST = {
'url': 'http://www.dump.com/oneus/',
'md5': 'ad71704d1e67dfd9e81e3e8b42d69d99',
'info_dict': {
'id': 'oneus',
'ext': 'flv',
'title': "He's one of us.",
'thumbnail': 're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r's1.addVariable\("file",\s*"([^"]+)"', webpage, 'video URL')
thumb = self._og_search_thumbnail(webpage)
title = self._search_regex(r'<b>([^"]+)</b>', webpage, 'title')
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumb,
}
| gpl-2.0 |
sidartaoliveira/ansible | lib/ansible/modules/network/f5/bigip_device_ntp.py | 78 | 7567 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_device_ntp
short_description: Manage NTP servers on a BIG-IP
description:
- Manage NTP servers on a BIG-IP
version_added: "2.2"
options:
ntp_servers:
description:
- A list of NTP servers to set on the device. At least one of C(ntp_servers)
or C(timezone) is required.
required: false
default: []
state:
description:
- The state of the NTP servers on the system. When C(present), guarantees
that the NTP servers are set on the system. When C(absent), removes the
specified NTP servers from the device configuration.
required: false
default: present
choices:
- absent
- present
timezone:
description:
- The timezone to set for NTP lookups. At least one of C(ntp_servers) or
C(timezone) is required.
default: UTC
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Set NTP server
bigip_device_ntp:
ntp_servers:
- "192.0.2.23"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
validate_certs: "no"
delegate_to: localhost
- name: Set timezone
bigip_device_ntp:
password: "secret"
server: "lb.mydomain.com"
timezone: "America/Los_Angeles"
user: "admin"
validate_certs: "no"
delegate_to: localhost
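# An illustrative removal play (not from the module's original examples);
# removing configured servers uses state: absent.
- name: Remove an NTP server
  bigip_device_ntp:
    ntp_servers:
      - "192.0.2.23"
    password: "secret"
    server: "lb.mydomain.com"
    user: "admin"
    validate_certs: "no"
    state: "absent"
  delegate_to: localhost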
'''
RETURN = '''
ntp_servers:
description: The NTP servers that were set on the device
returned: changed
type: list
sample: ["192.0.2.23", "192.0.2.42"]
timezone:
description: The timezone that was set on the device
returned: changed
type: string
sample: "true"
'''
try:
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
class BigIpDeviceNtp(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
# The params that change in the module
self.cparams = dict()
# Stores the params that are sent to the module
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def flush(self):
result = dict()
changed = False
state = self.params['state']
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
if 'servers' in self.cparams:
self.cparams['ntp_servers'] = self.cparams.pop('servers')
result.update(**self.cparams)
result.update(dict(changed=changed))
return result
def read(self):
"""Read information and transform it
The values that are returned by BIG-IP in the f5-sdk can have encoding
attached to them as well as be completely missing in some cases.
Therefore, this method will transform the data from the BIG-IP into a
format that is more easily consumable by the rest of the class and the
parameters that are supported by the module.
"""
p = dict()
r = self.api.tm.sys.ntp.load()
if hasattr(r, 'servers'):
# Deliberately using sets to suppress duplicates
p['servers'] = set([str(x) for x in r.servers])
if hasattr(r, 'timezone'):
p['timezone'] = str(r.timezone)
return p
def present(self):
changed = False
params = dict()
current = self.read()
check_mode = self.params['check_mode']
ntp_servers = self.params['ntp_servers']
timezone = self.params['timezone']
# NTP servers can be set independently
if ntp_servers is not None:
if 'servers' in current:
items = set(ntp_servers)
if items != current['servers']:
params['servers'] = list(ntp_servers)
else:
params['servers'] = ntp_servers
# Timezone can be set independently
if timezone is not None:
if 'timezone' in current and current['timezone'] != timezone:
params['timezone'] = timezone
if params:
changed = True
self.cparams = camel_dict_to_snake_dict(params)
if check_mode:
return changed
else:
return changed
r = self.api.tm.sys.ntp.load()
r.update(**params)
r.refresh()
return changed
def absent(self):
changed = False
params = dict()
current = self.read()
check_mode = self.params['check_mode']
ntp_servers = self.params['ntp_servers']
if not ntp_servers:
raise F5ModuleError(
"Absent can only be used when removing NTP servers"
)
if ntp_servers and 'servers' in current:
servers = current['servers']
new_servers = [x for x in servers if x not in ntp_servers]
if servers != new_servers:
params['servers'] = new_servers
if params:
changed = True
self.cparams = camel_dict_to_snake_dict(params)
if check_mode:
return changed
else:
return changed
r = self.api.tm.sys.ntp.load()
r.update(**params)
r.refresh()
return changed
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
ntp_servers=dict(required=False, type='list', default=None),
timezone=dict(default=None, required=False)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['ntp_servers', 'timezone']
],
supports_check_mode=True
)
try:
obj = BigIpDeviceNtp(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
| gpl-3.0 |
hurricup/intellij-community | python/lib/Lib/site-packages/django/contrib/localflavor/id/forms.py | 311 | 6834 | """
ID-specific Form helpers
"""
import re
import time
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
postcode_re = re.compile(r'^[1-9]\d{4}$')
phone_re = re.compile(r'^(\+62|0)[2-9]\d{7,10}$')
plate_re = re.compile(r'^(?P<prefix>[A-Z]{1,2}) ' + \
r'(?P<number>\d{1,5})( (?P<suffix>([A-Z]{1,3}|[1-9][0-9]{,2})))?$')
nik_re = re.compile(r'^\d{16}$')
class IDPostCodeField(Field):
"""
An Indonesian post code field.
http://id.wikipedia.org/wiki/Kode_pos
"""
default_error_messages = {
'invalid': _('Enter a valid post code'),
}
def clean(self, value):
super(IDPostCodeField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = value.strip()
if not postcode_re.search(value):
raise ValidationError(self.error_messages['invalid'])
if int(value) < 10110:
raise ValidationError(self.error_messages['invalid'])
        # Post codes beginning with 1 must match the 1xxx0 pattern (end in 0).
if value[0] == '1' and value[4] != '0':
raise ValidationError(self.error_messages['invalid'])
return u'%s' % (value, )
class IDProvinceSelect(Select):
"""
A Select widget that uses a list of provinces of Indonesia as its
choices.
"""
def __init__(self, attrs=None):
from id_choices import PROVINCE_CHOICES
super(IDProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class IDPhoneNumberField(Field):
"""
An Indonesian telephone number field.
http://id.wikipedia.org/wiki/Daftar_kode_telepon_di_Indonesia
"""
default_error_messages = {
'invalid': _('Enter a valid phone number'),
}
def clean(self, value):
super(IDPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
phone_number = re.sub(r'[\-\s\(\)]', '', smart_unicode(value))
if phone_re.search(phone_number):
return smart_unicode(value)
raise ValidationError(self.error_messages['invalid'])
class IDLicensePlatePrefixSelect(Select):
"""
A Select widget that uses a list of vehicle license plate prefix code
of Indonesia as its choices.
http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
"""
def __init__(self, attrs=None):
from id_choices import LICENSE_PLATE_PREFIX_CHOICES
super(IDLicensePlatePrefixSelect, self).__init__(attrs,
choices=LICENSE_PLATE_PREFIX_CHOICES)
class IDLicensePlateField(Field):
"""
An Indonesian vehicle license plate field.
http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
Plus: "B 12345 12"
"""
default_error_messages = {
'invalid': _('Enter a valid vehicle license plate number'),
}
def clean(self, value):
super(IDLicensePlateField, self).clean(value)
if value in EMPTY_VALUES:
return u''
plate_number = re.sub(r'\s+', ' ',
smart_unicode(value.strip())).upper()
matches = plate_re.search(plate_number)
if matches is None:
raise ValidationError(self.error_messages['invalid'])
# Make sure prefix is in the list of known codes.
from id_choices import LICENSE_PLATE_PREFIX_CHOICES
prefix = matches.group('prefix')
if prefix not in [choice[0] for choice in LICENSE_PLATE_PREFIX_CHOICES]:
raise ValidationError(self.error_messages['invalid'])
# Only Jakarta (prefix B) can have 3 letter suffix.
suffix = matches.group('suffix')
if suffix is not None and len(suffix) == 3 and prefix != 'B':
raise ValidationError(self.error_messages['invalid'])
# RI plates don't have suffix.
if prefix == 'RI' and suffix is not None and suffix != '':
raise ValidationError(self.error_messages['invalid'])
# Number can't be zero.
number = matches.group('number')
if number == '0':
raise ValidationError(self.error_messages['invalid'])
# CD, CC and B 12345 12
if len(number) == 5 or prefix in ('CD', 'CC'):
# suffix must be numeric and non-empty
if re.match(r'^\d+$', suffix) is None:
raise ValidationError(self.error_messages['invalid'])
# Known codes range is 12-124
if prefix in ('CD', 'CC') and not (12 <= int(number) <= 124):
raise ValidationError(self.error_messages['invalid'])
if len(number) == 5 and not (12 <= int(suffix) <= 124):
raise ValidationError(self.error_messages['invalid'])
else:
# suffix must be non-numeric
if suffix is not None and re.match(r'^[A-Z]{,3}$', suffix) is None:
raise ValidationError(self.error_messages['invalid'])
return plate_number
class IDNationalIdentityNumberField(Field):
"""
An Indonesian national identity number (NIK/KTP#) field.
http://id.wikipedia.org/wiki/Nomor_Induk_Kependudukan
xx.xxxx.ddmmyy.xxxx - 16 digits (excl. dots)
"""
default_error_messages = {
'invalid': _('Enter a valid NIK/KTP number'),
}
def clean(self, value):
super(IDNationalIdentityNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub(r'[\s.]', '', smart_unicode(value))
if not nik_re.search(value):
raise ValidationError(self.error_messages['invalid'])
if int(value) == 0:
raise ValidationError(self.error_messages['invalid'])
def valid_nik_date(year, month, day):
try:
t1 = (int(year), int(month), int(day), 0, 0, 0, 0, 0, -1)
d = time.mktime(t1)
t2 = time.localtime(d)
if t1[:3] != t2[:3]:
return False
else:
return True
except (OverflowError, ValueError):
return False
year = int(value[10:12])
month = int(value[8:10])
day = int(value[6:8])
current_year = time.localtime().tm_year
if year < int(str(current_year)[-2:]):
if not valid_nik_date(2000 + int(year), month, day):
raise ValidationError(self.error_messages['invalid'])
elif not valid_nik_date(1900 + int(year), month, day):
raise ValidationError(self.error_messages['invalid'])
if value[:6] == '000000' or value[12:] == '0000':
raise ValidationError(self.error_messages['invalid'])
return '%s.%s.%s.%s' % (value[:2], value[2:6], value[6:12], value[12:])
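# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical form wiring these fields together; the import alias and
# field names below are assumptions made for the example.
from django import forms as _forms

class ExampleIDContactForm(_forms.Form):
    post_code = IDPostCodeField()
    phone = IDPhoneNumberField(required=False)
    nik = IDNationalIdentityNumberField(required=False)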
| apache-2.0 |
arecker/bennedetto | authenticating/models.py | 1 | 4423 | from __future__ import unicode_literals
from uuid import uuid4
import datetime
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
from timezone_field import TimeZoneField
from django.utils import timezone
from django.core.urlresolvers import reverse
import pytz
from authenticating.email import VerifyUserEmail
from bennedetto.utils import expand_url_path
def get_default_timezone():
return pytz.timezone('US/Central')
class PasswordsDontMatch(Exception):
pass
class IncorrectPassword(Exception):
pass
class UserManager(BaseUserManager):
def create_user(self, email, password=None):
user = self.model(email=email)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password):
user = self.model(email=email)
user.set_password(password)
user.is_staff = True
user.save()
return user
def midnight(self, now=None):
'''
returns all users that are *currently* experiencing
the first hour of their day
'''
now = now or datetime.datetime.now(pytz.utc)
zones = [tz for tz in pytz.common_timezones_set
if now.astimezone(pytz.timezone(tz)).hour == 0]
return self.filter(timezone__in=zones)
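    # Worked example (illustrative): at 05:00 UTC, zones five hours behind
    # UTC (e.g. 'America/Bogota') have a local hour of 0, so users stored
    # with those timezones are the ones returned.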
def verify(self, key):
user = self.get(verify_key=key)
user.verified = True
        user.save()
        return user
class User(AbstractBaseUser):
PasswordsDontMatch = PasswordsDontMatch
IncorrectPassword = IncorrectPassword
objects = UserManager()
id = models.UUIDField(primary_key=True,
unique=True,
editable=False,
default=uuid4)
email = models.EmailField(unique=True)
verified = models.BooleanField(default=False)
verify_key = models.UUIDField(default=uuid4, editable=False, unique=True)
timezone = TimeZoneField(default=get_default_timezone)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def get_full_name(self):
return self.get_short_name()
def get_short_name(self):
return self.email
def has_perm(self, *args, **kwargs):
return self.is_staff
def has_module_perms(self, *args):
return self.is_staff
def activate_timezone(self):
timezone.activate(self.timezone)
def send_verification_email(self):
if self.verified:
return False
        VerifyUserEmail(user=self).send()
        return True
def get_verify_link(self):
key = str(self.verify_key)
path = reverse('verify', args=[key])
return expand_url_path(path)
def change_password(self, old, new):
new, new_copy = new
if not new == new_copy:
raise self.PasswordsDontMatch
if not self.check_password(old):
raise self.IncorrectPassword
self.set_password(new)
self.save()
return self
class FamilyManager(models.Manager):
def create_from_user(self, user, name):
family = self.create(name=name)
membership = Membership.objects.create(user=user,
family=family,
admin=True)
        return membership
class Family(models.Model):
objects = FamilyManager()
name = models.CharField(max_length=120)
members = models.ManyToManyField(User, through='Membership')
def __unicode__(self):
return self.name
def invite_user_to_family(self, email=None):
user = User.objects.create(email=email, is_active=False)
Membership(user=user, family=self, admin=False).save()
        return user
class Meta:
verbose_name_plural = 'Families'
class Membership(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
family = models.ForeignKey(Family, on_delete=models.CASCADE)
admin = models.BooleanField(default=False)
def __unicode__(self):
return '{0} Membership'.format(self.family.__unicode__())
@property
def family_name(self):
return self.family.name
@property
def email(self):
return self.user.email
@property
def verified(self):
return self.user.verified
| gpl-3.0 |
HiroIshikawa/21playground | microblog/flask/lib/python3.5/site-packages/pkg_resources/_vendor/packaging/__about__.py | 441 | 1073 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.3"
__author__ = "Donald Stufft"
__email__ = "donald@stufft.io"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
| mit |
phra/802_21 | myODTONE/lib/external/libnl/libnl/python/netlink/route/qdisc/htb.py | 35 | 3608 | #
# Copyright (c) 2011 Thomas Graf <tgraf@suug.ch>
#
"""HTB qdisc
"""
from __future__ import absolute_import
from ... import core as netlink
from ... import util as util
from .. import capi as capi
from .. import tc as tc
class HTBQdisc(object):
def __init__(self, qdisc):
self._qdisc = qdisc
@property
@netlink.nlattr(type=int)
def default_class(self):
return tc.Handle(capi.rtnl_htb_get_defcls(self._qdisc._rtnl_qdisc))
@default_class.setter
def default_class(self, value):
capi.rtnl_htb_set_defcls(self._qdisc._rtnl_qdisc, int(value))
@property
@netlink.nlattr('r2q', type=int)
def r2q(self):
return capi.rtnl_htb_get_rate2quantum(self._qdisc._rtnl_qdisc)
@r2q.setter
def r2q(self, value):
        capi.rtnl_htb_set_rate2quantum(self._qdisc._rtnl_qdisc,
                                       int(value))
def brief(self):
fmt = util.MyFormatter(self)
ret = ' {s|default-class!k} {a|default_class}'
if self.r2q:
ret += ' {s|r2q!k} {a|r2q}'
return fmt.format(ret)
class HTBClass(object):
def __init__(self, cl):
self._class = cl
@property
@netlink.nlattr(type=str)
def rate(self):
rate = capi.rtnl_htb_get_rate(self._class._rtnl_class)
return util.Rate(rate)
@rate.setter
def rate(self, value):
capi.rtnl_htb_set_rate(self._class._rtnl_class, int(value))
@property
@netlink.nlattr(type=str)
def ceil(self):
ceil = capi.rtnl_htb_get_ceil(self._class._rtnl_class)
return util.Rate(ceil)
@ceil.setter
def ceil(self, value):
capi.rtnl_htb_set_ceil(self._class._rtnl_class, int(value))
@property
@netlink.nlattr(type=str)
def burst(self):
burst = capi.rtnl_htb_get_rbuffer(self._class._rtnl_class)
return util.Size(burst)
@burst.setter
def burst(self, value):
capi.rtnl_htb_set_rbuffer(self._class._rtnl_class, int(value))
@property
@netlink.nlattr(type=str)
def ceil_burst(self):
burst = capi.rtnl_htb_get_cbuffer(self._class._rtnl_class)
return util.Size(burst)
@ceil_burst.setter
def ceil_burst(self, value):
capi.rtnl_htb_set_cbuffer(self._class._rtnl_class, int(value))
@property
@netlink.nlattr(type=int)
def prio(self):
return capi.rtnl_htb_get_prio(self._class._rtnl_class)
@prio.setter
def prio(self, value):
capi.rtnl_htb_set_prio(self._class._rtnl_class, int(value))
@property
@netlink.nlattr(type=int)
def quantum(self):
return capi.rtnl_htb_get_quantum(self._class._rtnl_class)
@quantum.setter
def quantum(self, value):
capi.rtnl_htb_set_quantum(self._class._rtnl_class, int(value))
@property
@netlink.nlattr(type=int)
def level(self):
return capi.rtnl_htb_get_level(self._class._rtnl_class)
@level.setter
def level(self, value):
capi.rtnl_htb_set_level(self._class._rtnl_class, int(value))
def brief(self):
fmt = util.MyFormatter(self)
ret = ' {t|prio} {t|rate}'
if self.rate != self.ceil:
ret += ' {s|borrow-up-to!k} {a|ceil}'
ret += ' {t|burst}'
return fmt.format(ret)
def details(self):
fmt = util.MyFormatter(self)
return fmt.nl('\t{t|level} {t|quantum}')
def init_qdisc(qdisc):
qdisc.htb = HTBQdisc(qdisc)
return qdisc.htb
def init_class(cl):
cl.htb = HTBClass(cl)
return cl.htb
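# Illustrative wiring note (an assumption-based sketch, not from the original
# module): netlink.route qdisc/class objects are expected to call
# init_qdisc()/init_class() when the kind is 'htb', after which attributes
# such as qdisc.htb.default_class and cl.htb.rate become available.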
#extern void rtnl_htb_set_quantum(struct rtnl_class *, uint32_t quantum);
| gpl-2.0 |
dlazz/ansible | lib/ansible/modules/network/aci/aci_config_rollback.py | 12 | 9720 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_config_rollback
short_description: Provides rollback and rollback preview functionality (config:ImportP)
description:
- Provides rollback and rollback preview functionality for Cisco ACI fabrics.
  - Config rollbacks are done using snapshots created with M(aci_config_snapshot), via the configImportP class.
seealso:
- module: aci_config_snapshot
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(config:ImportP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
compare_export_policy:
description:
- The export policy that the C(compare_snapshot) is associated to.
type: str
compare_snapshot:
description:
- The name of the snapshot to compare with C(snapshot).
type: str
description:
description:
- The description for the Import Policy.
type: str
aliases: [ descr ]
export_policy:
description:
- The export policy that the C(snapshot) is associated to.
type: str
required: yes
fail_on_decrypt:
description:
- Determines if the APIC should fail the rollback if unable to decrypt secured data.
- The APIC defaults to C(yes) when unset.
type: bool
import_mode:
description:
- Determines how the import should be handled by the APIC.
- The APIC defaults to C(atomic) when unset.
type: str
choices: [ atomic, best-effort ]
import_policy:
description:
- The name of the Import Policy to use for config rollback.
type: str
import_type:
description:
- Determines how the current and snapshot configuration should be compared for replacement.
- The APIC defaults to C(replace) when unset.
type: str
choices: [ merge, replace ]
snapshot:
description:
- The name of the snapshot to rollback to, or the base snapshot to use for comparison.
- The C(aci_snapshot) module can be used to query the list of available snapshots.
type: str
required: yes
state:
description:
- Use C(preview) for previewing the diff between two snapshots.
- Use C(rollback) for reverting the configuration to a previous snapshot.
type: str
choices: [ preview, rollback ]
default: rollback
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
---
- name: Create a Snapshot
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
state: present
delegate_to: localhost
- name: Query Existing Snapshots
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
state: query
delegate_to: localhost
- name: Compare Snapshot Files
aci_config_rollback:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
snapshot: run-2017-08-28T06-24-01
compare_export_policy: config_backup
compare_snapshot: run-2017-08-27T23-43-56
state: preview
delegate_to: localhost
- name: Rollback Configuration
aci_config_rollback:
host: apic
username: admin
password: SomeSecretPassword
import_policy: rollback_config
export_policy: config_backup
snapshot: run-2017-08-28T06-24-01
state: rollback
delegate_to: localhost
- name: Rollback Configuration
aci_config_rollback:
host: apic
username: admin
password: SomeSecretPassword
import_policy: rollback_config
export_policy: config_backup
snapshot: run-2017-08-28T06-24-01
description: Rollback 8-27 changes
import_mode: atomic
import_type: replace
fail_on_decrypt: yes
state: rollback
delegate_to: localhost
'''
RETURN = r'''
preview:
description: A preview between two snapshots
returned: when state is preview
type: str
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
from ansible.module_utils.urls import fetch_url
# Optional, only used for rollback preview
try:
import lxml.etree
from xmljson import cobra
XML_TO_JSON = True
except ImportError:
XML_TO_JSON = False
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
compare_export_policy=dict(type='str'),
compare_snapshot=dict(type='str'),
description=dict(type='str', aliases=['descr']),
export_policy=dict(type='str'),
fail_on_decrypt=dict(type='bool'),
import_mode=dict(type='str', choices=['atomic', 'best-effort']),
import_policy=dict(type='str'),
import_type=dict(type='str', choices=['merge', 'replace']),
snapshot=dict(type='str', required=True),
state=dict(type='str', default='rollback', choices=['preview', 'rollback']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
required_if=[
['state', 'preview', ['compare_export_policy', 'compare_snapshot']],
['state', 'rollback', ['import_policy']],
],
)
aci = ACIModule(module)
description = module.params['description']
export_policy = module.params['export_policy']
fail_on_decrypt = aci.boolean(module.params['fail_on_decrypt'])
import_mode = module.params['import_mode']
import_policy = module.params['import_policy']
import_type = module.params['import_type']
snapshot = module.params['snapshot']
state = module.params['state']
if state == 'rollback':
if snapshot.startswith('run-'):
snapshot = snapshot.replace('run-', '', 1)
if not snapshot.endswith('.tar.gz'):
snapshot += '.tar.gz'
filename = 'ce2_{0}-{1}'.format(export_policy, snapshot)
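        # The APIC stores snapshot archives as
        # ce2_<export-policy>-<snapshot>.tar.gz; the normalization above
        # strips a leading 'run-' and appends '.tar.gz' so the snapshot name
        # reported by aci_config_snapshot can be passed in unchanged.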
aci.construct_url(
root_class=dict(
aci_class='configImportP',
aci_rn='fabric/configimp-{0}'.format(import_policy),
module_object=import_policy,
target_filter={'name': import_policy},
),
)
aci.get_existing()
aci.payload(
aci_class='configImportP',
class_config=dict(
adminSt='triggered',
descr=description,
failOnDecryptErrors=fail_on_decrypt,
fileName=filename,
importMode=import_mode,
importType=import_type,
name=import_policy,
snapshot='yes',
),
)
aci.get_diff(aci_class='configImportP')
aci.post_config()
elif state == 'preview':
aci.url = '%(protocol)s://%(host)s/mqapi2/snapshots.diff.xml' % module.params
aci.filter_string = (
'?s1dn=uni/backupst/snapshots-[uni/fabric/configexp-%(export_policy)s]/snapshot-%(snapshot)s&'
's2dn=uni/backupst/snapshots-[uni/fabric/configexp-%(compare_export_policy)s]/snapshot-%(compare_snapshot)s'
) % module.params
# Generate rollback comparison
get_preview(aci)
aci.exit_json()
def get_preview(aci):
'''
This function is used to generate a preview between two snapshots and add the parsed results to the aci module return data.
'''
uri = aci.url + aci.filter_string
resp, info = fetch_url(aci.module, uri, headers=aci.headers, method='GET', timeout=aci.module.params['timeout'], use_proxy=aci.module.params['use_proxy'])
aci.method = 'GET'
aci.response = info['msg']
aci.status = info['status']
# Handle APIC response
if info['status'] == 200:
xml_to_json(aci, resp.read())
else:
aci.result['raw'] = resp.read()
aci.fail_json(msg="Request failed: %(code)s %(text)s (see 'raw' output)" % aci.error)
def xml_to_json(aci, response_data):
'''
This function is used to convert preview XML data into JSON.
'''
if XML_TO_JSON:
xml = lxml.etree.fromstring(to_bytes(response_data))
xmldata = cobra.data(xml)
aci.result['preview'] = xmldata
else:
aci.result['preview'] = response_data
if __name__ == "__main__":
main()
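# A minimal, hedged sketch of the XML-to-JSON conversion performed by
# xml_to_json() above; the <diff> payload is a made-up stand-in for a real
# APIC snapshot-diff response, and it assumes lxml and xmljson imported
# successfully (XML_TO_JSON is True):
#
#   xml = lxml.etree.fromstring(b'<diff><fvTenant name="production"/></diff>')
#   preview = cobra.data(xml)  # nested dicts/lists mirroring the XML tree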
| gpl-3.0 |
knittledan/Location_Search_Prediction | thirdParty/yaml/mac/yaml_py32/yaml/nodes.py | 985 | 1440 |
class Node(object):
def __init__(self, tag, value, start_mark, end_mark):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
value = self.value
#if isinstance(value, list):
# if len(value) == 0:
# value = '<empty>'
# elif len(value) == 1:
# value = '<1 item>'
# else:
# value = '<%d items>' % len(value)
#else:
# if len(value) > 75:
# value = repr(value[:70]+u' ... ')
# else:
# value = repr(value)
value = repr(value)
return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
class ScalarNode(Node):
id = 'scalar'
def __init__(self, tag, value,
start_mark=None, end_mark=None, style=None):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
class CollectionNode(Node):
def __init__(self, tag, value,
start_mark=None, end_mark=None, flow_style=None):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.flow_style = flow_style
class SequenceNode(CollectionNode):
id = 'sequence'
class MappingNode(CollectionNode):
id = 'mapping'
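# Hedged sketch: nodes are plain containers normally produced by the
# composer, but they can also be built by hand, e.g.:
#
#   scalar = ScalarNode('tag:yaml.org,2002:str', 'hello')
#   seq = SequenceNode('tag:yaml.org,2002:seq', [scalar])
#   repr(seq)  # "SequenceNode(tag='tag:yaml.org,2002:seq', value=[...])"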
| mit |
Chilledheart/chromium | tools/telemetry/telemetry/core/android_action_runner.py | 9 | 6107 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
from telemetry.core import util
class ActionNotSupported(Exception):
pass
class AndroidActionRunner(object):
"""Provides an API for interacting with an android device.
This makes use of functionality provided by the android input command. None
of the gestures here are guaranteed to be performant for telemetry tests and
there is no official support for this API.
TODO(ariblue): Replace this API with a better implementation for interacting
with native components.
"""
def __init__(self, platform_backend):
self._platform_backend = platform_backend
def SmoothScrollBy(self, left_start_coord, top_start_coord, direction,
scroll_distance):
"""Perfrom gesture to scroll down on the android device.
"""
if direction not in ['down', 'up', 'left', 'right']:
raise ActionNotSupported('Invalid scroll direction: %s' % direction)
    # Use a slow swipe (duration in ms equal to the distance in pixels) so
    # the page travels exactly the distance we specify.
duration = scroll_distance
# Note that the default behavior is swiping up for scrolling down.
if direction == 'down':
left_end_coord = left_start_coord
top_end_coord = top_start_coord - scroll_distance
elif direction == 'up':
left_end_coord = left_start_coord
top_end_coord = top_start_coord + scroll_distance
elif direction == 'right':
left_end_coord = left_start_coord - scroll_distance
top_end_coord = top_start_coord
elif direction == 'left':
left_end_coord = left_start_coord + scroll_distance
top_end_coord = top_start_coord
self.InputSwipe(left_start_coord, top_start_coord, left_end_coord,
top_end_coord, duration)
def Wait(self, seconds):
"""Wait for the number of seconds specified.
Args:
seconds: The number of seconds to wait.
"""
time.sleep(seconds)
def InputText(self, string):
"""Convert the characters of the string into key events and send to device.
Args:
string: The string to send to the device.
"""
self._platform_backend.device.RunShellCommand('input text %s' % string)
def InputKeyEvent(self, key):
"""Send a single key input to the device.
Args:
key: A key code number or name that will be sent to the device
"""
self._platform_backend.device.RunShellCommand('input keyevent %s' % key)
def InputTap(self, x_coord, y_coord):
"""Perform a tap input at the given coordinates.
Args:
x_coord: The x coordinate of the tap event.
y_coord: The y coordinate of the tap event.
"""
self._platform_backend.device.RunShellCommand('input tap %s %s' % (x_coord,
y_coord))
def InputSwipe(self, left_start_coord, top_start_coord, left_end_coord,
top_end_coord, duration):
"""Perform a swipe input.
Args:
left_start_coord: The horizontal starting coordinate of the gesture
top_start_coord: The vertical starting coordinate of the gesture
left_end_coord: The horizontal ending coordinate of the gesture
top_end_coord: The vertical ending coordinate of the gesture
duration: The length of time of the swipe in milliseconds
"""
self._platform_backend.device.RunShellCommand(
'input swipe %s %s %s %s %s' % (left_start_coord, top_start_coord,
left_end_coord, top_end_coord,
duration))
def InputPress(self):
"""Perform a press input."""
self._platform_backend.device.RunShellCommand('input press')
def InputRoll(self, dx, dy):
"""Perform a roll input. This sends a simple zero-pressure move event.
Args:
dx: Change in the x coordinate due to move.
dy: Change in the y coordinate due to move.
"""
self._platform_backend.device.RunShellCommand('input roll %s %s' % (dx, dy))
def EnsureScreenOn(self):
"""If device screen is off, turn screen on.
If the screen is already on, return immediately.
Raises:
Timeout: If the screen is off and device fails to turn screen on.
"""
if self._platform_backend.IsScreenOn():
return
self._ToggleScreenOn()
util.WaitFor(self._platform_backend.IsScreenOn, 5)
def TurnScreenOn(self):
"""If device screen is off, turn screen on.
If the screen is already on, log a warning and return immediately.
Raises:
Timeout: If the screen is off and device fails to turn screen on.
"""
if not self._platform_backend.IsScreenOn():
self._ToggleScreenOn()
else:
logging.warning('Screen on when expected off.')
return
util.WaitFor(self._platform_backend.IsScreenOn, 5)
def TurnScreenOff(self):
"""If device screen is on, turn screen off.
If the screen is already off, log a warning and return immediately.
Raises:
Timeout: If the screen is on and device fails to turn screen off.
"""
def is_screen_off():
return not self._platform_backend.IsScreenOn()
if self._platform_backend.IsScreenOn():
self._ToggleScreenOn()
else:
logging.warning('Screen off when expected on.')
return
util.WaitFor(is_screen_off, 5)
def UnlockScreen(self):
"""If device screen is locked, unlocks it.
If the device is not locked, log a warning and return immediately.
Raises:
Timeout: If device fails to unlock screen.
"""
def is_screen_unlocked():
return not self._platform_backend.IsScreenLocked()
if self._platform_backend.IsScreenLocked():
self._platform_backend.device.RunShellCommand('input keyevent 82')
else:
logging.warning('Screen not locked when expected.')
return
util.WaitFor(is_screen_unlocked, 5)
def _ToggleScreenOn(self):
self._platform_backend.device.RunShellCommand('input keyevent 26')
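# Hedged usage sketch; `platform_backend` is assumed to be an Android
# platform backend exposing `device` plus the screen-state helpers used
# above (coordinates/distances in pixels, swipe duration in milliseconds):
#
#   runner = AndroidActionRunner(platform_backend)
#   runner.EnsureScreenOn()
#   runner.UnlockScreen()
#   runner.SmoothScrollBy(240, 800, 'down', 400)  # swipes up 400px
#   runner.Wait(1)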
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/django/contrib/gis/geoip/prototypes.py | 28 | 3949 | from ctypes import POINTER, Structure, c_char_p, c_float, c_int, string_at
from django.contrib.gis.geoip.libgeoip import free, lgeoip
# #### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
_fields_ = [('country_code', c_char_p),
('country_code3', c_char_p),
('country_name', c_char_p),
('region', c_char_p),
('city', c_char_p),
('postal_code', c_char_p),
('latitude', c_float),
('longitude', c_float),
# TODO: In 1.4.6 this changed from `int dma_code;` to
# `union {int metro_code; int dma_code;};`. Change
                # to a `ctypes.Union` to accommodate this in the future, when
# pre-1.4.6 versions are no longer distributed.
('dma_code', c_int),
('area_code', c_int),
('charset', c_int),
('continent_code', c_char_p),
]
geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p]
GEOIP_DEFAULT_ENCODING = 'iso-8859-1'
geoip_encodings = {
0: 'iso-8859-1',
1: 'utf8',
}
class GeoIPTag(Structure):
pass
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
# #### ctypes function prototypes ####
# GeoIP_lib_version appeared in version 1.4.7.
if hasattr(lgeoip, 'GeoIP_lib_version'):
GeoIP_lib_version = lgeoip.GeoIP_lib_version
GeoIP_lib_version.argtypes = None
GeoIP_lib_version.restype = c_char_p
else:
GeoIP_lib_version = None
# For freeing memory allocated within a record
GeoIPRecord_delete = lgeoip.GeoIPRecord_delete
GeoIPRecord_delete.argtypes = [RECTYPE]
GeoIPRecord_delete.restype = None
# For retrieving records by name or address.
def check_record(result, func, cargs):
if result:
# Checking the pointer to the C structure, if valid pull out elements
# into a dictionary.
rec = result.contents
record = {fld: getattr(rec, fld) for fld, ctype in rec._fields_}
# Now converting the strings to unicode using the proper encoding.
encoding = geoip_encodings[record['charset']]
for char_field in geoip_char_fields:
if record[char_field]:
record[char_field] = record[char_field].decode(encoding)
# Free the memory allocated for the struct & return.
GeoIPRecord_delete(result)
return record
else:
return None
def record_output(func):
func.argtypes = [DBTYPE, c_char_p]
func.restype = RECTYPE
func.errcheck = check_record
return func
GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
GeoIP_open = lgeoip.GeoIP_open
GeoIP_open.restype = DBTYPE
GeoIP_delete = lgeoip.GeoIP_delete
GeoIP_delete.argtypes = [DBTYPE]
GeoIP_delete.restype = None
# This is so the string pointer can be freed within Python.
class geoip_char_p(c_char_p):
pass
def check_string(result, func, cargs):
if result:
s = string_at(result)
free(result)
else:
s = ''
return s.decode(GEOIP_DEFAULT_ENCODING)
GeoIP_database_info = lgeoip.GeoIP_database_info
GeoIP_database_info.restype = geoip_char_p
GeoIP_database_info.errcheck = check_string
# String output routines.
def string_output(func):
def _err_check(result, func, cargs):
if result:
return result.decode(GEOIP_DEFAULT_ENCODING)
return result
func.restype = c_char_p
func.errcheck = _err_check
return func
GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
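# Hedged usage sketch of the prototypes above; the database path is an
# assumption, and 0 is the plain (uncached) GeoIP open flag:
#
#   db = GeoIP_open(b'/usr/share/GeoIP/GeoLiteCity.dat', 0)
#   GeoIP_record_by_name(db, b'example.com')  # dict or None, via check_record
#   GeoIP_delete(db)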
| mit |
phantomas1234/fbaproject | work/rndMediumKnocks/startClients.py | 1 | 2567 | #!/usr/bin/env python
# encoding: utf-8
"""
startClients.py
Created by Nikolaus Sonnenschein on 2008-10-09.
Copyright (c) 2008 Jacobs University of Bremen. All rights reserved.
"""
import urllib
import re
import string
import pprint
import os
import random
class clamvParser:
"""docstring for clamvParser"""
def __init__(self):
self.url = 'http://www.clamv.iu-bremen.de/CLAMV/Wizard/Accounting/html/CLAMVCluster.html'
self.page = urllib.urlopen(self.url).read()
regEx = re.compile('<! build Table 1 in the html file>.+<! end of table 1>', re.S)
self.lines = string.split(regEx.search(self.page).group(), "\n")[7:-2]
reg = re.compile('<td>(.+?)</td>')
self.table = [reg.findall(elem)for elem in self.lines]
def __str__(self):
        return str(self.table)
class clamvFilter(clamvParser):
def _sortBy_Load(self, x, y):
if x[3] < y[3]:
return 1
elif x[3] == y[3]:
return 0
else:
return -1
def filterClusterLoad(self, threshold=0.):
tab = filter(lambda x: float(x[-3]) <= threshold, self.table)
tab.sort(self._sortBy_Load)
return tab
def filterNumProcesses(self, threshold=0):
return filter(lambda x: float(x[-2]) <= threshold, self.table)
def helperFunc(table, oldOnesThresh=40):
l = list()
print table
for i in table:
tmp = i[1:]
if i[0] == 'tlabterm':
break
tmp.insert(0, i[0][4:])
l.append(tmp)
return [i for i in l if int(i[0]) > oldOnesThresh]
def getHosts(numProc):
clamv = clamvFilter()
# print clamv
# pprint.pprint(clamv.filterClusterLoad(1.))
output = helperFunc(clamv.filterNumProcesses(numProc))
hosts = ['tlab'+i[0]+'.clamv.iu-bremen.de' for i in output]
return hosts
def startClient(host):
# os.system('ssh nsonnensch@' + host + ' "cd Sandbox; ./remoteTest &"')
# os.system('ssh nsonnensch@' + host + ' bash -c "Sandbox/remoteTest &" &')
c = ' "source .bash_profile ; cd ./Sandbox/ifba/ifba/rndMediumKnocks/ ; python client.py 212.201.48.108 > /dev/null &"'
os.system('ssh nsonnensch@' + host + c + ' &')
if __name__ == '__main__':
hostsZero = set(getHosts(0))
print hostsZero
print len(hostsZero)
hostsOne = set(random.sample(set(getHosts(1)).difference(hostsZero),10))
print hostsOne.union(hostsZero)
print len(hostsOne.union(hostsZero))
superHosts = list(hostsOne.union(hostsZero))
for host in superHosts:
print 'starting client on ', host
startClient(host)
print "bye, bye my dear"
exit()
| mit |
Nikoala/CouchPotatoServer | couchpotato/core/notifications/xmpp_.py | 96 | 2910 | from time import sleep
import traceback
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import xmpp
log = CPLog(__name__)
autoload = 'Xmpp'
class Xmpp(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
try:
jid = xmpp.protocol.JID(self.conf('username'))
client = xmpp.Client(jid.getDomain(), debug = [])
# Connect
if not client.connect(server = (self.conf('hostname'), self.conf('port'))):
log.error('XMPP failed: Connection to server failed.')
return False
# Authenticate
if not client.auth(jid.getNode(), self.conf('password'), resource = jid.getResource()):
log.error('XMPP failed: Failed to authenticate.')
return False
# Send message
client.send(xmpp.protocol.Message(to = self.conf('to'), body = message, typ = 'chat'))
# Disconnect
# some older servers will not send the message if you disconnect immediately after sending
sleep(1)
client.disconnect()
log.info('XMPP notifications sent.')
return True
except:
log.error('XMPP failed: %s', traceback.format_exc())
return False
config = [{
'name': 'xmpp',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'xmpp',
'label': 'XMPP',
            'description': 'For Jabber, Hangouts (Google Talk), AIM...',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'username',
'description': 'User sending the message. For Hangouts, e-mail of a single-step authentication Google account.',
},
{
'name': 'password',
'type': 'Password',
},
{
'name': 'hostname',
'default': 'talk.google.com',
},
{
'name': 'to',
'description': 'Username (or e-mail for Hangouts) of the person to send the messages to.',
},
{
'name': 'port',
'type': 'int',
'default': 5222,
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
| gpl-3.0 |
rocky4570/moto | moto/datapipeline/models.py | 9 | 5030 | from __future__ import unicode_literals
import datetime
import boto.datapipeline
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from .utils import get_random_pipeline_id, remove_capitalization_of_dict_keys
class PipelineObject(BaseModel):
def __init__(self, object_id, name, fields):
self.object_id = object_id
self.name = name
self.fields = fields
def to_json(self):
return {
"fields": self.fields,
"id": self.object_id,
"name": self.name,
}
class Pipeline(BaseModel):
def __init__(self, name, unique_id, **kwargs):
self.name = name
self.unique_id = unique_id
self.description = kwargs.get('description', '')
self.pipeline_id = get_random_pipeline_id()
self.creation_time = datetime.datetime.utcnow()
self.objects = []
self.status = "PENDING"
self.tags = kwargs.get('tags', [])
@property
def physical_resource_id(self):
return self.pipeline_id
def to_meta_json(self):
return {
"id": self.pipeline_id,
"name": self.name,
}
def to_json(self):
return {
"description": self.description,
"fields": [{
"key": "@pipelineState",
"stringValue": self.status,
}, {
"key": "description",
"stringValue": self.description
}, {
"key": "name",
"stringValue": self.name
}, {
"key": "@creationTime",
"stringValue": datetime.datetime.strftime(self.creation_time, '%Y-%m-%dT%H-%M-%S'),
}, {
"key": "@id",
"stringValue": self.pipeline_id,
}, {
"key": "@sphere",
"stringValue": "PIPELINE"
}, {
"key": "@version",
"stringValue": "1"
}, {
"key": "@userId",
"stringValue": "924374875933"
}, {
"key": "@accountId",
"stringValue": "924374875933"
}, {
"key": "uniqueId",
"stringValue": self.unique_id
}],
"name": self.name,
"pipelineId": self.pipeline_id,
"tags": self.tags
}
def set_pipeline_objects(self, pipeline_objects):
self.objects = [
PipelineObject(pipeline_object['id'], pipeline_object[
'name'], pipeline_object['fields'])
for pipeline_object in remove_capitalization_of_dict_keys(pipeline_objects)
]
def activate(self):
self.status = "SCHEDULED"
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
datapipeline_backend = datapipeline_backends[region_name]
properties = cloudformation_json["Properties"]
cloudformation_unique_id = "cf-" + properties["Name"]
pipeline = datapipeline_backend.create_pipeline(
properties["Name"], cloudformation_unique_id)
datapipeline_backend.put_pipeline_definition(
pipeline.pipeline_id, properties["PipelineObjects"])
if properties["Activate"]:
pipeline.activate()
return pipeline
class DataPipelineBackend(BaseBackend):
def __init__(self):
self.pipelines = OrderedDict()
def create_pipeline(self, name, unique_id, **kwargs):
pipeline = Pipeline(name, unique_id, **kwargs)
self.pipelines[pipeline.pipeline_id] = pipeline
return pipeline
def list_pipelines(self):
return self.pipelines.values()
def describe_pipelines(self, pipeline_ids):
pipelines = [pipeline for pipeline in self.pipelines.values(
) if pipeline.pipeline_id in pipeline_ids]
return pipelines
def get_pipeline(self, pipeline_id):
return self.pipelines[pipeline_id]
def delete_pipeline(self, pipeline_id):
self.pipelines.pop(pipeline_id, None)
def put_pipeline_definition(self, pipeline_id, pipeline_objects):
pipeline = self.get_pipeline(pipeline_id)
pipeline.set_pipeline_objects(pipeline_objects)
def get_pipeline_definition(self, pipeline_id):
pipeline = self.get_pipeline(pipeline_id)
return pipeline.objects
def describe_objects(self, object_ids, pipeline_id):
pipeline = self.get_pipeline(pipeline_id)
pipeline_objects = [
pipeline_object for pipeline_object in pipeline.objects
if pipeline_object.object_id in object_ids
]
return pipeline_objects
def activate_pipeline(self, pipeline_id):
pipeline = self.get_pipeline(pipeline_id)
pipeline.activate()
datapipeline_backends = {}
for region in boto.datapipeline.regions():
datapipeline_backends[region.name] = DataPipelineBackend()
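# Hedged usage sketch of the backend API above; the region name and the
# pipeline-object dict are illustrative only (keys are de-capitalized by
# remove_capitalization_of_dict_keys before PipelineObject is built):
#
#   backend = datapipeline_backends['us-east-1']
#   pipeline = backend.create_pipeline('my-pipeline', 'unique-token-1')
#   backend.put_pipeline_definition(
#       pipeline.pipeline_id,
#       [{'Id': 'Default', 'Name': 'Default', 'Fields': []}])
#   backend.activate_pipeline(pipeline.pipeline_id)
#   backend.get_pipeline(pipeline.pipeline_id).status  # 'SCHEDULED'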
| apache-2.0 |
annayqho/TheCannon | code/lamost/mass_age/cn/run_full_test.py | 1 | 4784 | """
Run the test step on all the LAMOST DR2 objects.
You have to run this script on aida42082
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '/home/annaho/TheCannon')
#sys.path.insert(0, '/home/annaho')
#from lamost import load_spectra
#import dataset
#import model
from TheCannon import dataset
from TheCannon import model
#from astropy.table import Table
from matplotlib.colors import LogNorm
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
import os
from pull_data import find_colors, apply_mask
SPEC_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels"
COL_DIR = "/home/annaho/TheCannon/data/lamost"
MODEL_DIR = "."
def test_step_iteration(ds, m, starting_guess):
errs, chisq = m.infer_labels(ds, starting_guess)
return ds.test_label_vals, chisq, errs
def test_step(date):
wl = np.load("%s/wl_cols.npz" %MODEL_DIR)['arr_0']
test_ID = np.load("%s/output/%s_ids.npz" %(SPEC_DIR, date))['arr_0']
print(str(len(test_ID)) + " objects")
test_flux_temp = np.load("%s/output/%s_norm.npz" %(SPEC_DIR,date))['arr_0']
test_ivar_temp = np.load("%s/output/%s_norm.npz" %(SPEC_DIR,date))['arr_1']
# Mask
mask = np.load("mask.npz")['arr_0']
test_ivar_masked = apply_mask(wl[0:3626], test_ivar_temp, mask)
# Append colors
col = np.load(COL_DIR + "/" + date + "_col.npz")['arr_0']
col_ivar = np.load(COL_DIR + "/" + date + "_col_ivar.npz")['arr_0']
bad_flux = np.logical_or(np.isnan(col), col==np.inf)
col[bad_flux] = 1.0
col_ivar[bad_flux] = 0.0
bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar==np.inf)
col_ivar[bad_ivar] = 0.0
test_flux = np.hstack((test_flux_temp, col.T))
test_ivar = np.hstack((test_ivar_temp, col_ivar.T))
lamost_label = np.load("%s/output/%s_tr_label.npz" %(SPEC_DIR,date))['arr_0']
apogee_label = np.load("./ref_label.npz")['arr_0']
ds = dataset.Dataset(wl, test_ID, test_flux[0:2,:], test_ivar[0:2,:],
lamost_label, test_ID, test_flux, test_ivar)
#np.savez(COL_DIR + "/%s_test_flux.npz" %date, ds.test_flux)
#np.savez(COL_DIR + "/%s_test_ivar.npz" %date, ds.test_ivar)
np.savez(COL_DIR + "/%s_test_snr.npz" %date, ds.test_SNR)
np.savez(COL_DIR + "/%s_test_id.npz" %date, ds.test_ID)
ds.set_label_names(
['T_{eff}', '\log g', '[Fe/H]', '[C/M]', '[N/M]', '[\\alpha/Fe]', 'A_k'])
# m = model.CannonModel(2)
# m.coeffs = np.load("./coeffs.npz")['arr_0']
# m.scatters = np.load("./scatters.npz")['arr_0']
# m.chisqs = np.load("./chisqs.npz")['arr_0']
# m.pivots = np.load("./pivots.npz")['arr_0']
#
# nlabels = len(m.pivots)
# nobj = len(test_ID)
#
# nguesses = 7
# choose = np.random.randint(0,nobj,size=nguesses)
# print(apogee_label.shape)
# print(choose.shape)
# print(m.pivots.shape)
# starting_guesses = apogee_label[choose]-m.pivots
#
# labels = np.zeros((nguesses, nobj, nlabels))
# chisq = np.zeros((nguesses, nobj))
# errs = np.zeros(labels.shape)
#
# for ii,guess in enumerate(starting_guesses):
# a,b,c = test_step_iteration(ds,m,starting_guesses[ii])
# labels[ii,:] = a
# chisq[ii,:] = b
# errs[ii,:] = c
#
# np.savez(COL_DIR + "/%s_cannon_label_guesses.npz" %date, labels)
# np.savez(COL_DIR + "/%s_cannon_chisq_guesses.npz" %date, labels)
#
# choose = np.argmin(chisq, axis=0)
# best_chisq = np.min(chisq, axis=0)
# best_labels = np.zeros((nobj, nlabels))
# best_errs = np.zeros(best_labels.shape)
# for jj,val in enumerate(choose):
# best_labels[jj,:] = labels[:,jj,:][val]
# best_errs[jj,:] = errs[:,jj,:][val]
#
# np.savez(COL_DIR + "/%s_all_cannon_labels.npz" %date, best_labels)
# np.savez(COL_DIR + "/%s_cannon_label_chisq.npz" %date, best_chisq)
# np.savez(COL_DIR + "/%s_cannon_label_errs.npz" %date, best_errs)
#
# ds.test_label_vals = best_labels
# #ds.diagnostics_survey_labels(figname="%s_survey_labels_triangle.png" %date)
# ds.test_label_vals = best_labels[:,0:3]
# ds.set_label_names(['T_{eff}', '\log g', '[M/H]'])
# ds.diagnostics_1to1(figname = COL_DIR + "/%s_1to1_test_label" %date)
#
if __name__=="__main__":
dates = os.listdir("/home/share/LAMOST/DR2/DR2_release")
dates = np.array(dates)
dates = np.delete(dates, np.where(dates=='.directory')[0][0])
dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0])
dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0])
for date in dates:
print("running %s" %date)
#if glob.glob(COL_DIR + "/%s_all_cannon_labels.npz" %date):
# print("already done")
#else:
test_step(date)
| mit |
qbdsoft/pip | pip/_vendor/requests/packages/chardet/__init__.py | 1778 | 1295 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.3.0"
from sys import version_info
def detect(aBuf):
if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or
(version_info >= (3, 0) and not isinstance(aBuf, bytes))):
raise ValueError('Expected a bytes object, not a unicode object')
from . import universaldetector
u = universaldetector.UniversalDetector()
u.reset()
u.feed(aBuf)
u.close()
return u.result
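# Hedged usage sketch; assumes the package is importable as `chardet`, and
# 'unknown.txt' is a hypothetical input file (longer inputs give more
# reliable guesses):
#
#   import chardet
#   chardet.detect(open('unknown.txt', 'rb').read())
#   # -> e.g. {'encoding': 'utf-8', 'confidence': 0.99}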
| mit |
vgm64/highway-radio | parsing/parse_html_to_lists.py | 1 | 7311 |
from bs4 import BeautifulSoup
import re
import MySQLdb
def open_html(filename):
infile = open(filename)
raw_text = infile.read()
    # It's unclear exactly where, but somewhere these onmousedown/ondblclick
    # attributes completely break parsing, so strip them out up front.
raw_text = re.sub('onmousedown=".*?"', '', raw_text)
raw_text = re.sub('ondblclick=".*?"', '', raw_text)
soup = BeautifulSoup(raw_text)
return soup
def yield_rows(soup):
for tr in soup.table.find_all('tr')[1:]:
yield tr
def parse_row(row):
results = []
for item in row.find_all('td'):
#print item.contents, '----', item
if len(item.contents) > 0:
results.append(item.contents[0].encode('ascii', 'ignore'))
else:
results.append(None)
#print '@@@', results
return results
def parse_html_to_lists(filename):
soup = open_html(filename)
row_iterator = yield_rows(soup)
#td_iterator = yield_tds(soup)
heading = 'frequency p location reg la program regprogram pty mod power dir pol height coord ant haat rds_id rds_reg pi_id pi_reg remarks powertrp'
all_results = []
for row in row_iterator:
results = parse_row(row)
all_results.append(results)
return all_results
#def get_database_conn():
#connection = MySQLdb.connect('localhost', 'root', '', 'insight')
#cursor = connection.cursor()
#return connection, cursor
def insert_into_mysql(data, reset=True):
connection = MySQLdb.connect('localhost', 'root', '', 'insight')
with connection:
cursor = connection.cursor()
if reset:
cursor.execute("DROP TABLE IF EXISTS fmlist;")
cursor.execute("CREATE TABLE fmlist (id INT PRIMARY KEY AUTO_INCREMENT, \
frequency FLOAT, \
p CHAR(10), \
location CHAR(100), \
reg CHAR(50), \
la CHAR(50), \
program CHAR(50), \
regprogram CHAR(100), \
pty CHAR(100), \
modmod CHAR(100), \
power FLOAT, \
dir CHAR(50), \
pol CHAR(50), \
height FLOAT, \
coord CHAR(50), \
ant CHAR(50), \
haat CHAR(50), \
rds_id CHAR(100), \
rds_reg CHAR(50), \
pi_id CHAR(50), \
pi_reg CHAR(50), \
remarks CHAR(50), \
powertrp CHAR(50), \
cat CHAR(100), \
scs CHAR(50), \
lat FLOAT, \
lon FLOAT)")
base_query = """INSERT INTO fmlist (frequency, p, location, reg, la, program, regprogram, pty, modmod, power, dir, pol, height, coord, ant, haat, rds_id, rds_reg, pi_id, pi_reg, remarks, powertrp, cat, scs, lat, lon) VALUES
(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """
#print "Entering execute for loop"
#for datum in data:
#print datum
#cursor.execute(base_query, datum)
cursor.executemany(base_query, data)
connection.commit()
def clean(data):
for antenna_record in data:
# Remove a stray newline.
antenna_record.pop(-1)
# Tweak call_sign.
call_sign = antenna_record[5]
if not call_sign:
call_sign = 'XXXX'
call_sign = call_sign.split(' ')[0]
antenna_record[5] = call_sign
# Tweak categories
remark = antenna_record[-2]
remark = category_swap(remark, 'Spanis', 'Spanish')
remark = category_swap(remark, 'Jaz', 'Jazz')
remark = category_swap(remark, '80', '80s')
remark = category_swap(remark, '90', '90s')
remark = category_swap(remark, 'Adult', 'Adult Contemporary')
remark = category_swap(remark, 'Altern', 'Alternative')
remark = category_swap(remark, 'CHR', 'Top Hits')
remark = category_swap(remark, 'Christ', 'Christian')
remark = category_swap(remark, 'Classi', 'Classical')
remark = category_swap(remark, 'Colleg', 'College Radio')
remark = category_swap(remark, 'commun', 'Community Radio')
remark = category_swap(remark, 'Commun', 'Community Radio')
remark = category_swap(remark, 'Countr', 'Country')
remark = category_swap(remark, 'Contem', 'Top Hits')
remark = category_swap(remark, 'dance', 'Dance')
remark = category_swap(remark, 'Eclect', 'Eclectic')
remark = category_swap(remark, 'Hot AC', 'Adult Contemporary')
remark = category_swap(remark, 'High S', 'High School Radio')
remark = category_swap(remark, 'Hip Ho', 'Hip Hop')
remark = category_swap(remark, 'News', 'News')
remark = category_swap(remark, 'NPR', 'Public Radio')
remark = category_swap(remark, 'Region', 'Regional Mexican')
remark = category_swap(remark, 'Relig', 'Religious')
remark = category_swap(remark, 'Rhyth', 'Top Hits')
remark = category_swap(remark, 'Smoot', 'Jazz')
remark = category_swap(remark, 'Soft A', 'Adult Contemporary')
remark = category_swap(remark, 'Soft R', 'Rock')
remark = category_swap(remark, 'South', 'Gospel')
remark = category_swap(remark, 'Variet', 'Variety')
remark = category_swap(remark, 'Top', 'Top Hits')
remark = category_swap(remark, 'AC', 'Adult Contemporary')
remark = category_swap(remark, 'AAA', '')
remark = category_swap(remark, 'jazz', 'Jazz')
remark = category_swap(remark, 'Indie', 'Indie')
remark = category_swap(remark, 'Hopi', 'Native American')
remark = category_swap(remark, 'Urban', 'Urban')
remark = category_swap(remark, 'easy', 'Easy Listening')
remark = category_swap(remark, 'PR', 'Public Radio')
remark = category_swap(remark, 'classi', 'News')
remark = category_swap(remark, 'Wind', 'Native American')
remark = category_swap(remark, 'Freefo', 'Freeform')
remark = category_swap(remark, 'Univ', 'University Radio')
#if not remark:
#continue
#elif 'Spanis' in remark:
#remark = 'Spanish'
#elif 'Jaz' in remark:
#remark = 'Jazz'
#elif '80' in remark:
antenna_record.append(remark)
return
def category_swap(partial_category, match_term, return_term):
if partial_category and match_term in partial_category:
return return_term
else:
return partial_category
def massage(data):
for antenna_record in data:
# Add in floating point coordinates.
coords = antenna_record[13]
if coords:
lon,lat = coords.lower().split("/")
lat = int(lat.split("n")[0]) + float(lat.split("n")[1])/60.
lon = int(lon.split("w")[0]) + float(lon.split("w")[1])/60.
lon = -lon
else:
lat, lon = None, None
antenna_record.append(lat)
antenna_record.append(lon)
# Get the call sign before any dashes.
call_sign = antenna_record[5]
call_sign = call_sign.split('-')[0]
antenna_record.insert(-2, call_sign)
return
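# Worked example of the coordinate conversion above; '117w30/33n58' is a
# made-up sample in the LONwMIN/LATnMIN format used by the fmlist data:
#
#   coords = '117w30/33n58'
#   lon, lat = coords.lower().split('/')
#   lat = int(lat.split('n')[0]) + float(lat.split('n')[1]) / 60.    # ~33.967
#   lon = -(int(lon.split('w')[0]) + float(lon.split('w')[1]) / 60.)  # -117.5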
if __name__ == '__main__':
#print "Parsing html"
#fmlist_data = parse_html_to_lists('/Users/mwoods/Work/OldJobs/Insight/Radio/highway-radio/raw_data/fmlist/fmlist_000001_0100.html')
#print "Clean data"
#clean(fmlist_data)
#print "Massaging data"
#massage(fmlist_data)
    #print "Inserting into db"
    #insert_into_mysql(fmlist_data)
#print "Done"
import glob
reset_table = True
for filename in glob.glob('/Users/mwoods/Work/OldJobs/Insight/Radio/highway-radio/raw_data/fmlist/fmlist*.html'):
print "Working on", filename
fmlist_data = parse_html_to_lists(filename)
clean(fmlist_data)
massage(fmlist_data)
        insert_into_mysql(fmlist_data, reset_table)
if reset_table:
reset_table = False
| mit |
waytai/django | django/middleware/cache.py | 372 | 7303 | """
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* This middleware expects a HEAD request to be answered with exactly the same
  response headers as the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key, get_max_age, has_vary_header, learn_cache_key,
patch_response_headers,
)
class UpdateCacheMiddleware(object):
"""
Response-phase cache middleware that updates the cache if the response is
cacheable.
Must be used as part of the two-part update/fetch cache middleware.
UpdateCacheMiddleware must be the first piece of middleware in
MIDDLEWARE_CLASSES so that it'll get called last during the response phase.
"""
def __init__(self):
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def _should_update_cache(self, request, response):
return hasattr(request, '_cache_update_cache') and request._cache_update_cache
def process_response(self, request, response):
"""Sets the cache, if needed."""
if not self._should_update_cache(request, response):
# We don't need to update the cache, just return.
return response
if response.streaming or response.status_code != 200:
return response
# Don't cache responses that set a user-specific (and maybe security
# sensitive) cookie in response to a cookie-less request.
if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
return response
# Try to get the timeout from the "max-age" section of the "Cache-
# Control" header before reverting to using the default cache_timeout
# length.
timeout = get_max_age(response)
if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't bother caching.
return response
patch_response_headers(response, timeout)
if timeout:
cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(
lambda r: self.cache.set(cache_key, r, timeout)
)
else:
self.cache.set(cache_key, response, timeout)
return response
class FetchFromCacheMiddleware(object):
"""
Request-phase cache middleware that fetches a page from the cache.
Must be used as part of the two-part update/fetch cache middleware.
FetchFromCacheMiddleware must be the last piece of middleware in
MIDDLEWARE_CLASSES so that it'll get called last during the request phase.
"""
def __init__(self):
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def process_request(self, request):
"""
Checks whether the page is already cached and returns the cached
version if available.
"""
if request.method not in ('GET', 'HEAD'):
request._cache_update_cache = False
return None # Don't bother checking the cache.
# try and get the cached GET response
cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = self.cache.get(cache_key)
# if it wasn't found and we are looking for a HEAD, try looking just for that
if response is None and request.method == 'HEAD':
cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)
response = self.cache.get(cache_key)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
# hit, return cached response
request._cache_update_cache = False
return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
"""
Cache middleware that provides basic behavior for many simple sites.
Also used as the hook point for the cache decorator, which is generated
using the decorator-from-middleware utility.
"""
def __init__(self, cache_timeout=None, **kwargs):
# We need to differentiate between "provided, but using default value",
# and "not provided". If the value is provided using a default, then
# we fall back to system defaults. If it is not provided at all,
# we need to use middleware defaults.
try:
key_prefix = kwargs['key_prefix']
if key_prefix is None:
key_prefix = ''
except KeyError:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.key_prefix = key_prefix
try:
cache_alias = kwargs['cache_alias']
if cache_alias is None:
cache_alias = DEFAULT_CACHE_ALIAS
except KeyError:
cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache_alias = cache_alias
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.cache_timeout = cache_timeout
self.cache = caches[self.cache_alias]
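# The per-view shortcut built from this class (via
# decorator_from_middleware_with_args) is the usual entry point; a hedged
# sketch, with example timeout and key_prefix values:
#
#   from django.views.decorators.cache import cache_page
#
#   @cache_page(60 * 15, key_prefix='site1')
#   def my_view(request):
#       ...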
| bsd-3-clause |
dwaynebailey/pootle | tests/models/virtualfolder.py | 7 | 7012 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from __future__ import absolute_import
from django.core.exceptions import ValidationError
import pytest
from pytest_pootle.factories import VirtualFolderDBFactory
from pootle_language.models import Language
from pootle_store.models import Store
from virtualfolder.models import VirtualFolder
@pytest.mark.django_db
def test_vfolder_priority_not_greater_than_zero(tp0):
"""Tests that the creation of a virtual folder fails if the provided
priority is not greater than zero.
"""
# Test priority less than zero.
vfolder_item = {
'name': "whatever",
'priority': -3,
'is_public': True,
'filter_rules': "browser/defines.po",
}
with pytest.raises(ValidationError) as excinfo:
VirtualFolder.objects.create(**vfolder_item)
assert u'Priority must be greater than zero.' in str(excinfo.value)
# Test zero priority.
vfolder_item['priority'] = 1
vfolder = VirtualFolder.objects.create(**vfolder_item)
vfolder.priority = 0
with pytest.raises(ValidationError) as excinfo:
vfolder.save()
assert u'Priority must be greater than zero.' in str(excinfo.value)
@pytest.mark.django_db
def test_vfolder_with_no_filter_rules():
"""Tests that the creation of a virtual folder fails if it doesn't have any
filter rules.
"""
vfolder_item = {
'name': "whatever",
'priority': 4,
'is_public': True,
'filter_rules': "",
}
with pytest.raises(ValidationError) as excinfo:
VirtualFolder.objects.create(**vfolder_item)
assert u'Some filtering rule must be specified.' in str(excinfo.value)
vfolder_item["filter_rules"] = "FOO"
vf = VirtualFolder.objects.create(**vfolder_item)
vf.filter_rules = ""
with pytest.raises(ValidationError) as excinfo:
vf.save()
assert u'Some filtering rule must be specified.' in str(excinfo.value)
@pytest.mark.django_db
def test_vfolder_membership(tp0, store0):
tp0_stores = ",".join(
p[len(tp0.pootle_path):]
for p in tp0.stores.values_list("pootle_path", flat=True))
vf0 = VirtualFolder.objects.create(
name="vf0",
title="the vf0",
filter_rules=store0.name)
vf0.projects.add(tp0.project)
vf0.languages.add(tp0.language)
vf0.save()
assert vf0.stores.count() == 1
assert vf0.stores.first() == store0
vf1 = VirtualFolder.objects.create(
name="vf1",
title="the vf1",
filter_rules=tp0_stores)
vf1.projects.add(tp0.project)
vf1.languages.add(tp0.language)
vf1.save()
assert (
list(vf1.stores.order_by("pk"))
== list(tp0.stores.order_by("pk")))
store_name = vf1.filter_rules.split(",")[0]
vf1.filter_rules = ",".join(vf1.filter_rules.split(",")[1:])
store = vf1.stores.get(name=store_name)
vf1.save()
assert store not in vf1.stores.all()
vf1.filter_rules = ",".join([store_name, vf1.filter_rules])
vf1.save()
assert store in vf1.stores.all()
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_vfolder_store_priorities(project0):
# remove the default vfolders and update units to reset priorities
VirtualFolder.objects.all().delete()
assert all(
priority == 1
for priority
in Store.objects.values_list("priority", flat=True))
vfolder0 = VirtualFolderDBFactory(filter_rules="store0.po", name="FOO")
vfolder0.priority = 3
vfolder0.save()
vfolder0_stores = vfolder0.stores.values_list("pk", flat=True)
assert all(
priority == 3
for priority
in Store.objects.filter(id__in=vfolder0_stores)
.values_list("priority", flat=True))
assert all(
priority == 1.0
for priority
in Store.objects.exclude(id__in=vfolder0_stores)
.values_list("priority", flat=True))
vfolder0.filter_rules = "store1.po"
vfolder0.save()
vfolder0_stores = vfolder0.stores.values_list("pk", flat=True)
assert all(
priority == 3
for priority
in Store.objects.filter(id__in=vfolder0_stores)
.values_list("priority", flat=True))
assert all(
priority == 1.0
for priority
in Store.objects.exclude(id__in=vfolder0_stores)
.values_list("priority", flat=True))
vfolder1 = VirtualFolderDBFactory(
filter_rules="store1.po")
vfolder1.languages.add(*Language.objects.all())
vfolder1.projects.add(project0)
vfolder1.priority = 4
vfolder1.save()
vfolder1_stores = vfolder1.stores.values_list("pk", flat=True)
assert all(
priority == 4.0
for priority
in Store.objects.filter(id__in=vfolder1_stores)
.values_list("priority", flat=True))
assert all(
priority == 3.0
for priority
in Store.objects.filter(id__in=vfolder0_stores)
.exclude(id__in=vfolder1_stores)
.values_list("priority", flat=True))
assert all(
priority == 1.0
for priority
in Store.objects.exclude(id__in=vfolder0_stores)
.exclude(id__in=vfolder1_stores)
.values_list("priority", flat=True))
@pytest.mark.django_db
def test_virtualfolder_repr():
vf = VirtualFolderDBFactory(filter_rules="store0.po")
assert (
"<VirtualFolder: %s>" % (vf.name)
== repr(vf))
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_vfolder_calc_priority(settings, store0):
vf = store0.vfolders.first()
vf.priority = 5
vf.save()
assert store0.calculate_priority() == 5.0
settings.INSTALLED_APPS.remove("virtualfolder")
assert store0.calculate_priority() == 1.0
settings.INSTALLED_APPS.append("virtualfolder")
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_vfolder_membership_new_store(tp0):
vf0 = VirtualFolder.objects.create(
name="vf0",
title="the vf0",
priority=7.0,
all_languages=True,
all_projects=True,
filter_rules="wierd.po")
wierd_store = Store.objects.create(
parent=tp0.directory,
translation_project=tp0,
name="wierd.po")
wierd_store.set_priority()
assert wierd_store in vf0.stores.all()
assert Store.objects.get(pk=wierd_store.pk).priority == 7
normal_store = Store.objects.create(
parent=tp0.directory,
translation_project=tp0,
name="normal.po")
assert normal_store not in vf0.stores.all()
assert Store.objects.get(pk=normal_store.pk).priority == 1.0
vf0.delete()
assert Store.objects.get(pk=wierd_store.pk).priority == 1.0
| gpl-3.0 |
pkruskal/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
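# Hedged usage sketch: two identical single-bicluster sets score 1.0.
#
#   >>> import numpy as np
#   >>> rows = np.array([[True, True, False, False]])
#   >>> cols = np.array([[True, False, True]])
#   >>> consensus_score((rows, cols), (rows, cols))
#   1.0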
| bsd-3-clause |
shaufi10/odoo | addons/portal_claim/__openerp__.py | 432 | 1643 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Claim',
'version': '0.1',
'complexity': 'easy',
'description': """
This module adds claim menu and features to your portal if claim and portal are installed.
==========================================================================================
""",
'author': 'OpenERP SA',
'depends': ['crm_claim','portal'],
'data': [
'portal_claim_view.xml',
'security/ir.model.access.csv',
'security/portal_security.xml',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mcanthony/nupic | src/nupic/support/datafiles.py | 40 | 7949 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# TODO for NUPIC 2 -- document the interface!
# TODO for NuPIC 2 -- should this move to inferenceanalysis?
def _calculateColumnsFromLine(line):
if "," in line:
splitLine = line.strip().split(",")
n = len(splitLine)
if n:
if not splitLine[-1].strip():
return n-1
else:
return n
else:
return 0
else:
# Too flexible.
# return len([x for x in line.strip().split() if x != ","])
return len(line.strip().split())
def _isComment(strippedLine):
if strippedLine:
return strippedLine.startswith("#")
else:
return True
def _calculateColumnsFromFile(f, format, rewind):
# Calculate the number of columns.
# We will put more trust in the second line that the first, in case the
# first line includes header entries.
if format not in [0, 2, 3]:
raise RuntimeError("Supported formats are 0, 2, and 3.")
if format == 0:
line0 = f.readline()
csplit = line0.split()
if len(csplit) != 1:
raise RuntimeError("Expected first line of data file to "
"contain a single number of columns. "
" Found %d fields" % len(csplit))
try:
numColumns = int(csplit[0])
except:
raise RuntimeError("Expected first line of data file to "
"contain a single number of columns. Found '%s'" % csplit[0])
if rewind:
f.seek(0)
return numColumns
elif format == 2:
numColumns = 0
numLinesRead = 0
for line in f:
strippedLine = line.strip()
if not _isComment(strippedLine):
curColumns = _calculateColumnsFromLine(strippedLine)
numLinesRead += 1
if numColumns and (numColumns != curColumns):
raise RuntimeError("Different lines have different "
"numbers of columns.")
else:
numColumns = curColumns
if numLinesRead > 1:
break
if rewind:
f.seek(0)
return numColumns
# CSV file: we'll just check the first line
elif format == 3:
strippedLine = f.readline().strip()
    numColumns = _calculateColumnsFromLine(strippedLine)
if rewind:
f.seek(0)
return numColumns
def processCategoryFile(f, format, categoryColumn=None, categoryColumns=None, count=1):
"""Read the data out of the given category file, returning a tuple
(categoryCount, listOfCategories)
@param f A file-like object containing the category info.
@param format The format of the category file. TODO: describe.
@param categoryColumn If non-None, this is the column number (zero-based)
where the category info starts in the file. If
None, indicates that the file only contains category
information (same as passing 0, but allows some
extra sanity checking).
@param categoryColumns Indicates how many categories are active per
timepoint (how many elements wide the category info
is). If 0, we'll determine this from the file. If
None (the default), means that the category info
is 1 element wide, and that the list we return
will just be a list of ints (rather than a list of
lists)
@param count Determines the size of chunks that will be aggregated
into a single entry. The default is 1, so each entry
from the file will be represented in the result. If
count > 1 then 'count' categories (all identical) will
be collapsed into a single entry. This is helpful for
aggregating explorers like EyeMovements where multiple
                          presentations are conceptually the same item.
@return categoryCount The number of categories (aka maxCat + 1)
@return allCategories A list of the categories read in, with one item per
time point. If 'categoryColumns' is None, each item
will be an int. Otherwise, each item will be a list
of ints. If count > 1 then the categories will be
aggregated, so that each chunk of 'count' categories
will result in only one entry (all categories in a chunk
must be identical)
"""
calculatedCategoryColumns = _calculateColumnsFromFile(f, format=format,
rewind=(format==2 or format==3))
# If the user passed categoryColumns as None, we'll return a list of ints
# directly; otherwise we'll return a list of lists...
wantListOfInts = (categoryColumns is None)
# Get arguments sanitized...
if categoryColumns == 0:
# User has told us to auto-calculate the # of categories / time point...
# If categoryColumn is not 0 or None, that's an error...
if categoryColumn:
raise RuntimeError("You can't specify an offset for category data "
"if using automatic width.")
categoryColumn = 0
categoryColumns = calculatedCategoryColumns
elif categoryColumns is None:
# User has told us that there's just one category...
if categoryColumn is None:
if calculatedCategoryColumns != 1:
raise RuntimeError("Category file must contain exactly one column.")
categoryColumn = 0
categoryColumns = 1
else:
# User specified exactly how big the category data is...
if (categoryColumns + categoryColumn) > calculatedCategoryColumns:
raise RuntimeError("Not enough categories in file")
maxCategory = 0
allCategories = []
for line in f:
strippedLine = line.strip()
if not _isComment(strippedLine):
if wantListOfInts:
category = int(strippedLine.split()[categoryColumn])
allCategories.append(category)
maxCategory = max(maxCategory, category)
else:
categories = strippedLine.split()[categoryColumn:
categoryColumn+categoryColumns]
categories = map(int, categories)
allCategories.append(categories)
maxCategory = max(maxCategory, max(categories))
categoryCount = maxCategory + 1
# Aggregate categories
result = []
if count > 1:
# Make sure there the number of categories can be aggregated
# exactly by chunks of size 'count'
assert len(allCategories) % count == 0
start = 0
for i in range(len(allCategories) / count):
end = start + count
# Make sure each chunk of size 'count' contains exactly one category
assert (min(allCategories[start:end]) == max(allCategories[start:end]))
# Add just one entry for each chunk
result.append(allCategories[start])
start = end
else:
result = allCategories
return categoryCount, result
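# Hedged usage sketch of processCategoryFile() with format 2 (one plain
# category column, '#' comments allowed); StringIO stands in for a file:
#
#   from StringIO import StringIO
#   f = StringIO("# category per timepoint\n0\n0\n1\n1\n")
#   categoryCount, cats = processCategoryFile(f, format=2)
#   # categoryCount == 2, cats == [0, 0, 1, 1]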
| agpl-3.0 |
ylatuya/jhbuild | jhbuild/modtypes/distutils.py | 2 | 4257 | # jhbuild - a build script for GNOME 1.x and 2.x
# Copyright (C) 2001-2006 James Henstridge
#
# distutils.py: Python distutils module type definitions.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__metaclass__ = type
import os
from jhbuild.errors import BuildStateError
from jhbuild.modtypes import \
Package, DownloadableModule, get_dependencies, get_branch, register_module_type
__all__ = [ 'DistutilsModule' ]
class DistutilsModule(Package, DownloadableModule):
"""Base type for modules that are distributed with a Python
Distutils style setup.py."""
type = 'distutils'
PHASE_CHECKOUT = DownloadableModule.PHASE_CHECKOUT
PHASE_FORCE_CHECKOUT = DownloadableModule.PHASE_FORCE_CHECKOUT
PHASE_BUILD = 'build'
PHASE_INSTALL = 'install'
def __init__(self, name, branch,
dependencies = [], after = [], suggests = [],
supports_non_srcdir_builds = True):
Package.__init__(self, name, dependencies, after, suggests)
self.branch = branch
self.supports_non_srcdir_builds = supports_non_srcdir_builds
def get_srcdir(self, buildscript):
return self.branch.srcdir
def get_builddir(self, buildscript):
if buildscript.config.buildroot and self.supports_non_srcdir_builds:
d = buildscript.config.builddir_pattern % (
self.branch.checkoutdir or self.branch.get_module_basename())
return os.path.join(buildscript.config.buildroot, d)
else:
return self.get_srcdir(buildscript)
def do_build(self, buildscript):
buildscript.set_action(_('Building'), self)
srcdir = self.get_srcdir(buildscript)
builddir = self.get_builddir(buildscript)
python = os.environ.get('PYTHON', 'python')
cmd = [python, 'setup.py', 'build']
if srcdir != builddir:
cmd.extend(['--build-base', builddir])
buildscript.execute(cmd, cwd = srcdir, extra_env = self.extra_env)
do_build.depends = [PHASE_CHECKOUT]
do_build.error_phase = [PHASE_FORCE_CHECKOUT]
def do_install(self, buildscript):
buildscript.set_action(_('Installing'), self)
srcdir = self.get_srcdir(buildscript)
builddir = self.get_builddir(buildscript)
python = os.environ.get('PYTHON', 'python')
cmd = [python, 'setup.py']
if srcdir != builddir:
cmd.extend(['build', '--build-base', builddir])
cmd.extend(['install', '--prefix', buildscript.config.prefix])
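# When building outside the source dir, the assembled command resembles
# (paths are illustrative):
#   python setup.py build --build-base <builddir> install --prefix <prefix>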
buildscript.execute(cmd, cwd = srcdir, extra_env = self.extra_env)
buildscript.packagedb.add(self.name, self.get_revision() or '')
do_install.depends = [PHASE_BUILD]
def xml_tag_and_attrs(self):
return 'distutils', [('id', 'name', None),
('supports-non-srcdir-builds',
'supports_non_srcdir_builds', True)]
def parse_distutils(node, config, uri, repositories, default_repo):
id = node.getAttribute('id')
supports_non_srcdir_builds = True
if node.hasAttribute('supports-non-srcdir-builds'):
supports_non_srcdir_builds = \
(node.getAttribute('supports-non-srcdir-builds') != 'no')
dependencies, after, suggests = get_dependencies(node)
branch = get_branch(node, repositories, default_repo, config)
return DistutilsModule(id, branch,
dependencies = dependencies, after = after,
suggests = suggests,
supports_non_srcdir_builds = supports_non_srcdir_builds)
register_module_type('distutils', parse_distutils)
| gpl-2.0 |
darksky83/plugin.video.tviplayer | resources/directory.py | 1 | 3837 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Author: darksky83
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import urllib
import xbmcgui
import xbmcplugin
from common_variables import *
# Function to add a Show directory
def addprograma(name, url, mode, iconimage, number_of_items, information, fanart_image=''):
if (fanart_image == ''):
if iconimage:
fanart_image = iconimage
u = sys.argv[0] + "?url=" + urllib.quote_plus(url) + "&mode=" + str(mode) + "&name=" + urllib.quote_plus(
name) + "&iconimage=" + urllib.quote_plus(fanart_image)
try:
u += "&plot=" + urllib.quote_plus(information["plot"])
except:
pass
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
if fanart_image == '':
liz.setProperty('fanart_image', os.path.join(artfolder, 'fanart.png'))
else:
liz.setProperty('fanart_image', fanart_image)
liz.setInfo(type="Video", infoLabels=information)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True,
totalItems=number_of_items)
return ok
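# The constructed plugin URL resembles (values are illustrative):
#   plugin://plugin.video.tviplayer/?url=<quoted>&mode=<n>&name=<quoted>&iconimage=<quoted>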
# Function to add an Episode
def addepisode(name, url, mode, iconimage, number_of_items, information, fanart_image):
u = sys.argv[0] + "?url=" + urllib.quote_plus(url) + "&mode=" + str(mode) + "&name=" + urllib.quote_plus(
name) + "&iconimage=" + urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
if fanart_image == '':
liz.setProperty('fanart_image', os.path.join(artfolder, 'fanart.png'))
else:
liz.setProperty('fanart_image', fanart_image)
liz.setInfo(type="Video", infoLabels=information)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=False,
totalItems=number_of_items)
return ok
# Function to add a video/audio Link
def addLink(name, url, iconimage, number_of_items):
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setProperty('fanart_image', os.path.join(artfolder, 'fanart.png'))
liz.setInfo(type="Video", infoLabels={"Title": name})
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=liz, isFolder=False,
totalItems=number_of_items)
return ok
# Function to add a regular directory
def addDir(name, url, mode, iconimage, number_of_items, pasta=True, informacion=None):
u = sys.argv[0] + "?url=" + urllib.quote_plus(url) + "&mode=" + str(mode) + "&name=" + urllib.quote_plus(name)
try:
u += "&plot=" + urllib.quote_plus(informacion["plot"])
except:
pass
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setProperty('fanart_image', os.path.join(artfolder, 'fanart.png'))
liz.setInfo(type="Video", infoLabels={"Title": name})
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=pasta,
totalItems=number_of_items)
return ok
| gpl-2.0 |
alesnav/p2ptv-pi | acestream/ACEStream/Core/DecentralizedTracking/pymdht/core/logging_conf.py | 4 | 1200 | #Embedded file name: ACEStream\Core\DecentralizedTracking\pymdht\core\logging_conf.pyo
import logging
import os
FORMAT = '%(asctime)s %(levelname)s %(filename)s:%(lineno)s - %(funcName)s()\n%(message)s\n'
try:
devnullstream = open('/dev/null', 'w')
except:
from ACEStream.Utilities.NullFile import *
devnullstream = NullFile()
logging.basicConfig(level=logging.CRITICAL, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', stream=devnullstream)
def testing_setup(module_name):
logger = logging.getLogger('dht')
logger.setLevel(logging.DEBUG)
filename = ''.join((str(module_name), '.log'))
logger_file = os.path.join('test_logs', filename)
logger_conf = logging.FileHandler(logger_file, 'w')
logger_conf.setLevel(logging.DEBUG)
logger_conf.setFormatter(logging.Formatter(FORMAT))
logger.addHandler(logger_conf)
def setup(logs_path, logs_level):
logger = logging.getLogger('dht')
logger.setLevel(logs_level)
logger_conf = logging.FileHandler(os.path.join(logs_path, 'dht.log'), 'w')
logger_conf.setLevel(logs_level)
logger_conf.setFormatter(logging.Formatter(FORMAT))
logger.addHandler(logger_conf)
| mit |
akarol/cfme_tests | scripts/install_snmp_listener.py | 5 | 2428 | #!/usr/bin/env python2
"""SSH into a running appliance and install SNMP listener."""
import argparse
import requests
import sys
from cfme.utils.conf import credentials
from cfme.utils.path import scripts_data_path
from cfme.utils.ssh import SSHClient
def main():
parser = argparse.ArgumentParser(epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('address', help='hostname or ip address of target appliance')
args = parser.parse_args()
ssh_kwargs = {
'username': credentials['ssh']['username'],
'password': credentials['ssh']['password'],
'hostname': args.address
}
# Init SSH client
with SSHClient(**ssh_kwargs) as ssh_client:
snmp_path = scripts_data_path.join("snmp")
# Copy
print("Copying files")
ssh_client.put_file(snmp_path.join("snmp_listen.rb").strpath, "/root/snmp_listen.rb")
ssh_client.put_file(snmp_path.join("snmp_listen.sh").strpath, "/root/snmp_listen.sh")
# Enable after startup
print("Enabling after startup")
status = ssh_client.run_command("grep 'snmp_listen[.]sh' /etc/rc.local")[0]
if status != 0:
ssh_client.run_command("echo 'cd /root/ && ./snmp_listen.sh start' >> /etc/rc.local")
assert ssh_client.run_command("grep 'snmp_listen[.]sh' /etc/rc.local")[0] == 0, \
"Could not enable!"
# Run!
print("Starting listener")
assert ssh_client.run_command("cd /root/ && ./snmp_listen.sh start")[0] == 0, \
"Could not start!"
# Open the port if not opened
print("Opening the port in iptables")
status = ssh_client.run_command("grep '--dport 8765' /etc/sysconfig/iptables")[0]
if status != 0:
# append after the 5432 entry
ssh_client.run_command(
"sed -i '/--dport 5432/a -A INPUT -p tcp -m tcp --dport 8765 -j ACCEPT' "
"/etc/sysconfig/iptables"
)
ssh_client.run_command("systemctl restart iptables")
# last ssh command, close
# Check if accessible
try:
requests.get("http://{}:8765/".format(args.address))
except requests.exceptions.ConnectionError:
print("Could not detect running listener!")
exit(2)
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 |
midnightmagic/p2pool | p2pool/util/forest.py | 281 | 13557 | '''
forest data structure
'''
import itertools
from p2pool.util import skiplist, variable
class TrackerSkipList(skiplist.SkipList):
def __init__(self, tracker):
skiplist.SkipList.__init__(self)
self.tracker = tracker
self.tracker.removed.watch_weakref(self, lambda self, item: self.forget_item(item.hash))
def previous(self, element):
return self.tracker._delta_type.from_element(self.tracker.items[element]).tail
class DistanceSkipList(TrackerSkipList):
def get_delta(self, element):
return element, 1, self.previous(element)
def combine_deltas(self, (from_hash1, dist1, to_hash1), (from_hash2, dist2, to_hash2)):
if to_hash1 != from_hash2:
raise AssertionError()
return from_hash1, dist1 + dist2, to_hash2
def initial_solution(self, start, (n,)):
return 0, start
def apply_delta(self, (dist1, to_hash1), (from_hash2, dist2, to_hash2), (n,)):
if to_hash1 != from_hash2:
raise AssertionError()
return dist1 + dist2, to_hash2
def judge(self, (dist, hash), (n,)):
if dist > n:
return 1
elif dist == n:
return 0
else:
return -1
def finalize(self, (dist, hash), (n,)):
assert dist == n
return hash
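# Usage sketch (illustrative): tracker.get_nth_parent_hash(item_hash, n) walks
# n previous_hash links back from item_hash and returns that ancestor's hash;
# the skip list amortizes repeated lookups instead of walking one link at a time.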
def get_attributedelta_type(attrs): # attrs: {name: func}
class ProtoAttributeDelta(object):
__slots__ = ['head', 'tail'] + attrs.keys()
@classmethod
def get_none(cls, element_id):
return cls(element_id, element_id, **dict((k, 0) for k in attrs))
@classmethod
def from_element(cls, item):
return cls(item.hash, item.previous_hash, **dict((k, v(item)) for k, v in attrs.iteritems()))
@staticmethod
def get_head(item):
return item.hash
@staticmethod
def get_tail(item):
return item.previous_hash
def __init__(self, head, tail, **kwargs):
self.head, self.tail = head, tail
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __add__(self, other):
assert self.tail == other.head
return self.__class__(self.head, other.tail, **dict((k, getattr(self, k) + getattr(other, k)) for k in attrs))
def __sub__(self, other):
if self.head == other.head:
return self.__class__(other.tail, self.tail, **dict((k, getattr(self, k) - getattr(other, k)) for k in attrs))
elif self.tail == other.tail:
return self.__class__(self.head, other.head, **dict((k, getattr(self, k) - getattr(other, k)) for k in attrs))
else:
raise AssertionError()
def __repr__(self):
return '%s(%r, %r%s)' % (self.__class__, self.head, self.tail, ''.join(', %s=%r' % (k, getattr(self, k)) for k in attrs))
ProtoAttributeDelta.attrs = attrs
return ProtoAttributeDelta
AttributeDelta = get_attributedelta_type(dict(
height=lambda item: 1,
))
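# Sketch of a custom delta type accumulating extra per-item attributes; the
# 'work' attribute and the attempts helper below are assumptions for
# illustration, not defined in this module (TrackerView.get_work expects a
# delta type that provides 'work'):
# WorkDelta = get_attributedelta_type(dict(
#     height=lambda item: 1,
#     work=lambda item: target_to_average_attempts(item.target),
# ))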
class TrackerView(object):
def __init__(self, tracker, delta_type):
self._tracker = tracker
self._delta_type = delta_type
self._deltas = {} # item_hash -> delta, ref
self._reverse_deltas = {} # ref -> set of item_hashes
self._ref_generator = itertools.count()
self._delta_refs = {} # ref -> delta
self._reverse_delta_refs = {} # delta.tail -> ref
self._tracker.remove_special.watch_weakref(self, lambda self, item: self._handle_remove_special(item))
self._tracker.remove_special2.watch_weakref(self, lambda self, item: self._handle_remove_special2(item))
self._tracker.removed.watch_weakref(self, lambda self, item: self._handle_removed(item))
def _handle_remove_special(self, item):
delta = self._delta_type.from_element(item)
if delta.tail not in self._reverse_delta_refs:
return
# move delta refs referencing children down to this, so they can be moved up in one step
for x in list(self._reverse_deltas.get(self._reverse_delta_refs.get(delta.head, object()), set())):
self.get_last(x)
assert delta.head not in self._reverse_delta_refs, list(self._reverse_deltas.get(self._reverse_delta_refs.get(delta.head, object()), set()))
if delta.tail not in self._reverse_delta_refs:
return
# move ref pointing to this up
ref = self._reverse_delta_refs[delta.tail]
cur_delta = self._delta_refs[ref]
assert cur_delta.tail == delta.tail
self._delta_refs[ref] = cur_delta - delta
assert self._delta_refs[ref].tail == delta.head
del self._reverse_delta_refs[delta.tail]
self._reverse_delta_refs[delta.head] = ref
def _handle_remove_special2(self, item):
delta = self._delta_type.from_element(item)
if delta.tail not in self._reverse_delta_refs:
return
ref = self._reverse_delta_refs.pop(delta.tail)
del self._delta_refs[ref]
for x in self._reverse_deltas.pop(ref):
del self._deltas[x]
def _handle_removed(self, item):
delta = self._delta_type.from_element(item)
# delete delta entry and ref if it is empty
if delta.head in self._deltas:
delta1, ref = self._deltas.pop(delta.head)
self._reverse_deltas[ref].remove(delta.head)
if not self._reverse_deltas[ref]:
del self._reverse_deltas[ref]
delta2 = self._delta_refs.pop(ref)
del self._reverse_delta_refs[delta2.tail]
def get_height(self, item_hash):
return self.get_delta_to_last(item_hash).height
def get_work(self, item_hash):
return self.get_delta_to_last(item_hash).work
def get_last(self, item_hash):
return self.get_delta_to_last(item_hash).tail
def get_height_and_last(self, item_hash):
delta = self.get_delta_to_last(item_hash)
return delta.height, delta.tail
def _get_delta(self, item_hash):
if item_hash in self._deltas:
delta1, ref = self._deltas[item_hash]
delta2 = self._delta_refs[ref]
res = delta1 + delta2
else:
res = self._delta_type.from_element(self._tracker.items[item_hash])
assert res.head == item_hash
return res
def _set_delta(self, item_hash, delta):
other_item_hash = delta.tail
if other_item_hash not in self._reverse_delta_refs:
ref = self._ref_generator.next()
assert ref not in self._delta_refs
self._delta_refs[ref] = self._delta_type.get_none(other_item_hash)
self._reverse_delta_refs[other_item_hash] = ref
del ref
ref = self._reverse_delta_refs[other_item_hash]
ref_delta = self._delta_refs[ref]
assert ref_delta.tail == other_item_hash
if item_hash in self._deltas:
prev_ref = self._deltas[item_hash][1]
self._reverse_deltas[prev_ref].remove(item_hash)
if not self._reverse_deltas[prev_ref] and prev_ref != ref:
self._reverse_deltas.pop(prev_ref)
x = self._delta_refs.pop(prev_ref)
self._reverse_delta_refs.pop(x.tail)
self._deltas[item_hash] = delta - ref_delta, ref
self._reverse_deltas.setdefault(ref, set()).add(item_hash)
def get_delta_to_last(self, item_hash):
assert isinstance(item_hash, (int, long, type(None)))
delta = self._delta_type.get_none(item_hash)
updates = []
while delta.tail in self._tracker.items:
updates.append((delta.tail, delta))
this_delta = self._get_delta(delta.tail)
delta += this_delta
for update_hash, delta_then in updates:
self._set_delta(update_hash, delta - delta_then)
return delta
def get_delta(self, item, ancestor):
assert self._tracker.is_child_of(ancestor, item)
return self.get_delta_to_last(item) - self.get_delta_to_last(ancestor)
class Tracker(object):
def __init__(self, items=[], delta_type=AttributeDelta):
self.items = {} # hash -> item
self.reverse = {} # delta.tail -> set of item_hashes
self.heads = {} # head hash -> tail_hash
self.tails = {} # tail hash -> set of head hashes
self.added = variable.Event()
self.remove_special = variable.Event()
self.remove_special2 = variable.Event()
self.removed = variable.Event()
self.get_nth_parent_hash = DistanceSkipList(self)
self._delta_type = delta_type
self._default_view = TrackerView(self, delta_type)
for item in items:
self.add(item)
def __getattr__(self, name):
attr = getattr(self._default_view, name)
setattr(self, name, attr)
return attr
def add(self, item):
assert not isinstance(item, (int, long, type(None)))
delta = self._delta_type.from_element(item)
if delta.head in self.items:
raise ValueError('item already present')
if delta.head in self.tails:
heads = self.tails.pop(delta.head)
else:
heads = set([delta.head])
if delta.tail in self.heads:
tail = self.heads.pop(delta.tail)
else:
tail = self.get_last(delta.tail)
self.items[delta.head] = item
self.reverse.setdefault(delta.tail, set()).add(delta.head)
self.tails.setdefault(tail, set()).update(heads)
if delta.tail in self.tails[tail]:
self.tails[tail].remove(delta.tail)
for head in heads:
self.heads[head] = tail
self.added.happened(item)
def remove(self, item_hash):
assert isinstance(item_hash, (int, long, type(None)))
if item_hash not in self.items:
raise KeyError()
item = self.items[item_hash]
del item_hash
delta = self._delta_type.from_element(item)
children = self.reverse.get(delta.head, set())
if delta.head in self.heads and delta.tail in self.tails:
tail = self.heads.pop(delta.head)
self.tails[tail].remove(delta.head)
if not self.tails[delta.tail]:
self.tails.pop(delta.tail)
elif delta.head in self.heads:
tail = self.heads.pop(delta.head)
self.tails[tail].remove(delta.head)
if self.reverse[delta.tail] != set([delta.head]):
pass # has sibling
else:
self.tails[tail].add(delta.tail)
self.heads[delta.tail] = tail
elif delta.tail in self.tails and len(self.reverse[delta.tail]) <= 1:
heads = self.tails.pop(delta.tail)
for head in heads:
self.heads[head] = delta.head
self.tails[delta.head] = set(heads)
self.remove_special.happened(item)
elif delta.tail in self.tails and len(self.reverse[delta.tail]) > 1:
heads = [x for x in self.tails[delta.tail] if self.is_child_of(delta.head, x)]
self.tails[delta.tail] -= set(heads)
if not self.tails[delta.tail]:
self.tails.pop(delta.tail)
for head in heads:
self.heads[head] = delta.head
assert delta.head not in self.tails
self.tails[delta.head] = set(heads)
self.remove_special2.happened(item)
else:
raise NotImplementedError()
self.items.pop(delta.head)
self.reverse[delta.tail].remove(delta.head)
if not self.reverse[delta.tail]:
self.reverse.pop(delta.tail)
self.removed.happened(item)
def get_chain(self, start_hash, length):
assert length <= self.get_height(start_hash)
for i in xrange(length):
item = self.items[start_hash]
yield item
start_hash = self._delta_type.get_tail(item)
def is_child_of(self, item_hash, possible_child_hash):
height, last = self.get_height_and_last(item_hash)
child_height, child_last = self.get_height_and_last(possible_child_hash)
if child_last != last:
return None # not connected, so can't be determined
height_up = child_height - height
return height_up >= 0 and self.get_nth_parent_hash(possible_child_hash, height_up) == item_hash
class SubsetTracker(Tracker):
def __init__(self, subset_of, **kwargs):
Tracker.__init__(self, **kwargs)
self.get_nth_parent_hash = subset_of.get_nth_parent_hash # overwrites Tracker.__init__'s
self._subset_of = subset_of
def add(self, item):
if self._subset_of is not None:
assert self._delta_type.get_head(item) in self._subset_of.items
Tracker.add(self, item)
def remove(self, item_hash):
if self._subset_of is not None:
assert item_hash in self._subset_of.items
Tracker.remove(self, item_hash)
| gpl-3.0 |
KapecK/or-tools | src/ortools/linear_solver/linear_solver_natural_api.py | 2 | 6571 | # Copyright 2010-2014 Google
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patch to the python wrapper of ../linear_solver.h providing an algebraic API.
This is directly imported, and used exclusively in ./linear_solver.swig. See that
file.
For examples leveraging the code defined here, see ./pywraplp_test.py and
../../../python/linear_programming.py.
"""
import numbers
# The classes below allow linear expressions to be expressed naturally with the
# usual arithmetic operators +-*/ and with constant numbers, which makes the
# python API very intuitive. See the top-level comment for examples.
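# A minimal usage sketch (names are illustrative, assuming a pywraplp solver
# whose MPVariable objects have been patched with these operator methods):
#   x = solver.NumVar(0.0, 10.0, 'x')
#   y = solver.NumVar(0.0, 10.0, 'y')
#   ct = 2 * x + 3 * y <= 7   # __mul__/__add__/__le__ build a LinearConstraint
#   solver.Add(ct)            # Extract() then creates the solver constraint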
inf = float('inf')
class _FakeMPVariableRepresentingTheConstantOffset(object):
"""A dummy class for a singleton instance used to represent the constant.
To represent linear expressions, we store a dictionary
MPVariable->coefficient. To represent the constant offset of the expression,
we use this class as a substitute: its coefficient will be the offset. To
properly be evaluated, its solution_value() needs to be 1.
"""
def solution_value(self): # pylint: disable=invalid-name
return 1
def __repr__(self):
return 'OFFSET_KEY'
OFFSET_KEY = _FakeMPVariableRepresentingTheConstantOffset()
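# For example, the expression (2 * x + 5) is held as the coefficient map
# {x: 2.0, OFFSET_KEY: 5.0}; since OFFSET_KEY.solution_value() is 1, summing
# var.solution_value() * coeff evaluates the constant offset correctly.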
def CastToLinExp(v):
if isinstance(v, numbers.Number):
return Constant(v)
else:
return v
class LinearExpr(object):
"""Holds linear expressions.
A linear expression is essentially an offset (floating-point value), and a
dictionary mapping MPVariable objects to their coefficient (which is also a
floating-point value).
"""
SUPPORTED_OPERATOR_METHODS = [
'__%s__' % opname
for opname in ['add', 'radd', 'sub', 'rsub', 'mul', 'rmul', 'div',
'truediv', 'neg', 'eq', 'ge', 'le']
]
def solution_value(self): # pylint: disable=invalid-name
"""Value of this linear expr, using the solution_value of its vars."""
coeffs = self.GetCoeffs()
return sum(var.solution_value() * coeff for var, coeff in coeffs.items())
def AddSelfToCoeffMap(self, coeffs, multiplier):
raise NotImplementedError
def GetCoeffs(self):
coeffs = {}
self.AddSelfToCoeffMap(coeffs, 1.0)
return coeffs
def __add__(self, expr):
return Sum(self, expr)
def __radd__(self, cst):
return Sum(self, cst)
def __sub__(self, expr):
return Sum(self, -expr)
def __rsub__(self, cst):
return Sum(-self, cst)
def __mul__(self, cst):
return ProductCst(self, cst)
def __rmul__(self, cst):
return ProductCst(self, cst)
def __div__(self, cst):
return ProductCst(self, 1.0 / cst)
def __truediv__(self, cst):
return ProductCst(self, 1.0 / cst)
def __neg__(self):
return ProductCst(self, -1)
def __eq__(self, arg):
if isinstance(arg, numbers.Number):
return LinearConstraint(self, arg, arg)
else:
return LinearConstraint(self - arg, 0.0, 0.0)
def __ge__(self, arg):
if isinstance(arg, numbers.Number):
return LinearConstraint(self, arg, inf)
else:
return LinearConstraint(self - arg, 0.0, inf)
def __le__(self, arg):
if isinstance(arg, numbers.Number):
return LinearConstraint(self, -inf, arg)
else:
return LinearConstraint(self - arg, -inf, 0.0)
class VariableExpr(LinearExpr):
"""Represents a LinearExpr containing only a single variable."""
def __init__(self, mpvar):
self.__var = mpvar
def AddSelfToCoeffMap(self, coeffs, multiplier):
coeffs[self.__var] = coeffs.get(self.__var, 0.0) + multiplier
class ProductCst(LinearExpr):
"""Represents the product of a LinearExpr by a constant."""
def __init__(self, expr, coef):
self.__expr = CastToLinExp(expr)
if isinstance(coef, numbers.Number):
self.__coef = coef
else:
raise TypeError
def __str__(self):
if self.__coef == -1:
return '-' + str(self.__expr)
else:
return '(' + str(self.__coef) + ' * ' + str(self.__expr) + ')'
def AddSelfToCoeffMap(self, coeffs, multiplier):
current_multiplier = multiplier * self.__coef
if current_multiplier:
self.__expr.AddSelfToCoeffMap(coeffs, current_multiplier)
class Constant(LinearExpr):
def __init__(self, val):
self.__val = val
def __str__(self):
return str(self.__val)
def AddSelfToCoeffMap(self, coeffs, multiplier):
coeffs[OFFSET_KEY] = coeffs.get(OFFSET_KEY, 0.0) + self.__val * multiplier
class SumArray(LinearExpr):
"""Represents the sum of a list of LinearExpr."""
def __init__(self, array):
self.__array = map(CastToLinExp, array)
def __str__(self):
return '({})'.format(' + '.join(map(str, self.__array)))
def AddSelfToCoeffMap(self, coeffs, multiplier):
for arg in self.__array:
arg.AddSelfToCoeffMap(coeffs, multiplier)
def Sum(*args):
return SumArray(args)
SumCst = Sum # pylint: disable=invalid-name
class LinearConstraint(object):
"""Represents a linear constraint: LowerBound <= LinearExpr <= UpperBound."""
def __init__(self, expr, lb, ub):
self.__expr = expr
self.__lb = lb
self.__ub = ub
def __str__(self):
if self.__lb > -inf and self.__ub < inf:
if self.__lb == self.__ub:
return str(self.__expr) + ' == ' + str(self.__lb)
else:
return (str(self.__lb) + ' <= ' + str(self.__expr) +
' <= ' + str(self.__ub))
elif self.__lb > -inf:
return str(self.__expr) + ' >= ' + str(self.__lb)
elif self.__ub < inf:
return str(self.__expr) + ' <= ' + str(self.__ub)
else:
return 'Trivial inequality (always true)'
def Extract(self, solver, name=''):
"""Performs the actual creation of the constraint object."""
coeffs = self.__expr.GetCoeffs()
constant = coeffs.pop(OFFSET_KEY, 0.0)
lb = -solver.infinity()
ub = solver.infinity()
if self.__lb > -inf:
lb = self.__lb - constant
if self.__ub < inf:
ub = self.__ub - constant
constraint = solver.RowConstraint(lb, ub, name)
for v, c in sorted(coeffs.items()):
constraint.SetCoefficient(v, float(c))
return constraint
| apache-2.0 |
sanger-pathogens/circlator | circlator/clean.py | 1 | 11703 | import os
import shutil
import tempfile
import pymummer
import pyfastaq
class Error (Exception): pass
class Cleaner:
def __init__(self,
infile,
outprefix,
min_contig_length=2000,
min_contig_percent_match=95,
nucmer_diagdiff=25,
nucmer_min_id=95,
nucmer_min_length=500,
nucmer_breaklen=500,
keepfile=None,
verbose=False
):
self.infile = os.path.abspath(infile)
self.outprefix = os.path.abspath(outprefix)
self.min_contig_length = min_contig_length
self.min_contig_percent_match = min_contig_percent_match
self.contigs_to_keep = self._get_contigs_to_keep(keepfile)
self.nucmer_diagdiff = nucmer_diagdiff
self.nucmer_min_id = nucmer_min_id
self.nucmer_min_length = nucmer_min_length
self.nucmer_breaklen = nucmer_breaklen
self.verbose = verbose
def _get_contigs_to_keep(self, filename):
'''Returns a set of names read from the file called filename. If filename is None, returns an empty set'''
if filename is None:
return set()
with open(filename) as f:
return {line.rstrip() for line in f}
def _remove_small_contigs(self, infile, outfile, keep=None):
'''Writes a new file with small contigs removed.
Returns a set of all contig names and a set of removed contig names'''
removed = set()
all_names = set()
if keep is None:
keep = set()
file_reader = pyfastaq.sequences.file_reader(infile)
fout = pyfastaq.utils.open_file_write(outfile)
for seq in file_reader:
all_names.add(seq.id)
if len(seq) >= self.min_contig_length or seq.id in keep:
print(seq, file=fout)
else:
removed.add(seq.id)
pyfastaq.utils.close(fout)
return all_names, removed
def _run_nucmer(self, infile, outfile):
'''Run nucmer of assembly against itself'''
n = pymummer.nucmer.Runner(
infile,
infile,
outfile,
min_id=self.nucmer_min_id,
min_length=self.nucmer_min_length,
diagdiff=self.nucmer_diagdiff,
maxmatch=True,
breaklen=self.nucmer_breaklen,
simplify=False,
verbose=self.verbose
)
n.run()
def _load_nucmer_hits(self, infile):
'''Returns two dictionaries:
1) name=>contig length.
2) nucmer hits (ignoring self matches):
contig name => list of hits'''
hits = {}
lengths = {}
file_reader = pymummer.coords_file.reader(infile)
for al in file_reader:
if al.qry_name == al.ref_name:
continue
elif al.qry_name not in hits:
hits[al.qry_name] = []
hits[al.qry_name].append(al)
lengths[al.qry_name] = al.qry_length
lengths[al.ref_name] = al.ref_length
return lengths, hits
def _contains(self, hit):
'''Returns True iff (the query contig is contained in the reference contig and
the query contig is not flagged to be kept)'''
return (
hit.qry_name not in self.contigs_to_keep
and hit.qry_name != hit.ref_name
and (100 * hit.hit_length_qry / hit.qry_length >= self.min_contig_percent_match)
and hit.percent_identity >= self.nucmer_min_id
)
def _containing_contigs(self, hits):
'''Given a list of hits, all with the same query,
returns a set of the contigs containing that query'''
return {hit.ref_name for hit in hits if self._contains(hit)}
def _get_containing_contigs(self, hits_dict):
'''Given dictionary of nucmer hits (made by self._load_nucmer_hits()), returns a dictionary.
key=contig name. Value = set of contigs that contain the key.'''
containing = {}
for qry_name in hits_dict:
d = self._containing_contigs(hits_dict[qry_name])
if len(d):
containing[qry_name] = d
return containing
def _get_all_containing(self, containing_contigs, name, exclude=None, max_depth=10):
'''containing_contigs is a dict:
key=contig name. Value = set of contigs that contain the key.
Returns all contigs that contain the contig called "name"'''
contains_name = set()
# failsafe to prevent infinite recursion
if max_depth < 0:
return contains_name
if name in containing_contigs:
for containing_contig in containing_contigs[name]:
# if we have a contains b and b contains a, then this stops infinite recursion
if containing_contig==exclude:
continue
contains_name.add(containing_contig)
new_names = self._get_all_containing(containing_contigs, containing_contig, exclude=name,max_depth=max_depth-1)
new_names.discard(name)
contains_name.update(new_names)
return contains_name
def _expand_containing_using_transitivity(self, containing_contigs):
'''This uses a contined in b, and b contained in c to force a contained in c.
Just in case a contained in c wasn't already found by nucmer'''
for name in containing_contigs:
containing_contigs[name] = self._get_all_containing(containing_contigs, name)
return containing_contigs
def _collapse_list_of_sets(self, sets):
'''Input is a list of sets. Merges any intersecting sets in the list'''
found = True
while found:
found = False
to_intersect = None
for i in range(len(sets)):
for j in range(len(sets)):
if i == j:
continue
elif sets[i].intersection(sets[j]):
to_intersect = i, j
break
if to_intersect is not None:
break
if to_intersect is not None:
found = True
i, j = to_intersect
sets[i].update(sets[j])
sets.pop(j)
return sets
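# Illustrative example (not from the original file):
# _collapse_list_of_sets([{1, 2}, {2, 3}, {4}]) returns [{1, 2, 3}, {4}].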
def _get_identical_contigs(self, hits_dict):
'''Input is a dict:
key=contig name. Value = set of contigs that contain the key.
Returns a list of sets of contigs that are equivalent'''
equivalent_contigs = []
for qry_name, containing in hits_dict.items():
equivalent = set()
for containing_name in containing:
if containing_name in hits_dict and qry_name in hits_dict[containing_name]:
equivalent.add(containing_name)
equivalent.add(qry_name)
if len(equivalent):
equivalent_contigs.append(equivalent)
equivalent_contigs = self._collapse_list_of_sets(equivalent_contigs)
return equivalent_contigs
def _longest_contig(self, contig_set, contig_lengths):
'''Returns the name of the longest contig, from the set of names contig_set. contig_lengths
is expected to be a dictionary of contig name => length.'''
longest_name = None
max_length = -1
for name in contig_set:
if contig_lengths[name] > max_length:
longest_name = name
max_length = contig_lengths[name]
assert max_length != -1
assert longest_name is not None
return longest_name
def _remove_identical_contigs(self, containing_contigs, contig_lengths):
'''Input is dictionary of containing contigs made by self._expand_containing_using_transitivity().
Removes redundant identical contigs, leaving one representative (the longest) of
each set of identical contigs.
Returns new version of dictionary, and a dictionary of contig name => contig it was replaced with'''
identical_contigs = self._get_identical_contigs(containing_contigs)
to_replace = {} # contig name => name to replace it with
for contig_set in identical_contigs:
longest_contig = self._longest_contig(contig_set, contig_lengths)
for name in contig_set - {longest_contig}:
assert name not in to_replace
to_replace[name] = longest_contig
for name, replace_with in to_replace.items():
if replace_with not in containing_contigs:
containing_contigs[replace_with] = set()
if name in containing_contigs:
containing_contigs[replace_with].update(containing_contigs[name])
del containing_contigs[name]
to_delete = set()
for name, names_set in containing_contigs.items():
assert name not in to_replace
new_set = {to_replace.get(x, x) for x in names_set}
new_set.discard(name)
if len(new_set) > 0:
containing_contigs[name] = new_set
else:
to_delete.add(name)
for name in to_delete:
del containing_contigs[name]
return containing_contigs, to_replace
def _clean_contigs(self, infile, outfile, containing_contigs, replaced_contigs):
file_reader = pyfastaq.sequences.file_reader(infile)
fout = pyfastaq.utils.open_file_write(outfile)
for seq in file_reader:
if seq.id not in replaced_contigs and seq.id not in containing_contigs:
print(seq, file=fout)
pyfastaq.utils.close(fout)
def _write_log(self, outfile, prefix, all_contigs, small_removed, containing_contigs, replaced_contigs):
f = pyfastaq.utils.open_file_write(outfile)
for name in sorted(all_contigs):
if name in self.contigs_to_keep:
print(prefix, name, 'user_kept', sep='\t', file=f)
elif name in small_removed:
print(prefix, name, 'small_removed', sep='\t', file=f)
elif name in containing_contigs:
print(prefix, name, 'contained in', '\t'.join(sorted(containing_contigs[name])), sep='\t', file=f)
elif name in replaced_contigs:
print(prefix, name, 'replaced with', replaced_contigs[name], sep='\t', file=f)
else:
print(prefix, name, 'kept', sep='\t', file=f)
pyfastaq.utils.close(f)
def run(self):
removed_small_file = self.outprefix + '.remove_small.fa'
names_all, names_small = self._remove_small_contigs(self.infile, removed_small_file, keep=self.contigs_to_keep)
nucmer_coords_file = self.outprefix + '.coords'
self._run_nucmer(removed_small_file, nucmer_coords_file)
contig_lengths, nucmer_hits = self._load_nucmer_hits(nucmer_coords_file)
containing_contigs = self._get_containing_contigs(nucmer_hits)
if self.verbose and len(containing_contigs) > 0:
print('\nContig\tContained in')
for x in containing_contigs:
print(x, containing_contigs[x])
print()
containing_contigs = self._expand_containing_using_transitivity(containing_contigs)
containing_contigs, replaced_contigs = self._remove_identical_contigs(containing_contigs, contig_lengths)
self._clean_contigs(removed_small_file, self.outprefix + '.fasta', containing_contigs, replaced_contigs)
self._write_log(self.outprefix + '.log', '[clean]', names_all, names_small, containing_contigs, replaced_contigs)
| gpl-3.0 |
antsant/namebench | tools/add_linkcount_and_version_to_csv.py | 174 | 1351 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add link count and version to csv"""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import csv
import check_nameserver_popularity
import sys
reader = csv.reader(open(sys.argv[1]))
writer = csv.writer(open('output.csv', 'w'))
sys.path.append('..')
#sys.path.append('/Users/tstromberg/namebench')
import third_party
from libnamebench import addr_util
from libnamebench import nameserver
for row in reader:
ip = row[0]
ns = nameserver.NameServer(ip)
ns.timeout = 0.5
ns.health_timeout = 0.5
try:
link_count = len(check_nameserver_popularity.GetUrls(ip))
except:
link_count = ''
row.insert(-1, link_count)
row.append(ns.version or '')
print "%s: %s" % (ip, ns.version)
writer.writerow(row)
| apache-2.0 |
wilvk/ansible | test/units/modules/network/f5/test_bigip_hostname.py | 28 | 3226 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigip_hostname import Parameters
from library.bigip_hostname import ModuleManager
from library.bigip_hostname import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_hostname import Parameters
from ansible.modules.network.f5.bigip_hostname import ModuleManager
from ansible.modules.network.f5.bigip_hostname import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
hostname='foo.internal.com'
)
p = Parameters(params=args)
assert p.hostname == 'foo.internal.com'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_update_hostname(self, *args):
set_module_args(dict(
hostname='foo2.internal.com',
password='passsword',
server='localhost',
user='admin'
))
# Configure the parameters that would be returned by querying the
# remote device
current = Parameters(
dict(
hostname='foo.internal.com'
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['hostname'] == 'foo2.internal.com'
| gpl-3.0 |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_3/django/contrib/messages/tests/base.py | 152 | 17772 | import warnings
from django import http
from django.test import TestCase
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.utils.unittest import skipIf
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
def skipUnlessAuthIsInstalled(func):
return skipIf(
'django.contrib.auth' not in settings.INSTALLED_APPS,
"django.contrib.auth isn't installed")(func)
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class BaseTest(TestCase):
storage_class = default_storage
restore_settings = ['MESSAGE_LEVEL', 'MESSAGE_TAGS']
urls = 'django.contrib.messages.tests.urls'
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self._remembered_settings = {}
for setting in self.restore_settings:
if hasattr(settings, setting):
self._remembered_settings[setting] = getattr(settings, setting)
delattr(settings._wrapped, setting)
# Backup these manually because we do not want them deleted.
self._middleware_classes = settings.MIDDLEWARE_CLASSES
self._template_context_processors = \
settings.TEMPLATE_CONTEXT_PROCESSORS
self._installed_apps = settings.INSTALLED_APPS
self._message_storage = settings.MESSAGE_STORAGE
settings.MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__)
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = ()
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.auth.models')
def tearDown(self):
for setting in self.restore_settings:
self.restore_setting(setting)
# Restore these manually (see above).
settings.MIDDLEWARE_CLASSES = self._middleware_classes
settings.TEMPLATE_CONTEXT_PROCESSORS = \
self._template_context_processors
settings.INSTALLED_APPS = self._installed_apps
settings.MESSAGE_STORAGE = self._message_storage
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
self.restore_warnings_state()
def restore_setting(self, setting):
if setting in self._remembered_settings:
value = self._remembered_settings.pop(setting)
setattr(settings, setting, value)
elif hasattr(settings, setting):
delattr(settings._wrapped, setting)
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This method prevents the storage ``_get`` method from being called so
that other parts of the storage backend can be tested independently of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
messages = [Message(self.levels[level], msg) for msg in
data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
def test_with_template_response(self):
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show_template_response')
for level in self.levels.keys():
add_url = reverse('django.contrib.messages.tests.urls.add_template_response',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend([Message(self.levels[level], msg) for msg in
data['messages']])
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@skipUnlessAuthIsInstalled
def test_middleware_disabled_auth_user(self):
"""
Tests that the messages API successfully falls back to using
user.message_set to store messages directly when the middleware is
disabled.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
user = User.objects.create_user('test', 'test@example.com', 'test')
self.client.login(username='test', password='test')
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
context_messages = list(response.context['messages'])
for msg in data['messages']:
self.assertTrue(msg in context_messages)
self.assertContains(response, msg)
def test_middleware_disabled_anon_user(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, an exception is raised when one attempts to store a message.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
def test_middleware_disabled_anon_user_fail_silently(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, an exception is not raised if 'fail_silently' = True
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
'fail_silently': True,
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), [])
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
settings.MESSAGE_LEVEL = 29
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
def test_custom_tags(self):
settings.MESSAGE_TAGS = {
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
base.LEVEL_TAGS = utils.get_level_tags()
try:
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
finally:
# Ensure the level tags constant is put back like we found it.
self.restore_setting('MESSAGE_TAGS')
base.LEVEL_TAGS = utils.get_level_tags()
| mit |
rhattersley/iris-grib | lib/iris/tests/unit/fileformats/__init__.py | 18 | 2980 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :mod:`iris.fileformats` package."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import iris.tests as tests
class TestField(tests.IrisTest):
def _test_for_coord(self, field, convert, coord_predicate, expected_points,
expected_bounds):
(factories, references, standard_name, long_name, units,
attributes, cell_methods, dim_coords_and_dims,
aux_coords_and_dims) = convert(field)
# Check for one and only one matching coordinate.
coords_and_dims = dim_coords_and_dims + aux_coords_and_dims
matching_coords = [coord for coord, _ in coords_and_dims if
coord_predicate(coord)]
self.assertEqual(len(matching_coords), 1, str(matching_coords))
coord = matching_coords[0]
# Check points and bounds.
if expected_points is not None:
self.assertArrayEqual(coord.points, expected_points)
if expected_bounds is None:
self.assertIsNone(coord.bounds)
else:
self.assertArrayEqual(coord.bounds, expected_bounds)
def assertCoordsAndDimsListsMatch(self, coords_and_dims_got,
coords_and_dims_expected):
"""
Check that coords_and_dims lists are equivalent.
The arguments are lists of pairs of (coordinate, dimensions).
The elements are compared one-to-one, by coordinate name (so the order
of the lists is _not_ significant).
It also checks that the coordinate types (DimCoord/AuxCoord) match.
"""
def sorted_by_coordname(list):
return sorted(list, key=lambda item: item[0].name())
coords_and_dims_got = sorted_by_coordname(coords_and_dims_got)
coords_and_dims_expected = sorted_by_coordname(
coords_and_dims_expected)
self.assertEqual(coords_and_dims_got, coords_and_dims_expected)
# Also check coordinate type equivalences (as Coord.__eq__ does not).
self.assertEqual(
[type(coord) for coord, dims in coords_and_dims_got],
[type(coord) for coord, dims in coords_and_dims_expected])
| gpl-3.0 |
JD86/binplist | setup.py | 3 | 1065 | #!/bin/env python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import binplist
setup(name="binplist",
version=binplist.__version__,
description="A binary plist parser",
author="Jordi Sanchez",
author_email=binplist.__feedback_email__,
url="http://code.google.com/p/binplist",
license="Apache Software License",
packages=["binplist"],
test_suite = "tests",
scripts=['scripts/plist.py'],
install_requires=["pytz"],
)
| apache-2.0 |
tylerclair/py3canvas | py3canvas/tests/account_notifications.py | 1 | 2183 | """AccountNotifications API Tests for Version 1.0.
This is a testing template for the generated AccountNotificationsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.account_notifications import AccountNotificationsAPI
from py3canvas.apis.account_notifications import Accountnotification
class TestAccountNotificationsAPI(unittest.TestCase):
"""Tests for the AccountNotificationsAPI."""
def setUp(self):
self.client = AccountNotificationsAPI(secrets.instance_address, secrets.access_token)
def test_index_of_active_global_notification_for_user(self):
"""Integration test for the AccountNotificationsAPI.index_of_active_global_notification_for_user method."""
account_id = None # Change me!!
user_id = None # Change me!!
r = self.client.index_of_active_global_notification_for_user(user_id, account_id)
def test_show_global_notification(self):
"""Integration test for the AccountNotificationsAPI.show_global_notification method."""
account_id = None # Change me!!
user_id = None # Change me!!
id = None # Change me!!
r = self.client.show_global_notification(id, user_id, account_id)
def test_close_notification_for_user(self):
"""Integration test for the AccountNotificationsAPI.close_notification_for_user method."""
account_id = None # Change me!!
user_id = None # Change me!!
id = None # Change me!!
r = self.client.close_notification_for_user(id, user_id, account_id)
def test_create_global_notification(self):
"""Integration test for the AccountNotificationsAPI.create_global_notification method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_update_global_notification(self):
"""Integration test for the AccountNotificationsAPI.update_global_notification method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
| mit |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-0.96/django/core/cache/backends/locmem.py | 33 | 1423 | "Thread-safe in-memory cache backend."
from django.core.cache.backends.simple import CacheClass as SimpleCacheClass
from django.utils.synch import RWLock
import copy, time
class CacheClass(SimpleCacheClass):
def __init__(self, host, params):
SimpleCacheClass.__init__(self, host, params)
self._lock = RWLock()
def get(self, key, default=None):
should_delete = False
self._lock.reader_enters()
try:
now = time.time()
exp = self._expire_info.get(key)
if exp is None:
return default
elif exp < now:
should_delete = True
else:
return copy.deepcopy(self._cache[key])
finally:
self._lock.reader_leaves()
if should_delete:
self._lock.writer_enters()
try:
del self._cache[key]
del self._expire_info[key]
return default
finally:
self._lock.writer_leaves()
def set(self, key, value, timeout=None):
self._lock.writer_enters()
try:
SimpleCacheClass.set(self, key, value, timeout)
finally:
self._lock.writer_leaves()
def delete(self, key):
self._lock.writer_enters()
try:
SimpleCacheClass.delete(self, key)
finally:
self._lock.writer_leaves()
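# Selected in Django 0.96 via the CACHE_BACKEND setting, for example:
#   CACHE_BACKEND = 'locmem:///'
# (illustrative; see the cache documentation shipped with that release)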
| lgpl-3.0 |
glove747/liberty-neutron | neutron/agent/l3/item_allocator.py | 25 | 4083 | # Copyright 2015 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
class ItemAllocator(object):
"""Manages allocation of items from a pool
Some of the allocations such as link local addresses used for routing
inside the fip namespaces need to persist across agent restarts to maintain
consistency. Persisting such allocations in the neutron database is
unnecessary and would degrade performance. ItemAllocator utilizes local
file system to track allocations made for objects of a given class.
The persistent datastore is a file. The records are one per line of
the format: key<delimiter>value. For example if the delimiter is a ','
(the default value) then the records will be: key,value (one per line)
"""
def __init__(self, state_file, ItemClass, item_pool, delimiter=','):
"""Read the file with previous allocations recorded.
See the note in the allocate method for more detail.
"""
self.ItemClass = ItemClass
self.state_file = state_file
self.allocations = {}
self.remembered = {}
self.pool = item_pool
for line in self._read():
key, saved_value = line.strip().split(delimiter)
self.remembered[key] = self.ItemClass(saved_value)
self.pool.difference_update(self.remembered.values())
def allocate(self, key):
"""Try to allocate an item of ItemClass type.
I expect this to work in all cases because I expect the pool size to be
large enough for any situation. Nonetheless, there is some defensive
programming in here.
Since the allocations are persisted, there is the chance to leak
allocations which should have been released but were not. This leak
could eventually exhaust the pool.
So, if a new allocation is needed, the code first checks to see if
there are any remembered allocations for the key. If not, it checks
the free pool. If the free pool is empty then it dumps the remembered
allocations to free the pool. This final desperate step will not
happen often in practice.
"""
if key in self.remembered:
self.allocations[key] = self.remembered.pop(key)
return self.allocations[key]
if not self.pool:
# Desperate times. Try to get more in the pool.
self.pool.update(self.remembered.values())
self.remembered.clear()
if not self.pool:
# More than 256 routers on a compute node!
raise RuntimeError("Cannot allocate item of type:"
" %s from pool using file %s"
% (self.ItemClass, self.state_file))
self.allocations[key] = self.pool.pop()
self._write_allocations()
return self.allocations[key]
def release(self, key):
self.pool.add(self.allocations.pop(key))
self._write_allocations()
def _write_allocations(self):
current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()]
remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()]
current.extend(remembered)
self._write(current)
def _write(self, lines):
with open(self.state_file, "w") as f:
f.writelines(lines)
def _read(self):
if not os.path.exists(self.state_file):
return []
with open(self.state_file) as f:
return f.readlines()
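# A minimal usage sketch (illustrative only; the state file path, ItemClass
# and pool contents below are hypothetical, not taken from this module):
#
#   pool = set(range(256))
#   allocator = ItemAllocator('/tmp/link_local.state', int, pool)
#   value = allocator.allocate('router-1')   # persisted to the state file
#   allocator.release('router-1')            # returned to the free pool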
| apache-2.0 |
BruceDai/crosswalk-test-suite | misc/webapi-service-tests/inst.py | 19 | 3212 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
print cmd
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
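# Example invocations (the device serial is illustrative):
#   ./inst.py -s 0123456789ABCDEF -i   # install every .apk under this dir
#   ./inst.py -s 0123456789ABCDEF -u   # uninstall those packages again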
| bsd-3-clause |
RabbitMC/Autofind | mean/node_modules/node-gyp/gyp/PRESUBMIT.py | 1369 | 3662 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Possible unbalanced tuple unpacking with sequence.
'W0632',
# Attempting to unpack a non-sequence.
'W0633',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# map/filter on lambda could be replaced by comprehension.
'W0110',
# Use of eval.
'W0123',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Cyclic import.
'R0401',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
TRYBOTS = [
'linux_try',
'mac_try',
'win_try',
]
def GetPreferredTryMasters(_, change):
return {
'client.gyp': { t: set(['defaulttests']) for t in TRYBOTS },
}
| mit |
Zhaoyanzhang/-myflasky | venv/lib/python2.7/site-packages/sqlalchemy/dialects/firebird/kinterbasdb.py | 33 | 6300 | # firebird/kinterbasdb.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird+kinterbasdb
:name: kinterbasdb
:dbapi: kinterbasdb
:connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db\
[?key=value&key=value...]
:url: http://firebirdsql.org/index.php?op=devel&sub=python
Arguments
----------
The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect.
In addition, it also accepts the following:
* ``type_conv`` - select the kind of mapping done on the types: by default
SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
the linked documents below for further information.
* ``concurrency_level`` - set the backend policy with regards to threading
issues: by default SQLAlchemy uses policy 1. See the linked documents
below for further information.
.. seealso::
http://sourceforge.net/projects/kinterbasdb
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
"""
from .base import FBDialect, FBExecutionContext
from ... import util, types as sqltypes
from re import match
import decimal
class _kinterbasdb_numeric(object):
def bind_processor(self, dialect):
def process(value):
if isinstance(value, decimal.Decimal):
return str(value)
else:
return value
return process
class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
pass
class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
pass
class FBExecutionContext_kinterbasdb(FBExecutionContext):
@property
def rowcount(self):
if self.execution_options.get('enable_rowcount',
self.dialect.enable_rowcount):
return self.cursor.rowcount
else:
return -1
class FBDialect_kinterbasdb(FBDialect):
driver = 'kinterbasdb'
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
execution_ctx_cls = FBExecutionContext_kinterbasdb
supports_native_decimal = True
colspecs = util.update_copy(
FBDialect.colspecs,
{
sqltypes.Numeric: _FBNumeric_kinterbasdb,
sqltypes.Float: _FBFloat_kinterbasdb,
}
)
def __init__(self, type_conv=200, concurrency_level=1,
enable_rowcount=True,
retaining=False, **kwargs):
super(FBDialect_kinterbasdb, self).__init__(**kwargs)
self.enable_rowcount = enable_rowcount
self.type_conv = type_conv
self.concurrency_level = concurrency_level
self.retaining = retaining
if enable_rowcount:
self.supports_sane_rowcount = True
@classmethod
def dbapi(cls):
return __import__('kinterbasdb')
def do_execute(self, cursor, statement, parameters, context=None):
# kinterbase does not accept a None, but wants an empty list
# when there are no arguments.
cursor.execute(statement, parameters or [])
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback(self.retaining)
def do_commit(self, dbapi_connection):
dbapi_connection.commit(self.retaining)
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if opts.get('port'):
opts['host'] = "%s/%s" % (opts['host'], opts['port'])
del opts['port']
opts.update(url.query)
util.coerce_kw_type(opts, 'type_conv', int)
type_conv = opts.pop('type_conv', self.type_conv)
concurrency_level = opts.pop('concurrency_level',
self.concurrency_level)
if self.dbapi is not None:
initialized = getattr(self.dbapi, 'initialized', None)
if initialized is None:
# CVS rev 1.96 changed the name of the attribute:
# http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/
# Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
initialized = getattr(self.dbapi, '_initialized', False)
if not initialized:
self.dbapi.init(type_conv=type_conv,
concurrency_level=concurrency_level)
return ([], opts)
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
fbconn = connection.connection
version = fbconn.server_version
return self._parse_version_info(version)
def _parse_version_info(self, version):
m = match(
r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % version)
if m.group(5) is not None:
return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird'])
else:
return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase'])
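# e.g. _parse_version_info('LI-V6.3.3.12981 Firebird 2.0')
#      returns (2, 0, 12981, 'firebird')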
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,
self.dbapi.ProgrammingError)):
msg = str(e)
return ('Unable to complete network request to host' in msg or
'Invalid connection state' in msg or
'Invalid cursor state' in msg or
'connection shutdown' in msg)
else:
return False
dialect = FBDialect_kinterbasdb
| mit |
fernandog/Medusa | tests/legacy/media/show_poster_tests.py | 1 | 1103 | # coding=utf-8
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
"""Test ShowPoster."""
from __future__ import print_function
from medusa.media.poster import ShowPoster
from medusa.tv import Series
from tests.legacy.media.generic_media_tests import GenericMediaTests
class ShowPosterTests(GenericMediaTests):
"""Test ShowPoster."""
def test_default_media_name(self):
series_obj = Series(1, 70726)
self.assertEqual(ShowPoster(series_obj, '').default_media_name, 'poster.png')
| gpl-3.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/remote/models/spm_end_hosts_not_present_grid_remote.py | 1 | 7081 | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class SpmEndHostsNotPresentGridRemote(RemoteModel):
"""
This table lists end hosts that were last seen by NetMRI before, but not during, the user-specified period of time.
| ``id:`` The internal NetMRI identifier of the grid entry.
| ``attribute type:`` number
| ``Network:`` The name of the Network View associated.
| ``attribute type:`` string
| ``NeighborDeviceID:`` The internal NetMRI identifier for the end host in this neighbor relationship.
| ``attribute type:`` number
| ``NeighborType:`` The NetMRI-determined device type for this end host.
| ``attribute type:`` string
| ``NeighborIPDotted:`` The management IP address of this end host, in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``NeighborIPNumeric:`` The numerical value of the end host IP address.
| ``attribute type:`` number
| ``NeighborName:`` The NetMRI name of the end host; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
| ``NeighborMAC:`` The Media Access Control (MAC) address of the end host.
| ``attribute type:`` string
| ``NeighborIfIndex:`` The SNMP interface index of the end host interface.
| ``attribute type:`` string
| ``OrgUniqueId:`` Organizational unique identifier of the end host.
| ``attribute type:`` string
| ``NetBIOSName:`` The NetBIOS name of the end host.
| ``attribute type:`` string
| ``FirstSeen:`` The timestamp of when NetMRI first discovered this end host.
| ``attribute type:`` datetime
| ``LastSeen:`` The timestamp of when NetMRI last polled data from this end host.
| ``attribute type:`` datetime
| ``DeviceID:`` The NetMRI internal identifier for the switch.
| ``attribute type:`` number
| ``DeviceType:`` The NetMRI-determined device type of the switch.
| ``attribute type:`` string
| ``DeviceName:`` The NetMRI name of the switch; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
| ``DeviceIPDotted:`` The management IP address of the switch, in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``DeviceIPNumeric:`` The numerical value of the switch IP address.
| ``attribute type:`` number
| ``InterfaceID:`` The internal NetMRI identifier for the interface on the switch configured with this address.
| ``attribute type:`` number
| ``Interface:`` The interface on the switch configured with this address.
| ``attribute type:`` string
| ``ifIndex:`` The SNMP interface index of the interface on the switch configured with this address.
| ``attribute type:`` string
| ``ifDescr:`` The description of the interface, as set in the switch's configuration file.
| ``attribute type:`` string
| ``ifAlias:`` Interface alias of this interface.
| ``attribute type:`` string
| ``ifMAC:`` The interface Media Access Control (MAC) address of this interface.
| ``attribute type:`` string
| ``ifOperStatus:`` The operational status (up/down) of this interface.
| ``attribute type:`` string
| ``ifAdminStatus:`` The configured status (up/down) of this interface.
| ``attribute type:`` string
| ``ifSpeed:`` The operational speed, in bps, of this interface.
| ``attribute type:`` number
| ``ifDuplex:`` The operational duplex of this interface.
| ``attribute type:`` string
| ``ifAdminDuplex:`` Admin setting of duplex; 'Auto' indicates the device will negotiate duplex with the other end.
| ``attribute type:`` string
| ``VlanIndex:`` The numerical VLAN number (VLAN ID).
| ``attribute type:`` number
| ``VlanName:`` The name of the VLAN on the root bridge.
| ``attribute type:`` string
| ``VlanID:`` The internal NetMRI identifier of the VLAN.
| ``attribute type:`` number
| ``VTPDomain:`` Management domain name if VLAN is VTP managed.
| ``attribute type:`` string
| ``Packets:`` Total inbound and outbound packets on this interface.
| ``attribute type:`` number
| ``Errors:`` Total inbound and outbound errors on this interface.
| ``attribute type:`` number
| ``ErrorPercentage:`` Percentage of errors on this interface.
| ``attribute type:`` number
| ``VirtualNetworkID:`` The internal identifier for the network which the device is associated to.
| ``attribute type:`` number
| ``VirtualNetworkMemberName:`` The name of the VRF as configured on this device.
| ``attribute type:`` string
| ``TenantDn:`` DN of the tenant that owns the EPG the end host is assigned to
| ``attribute type:`` string
| ``BridgeDomainDn:`` DN of bridge domain the end host is connected to
| ``attribute type:`` string
| ``EPGDn:`` DN of the EPG the end host is assigned to
| ``attribute type:`` string
| ``ApName:`` Name of the access point
| ``attribute type:`` string
| ``ApIpAddress:`` IP address of the access point
| ``attribute type:`` string
| ``ApSsid:`` SSID of the access point
| ``attribute type:`` string
"""
properties = ("id",
"Network",
"NeighborDeviceID",
"NeighborType",
"NeighborIPDotted",
"NeighborIPNumeric",
"NeighborName",
"NeighborMAC",
"NeighborIfIndex",
"OrgUniqueId",
"NetBIOSName",
"FirstSeen",
"LastSeen",
"DeviceID",
"DeviceType",
"DeviceName",
"DeviceIPDotted",
"DeviceIPNumeric",
"InterfaceID",
"Interface",
"ifIndex",
"ifDescr",
"ifAlias",
"ifMAC",
"ifOperStatus",
"ifAdminStatus",
"ifSpeed",
"ifDuplex",
"ifAdminDuplex",
"VlanIndex",
"VlanName",
"VlanID",
"VTPDomain",
"Packets",
"Errors",
"ErrorPercentage",
"VirtualNetworkID",
"VirtualNetworkMemberName",
"TenantDn",
"BridgeDomainDn",
"EPGDn",
"ApName",
"ApIpAddress",
"ApSsid",
)
@property
@check_api_availability
def meta(self):
"""
User custom fields
``attribute type:`` model
"""
return self.broker.meta(**{"id": self.id})
| apache-2.0 |
sultanqasim/android_kernel_alcatel_alto45 | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
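# Each non-comment line has the form "command:opcode:threadid:data", e.g.
#   C: locknowait: 0: 0   (thread 0 issues a non-blocking lock on lock 0)
#   W: locked:     0: 0   (wait until thread 0 reports lock 0 as locked)
# (example lines in the style of the scripts/rt-tester/*.tst files)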
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
duducosmos/pgs4a | python-install/lib/python2.7/distutils/tests/test_bdist_wininst.py | 99 | 1044 | """Tests for distutils.command.bdist_wininst."""
import unittest
from test.test_support import run_unittest
from distutils.command.bdist_wininst import bdist_wininst
from distutils.tests import support
class BuildWinInstTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_get_exe_bytes(self):
# issue5731: command was broken on non-windows platforms
# this test makes sure it works now for every platform
# let's create a command
pkg_pth, dist = self.create_dist()
cmd = bdist_wininst(dist)
cmd.ensure_finalized()
# let's run the code that finds the right wininst*.exe file
# and make sure it finds it and returns its content
# no matter what platform we have
exe_file = cmd.get_exe_bytes()
self.assertTrue(len(exe_file) > 10)
def test_suite():
return unittest.makeSuite(BuildWinInstTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| lgpl-2.1 |
str90/RK3188_tablet_kernel_sources | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
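# Example: show syscall totals for sshd, refreshed every 5 seconds:
#   perf script -s sctop.py sshd 5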
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
python-diamond/Diamond | src/collectors/ntpd/ntpd.py | 16 | 5066 | # coding=utf-8
"""
Collect stats from ntpd
#### Dependencies
* subprocess
"""
import subprocess
import diamond.collector
from diamond.collector import str_to_bool
class NtpdCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(NtpdCollector, self).get_default_config_help()
config_help.update({
'ntpq_bin': 'Path to ntpq binary',
'ntpdc_bin': 'Path to ntpdc binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NtpdCollector, self).get_default_config()
config.update({
'path': 'ntpd',
'ntpq_bin': self.find_binary('/usr/bin/ntpq'),
'ntpdc_bin': self.find_binary('/usr/bin/ntpdc'),
'use_sudo': False,
'sudo_cmd': self.find_binary('/usr/bin/sudo'),
})
return config
def run_command(self, command):
try:
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
return subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0]
except OSError:
self.log.exception("Unable to run %s", command)
return ""
def get_ntpq_output(self):
return self.run_command([self.config['ntpq_bin'], '-np'])
def get_ntpq_stats(self):
output = self.get_ntpq_output()
data = {}
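# Columns in `ntpq -np` output (the system peer line starts with '*'):
#   remote refid st t when poll reach delay offset jitter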
for line in output.splitlines():
# Only care about system peer
if not line.startswith('*'):
continue
parts = line[1:].split()
data['stratum'] = {'val': parts[2], 'precision': 0}
data['when'] = {'val': parts[4], 'precision': 0}
if data['when']['val'] == '-':
# sometimes ntpq returns the value '-' for 'when'; continue
# and try another system peer
continue
data['poll'] = {'val': parts[5], 'precision': 0}
data['reach'] = {'val': parts[6], 'precision': 0}
data['delay'] = {'val': parts[7], 'precision': 6}
data['jitter'] = {'val': parts[9], 'precision': 6}
def convert_to_second(when_ntpd_output):
value = float(when_ntpd_output[:-1])
if when_ntpd_output.endswith('m'):
return value * 60
elif when_ntpd_output.endswith('h'):
return value * 3600
elif when_ntpd_output.endswith('d'):
return value * 86400
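# e.g. convert_to_second('5m') -> 300.0, '2h' -> 7200.0, '1d' -> 86400.0
# (only values ending in m/h/d reach this helper; see the check below)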
if 'when' in data:
if data['when']['val'] == '-':
self.log.warning('ntpq returned bad value for "when"')
return []
if data['when']['val'].endswith(('m', 'h', 'd')):
data['when']['val'] = convert_to_second(data['when']['val'])
return data.items()
def get_ntpdc_kerninfo_output(self):
return self.run_command([self.config['ntpdc_bin'], '-c', 'kerninfo'])
def get_ntpdc_kerninfo_stats(self):
output = self.get_ntpdc_kerninfo_output()
data = {}
for line in output.splitlines():
key, val = line.split(':')
val = float(val.split()[0])
if key == 'pll offset':
data['offset'] = {'val': val, 'precision': 10}
elif key == 'pll frequency':
data['frequency'] = {'val': val, 'precision': 6}
elif key == 'maximum error':
data['max_error'] = {'val': val, 'precision': 6}
elif key == 'estimated error':
data['est_error'] = {'val': val, 'precision': 6}
elif key == 'status':
data['status'] = {'val': val, 'precision': 0}
return data.items()
def get_ntpdc_sysinfo_output(self):
return self.run_command([self.config['ntpdc_bin'], '-c', 'sysinfo'])
def get_ntpdc_sysinfo_stats(self):
output = self.get_ntpdc_sysinfo_output()
data = {}
for line in output.splitlines():
key, val = line.split(':')[0:2]
try:
val = float(val.split()[0])
if key == 'root distance':
data['root_distance'] = {'val': val, 'precision': 6}
elif key == 'root dispersion':
data['root_dispersion'] = {'val': val, 'precision': 6}
except Exception:
pass
return data.items()
def collect(self):
for stat, v in self.get_ntpq_stats():
self.publish(stat, v['val'], precision=v['precision'])
for stat, v in self.get_ntpdc_kerninfo_stats():
self.publish(stat, v['val'], precision=v['precision'])
for stat, v in self.get_ntpdc_sysinfo_stats():
self.publish(stat, v['val'], precision=v['precision'])
| mit |
creasyw/IMTAphy | documentation/toolchain/docutils-0.5-py2.5.egg/docutils/core.py | 2 | 26940 | # $Id: core.py 5538 2008-03-27 15:04:56Z goodger $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Calling the ``publish_*`` convenience functions (or instantiating a
`Publisher` object) with component names will result in default
behavior. For custom behavior (setting component options), create
custom component objects first, and pass *them* to
``publish_*``/`Publisher`. See `The Docutils Publisher`_.
.. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html
"""
__docformat__ = 'reStructuredText'
import sys
import pprint
from types import StringType
from docutils import __version__, __version_details__, SettingsSpec
from docutils import frontend, io, utils, readers, writers
from docutils.frontend import OptionParser
from docutils.transforms import Transformer
import docutils.readers.doctree
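# A minimal programmatic example of the convenience API described above
# (illustrative input string; see publish_string below for the full
# signature):
#
#   from docutils.core import publish_string
#   html = publish_string('*hello*', writer_name='html',
#                         settings_overrides={'output_encoding': 'unicode'})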
class Publisher:
"""
A facade encapsulating the high-level logic of a Docutils system.
"""
def __init__(self, reader=None, parser=None, writer=None,
source=None, source_class=io.FileInput,
destination=None, destination_class=io.FileOutput,
settings=None):
"""
Initial setup. If any of `reader`, `parser`, or `writer` are not
specified, the corresponding ``set_...`` method should be called with
a component name (`set_reader` sets the parser as well).
"""
self.document = None
"""The document tree (`docutils.nodes` objects)."""
self.reader = reader
"""A `docutils.readers.Reader` instance."""
self.parser = parser
"""A `docutils.parsers.Parser` instance."""
self.writer = writer
"""A `docutils.writers.Writer` instance."""
for component in 'reader', 'parser', 'writer':
assert not isinstance(getattr(self, component), StringType), (
'passed string "%s" as "%s" parameter; pass an instance, '
'or use the "%s_name" parameter instead (in '
'docutils.core.publish_* convenience functions).'
% (getattr(self, component), component, component))
self.source = source
"""The source of input data, a `docutils.io.Input` instance."""
self.source_class = source_class
"""The class for dynamically created source objects."""
self.destination = destination
"""The destination for docutils output, a `docutils.io.Output`
instance."""
self.destination_class = destination_class
"""The class for dynamically created destination objects."""
self.settings = settings
"""An object containing Docutils settings as instance attributes.
Set by `self.process_command_line()` or `self.get_settings()`."""
def set_reader(self, reader_name, parser, parser_name):
"""Set `self.reader` by name."""
reader_class = readers.get_reader_class(reader_name)
self.reader = reader_class(parser, parser_name)
self.parser = self.reader.parser
def set_writer(self, writer_name):
"""Set `self.writer` by name."""
writer_class = writers.get_writer_class(writer_name)
self.writer = writer_class()
def set_components(self, reader_name, parser_name, writer_name):
if self.reader is None:
self.set_reader(reader_name, self.parser, parser_name)
if self.parser is None:
if self.reader.parser is None:
self.reader.set_parser(parser_name)
self.parser = self.reader.parser
if self.writer is None:
self.set_writer(writer_name)
def setup_option_parser(self, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
if config_section:
if not settings_spec:
settings_spec = SettingsSpec()
settings_spec.config_section = config_section
parts = config_section.split()
if len(parts) > 1 and parts[-1] == 'application':
settings_spec.config_section_dependencies = ['applications']
#@@@ Add self.source & self.destination to components in future?
option_parser = OptionParser(
components=(self.parser, self.reader, self.writer, settings_spec),
defaults=defaults, read_config_files=1,
usage=usage, description=description)
return option_parser
def get_settings(self, usage=None, description=None,
settings_spec=None, config_section=None, **defaults):
"""
Set and return default settings (overrides in `defaults` dict).
Set components first (`self.set_reader` & `self.set_writer`).
Explicitly setting `self.settings` disables command line option
processing from `self.publish()`.
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
self.settings = option_parser.get_default_values()
return self.settings
def process_programmatic_settings(self, settings_spec,
settings_overrides,
config_section):
if self.settings is None:
defaults = (settings_overrides or {}).copy()
# Propagate exceptions by default when used programmatically:
defaults.setdefault('traceback', 1)
self.get_settings(settings_spec=settings_spec,
config_section=config_section,
**defaults)
def process_command_line(self, argv=None, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
"""
Pass an empty list to `argv` to avoid reading `sys.argv` (the
default).
Set components first (`self.set_reader` & `self.set_writer`).
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
if argv is None:
argv = sys.argv[1:]
self.settings = option_parser.parse_args(argv)
def set_io(self, source_path=None, destination_path=None):
if self.source is None:
self.set_source(source_path=source_path)
if self.destination is None:
self.set_destination(destination_path=destination_path)
def set_source(self, source=None, source_path=None):
if source_path is None:
source_path = self.settings._source
else:
self.settings._source = source_path
self.source = self.source_class(
source=source, source_path=source_path,
encoding=self.settings.input_encoding)
def set_destination(self, destination=None, destination_path=None):
if destination_path is None:
destination_path = self.settings._destination
else:
self.settings._destination = destination_path
self.destination = self.destination_class(
destination=destination, destination_path=destination_path,
encoding=self.settings.output_encoding,
error_handler=self.settings.output_encoding_error_handler)
def apply_transforms(self):
self.document.transformer.populate_from_components(
(self.source, self.reader, self.reader.parser, self.writer,
self.destination))
self.document.transformer.apply_transforms()
def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=None):
"""
Process command line options and arguments (if `self.settings` not
already set), run `self.reader` and then `self.writer`. Return
`self.writer`'s output.
"""
exit = None
try:
if self.settings is None:
self.process_command_line(
argv, usage, description, settings_spec, config_section,
**(settings_overrides or {}))
self.set_io()
self.document = self.reader.read(self.source, self.parser,
self.settings)
self.apply_transforms()
output = self.writer.write(self.document, self.destination)
self.writer.assemble_parts()
except SystemExit, error:
exit = 1
exit_status = error.code
except Exception, error:
if not self.settings: # exception too early to report nicely
raise
if self.settings.traceback: # Propagate exceptions?
self.debugging_dumps()
raise
self.report_Exception(error)
exit = 1
exit_status = 1
self.debugging_dumps()
if (enable_exit_status and self.document
and (self.document.reporter.max_level
>= self.settings.exit_status_level)):
sys.exit(self.document.reporter.max_level + 10)
elif exit:
sys.exit(exit_status)
return output
def debugging_dumps(self):
if not self.document:
return
if self.settings.dump_settings:
print >>sys.stderr, '\n::: Runtime settings:'
print >>sys.stderr, pprint.pformat(self.settings.__dict__)
if self.settings.dump_internals:
print >>sys.stderr, '\n::: Document internals:'
print >>sys.stderr, pprint.pformat(self.document.__dict__)
if self.settings.dump_transforms:
print >>sys.stderr, '\n::: Transforms applied:'
print >>sys.stderr, (' (priority, transform class, '
'pending node details, keyword args)')
print >>sys.stderr, pprint.pformat(
[(priority, '%s.%s' % (xclass.__module__, xclass.__name__),
pending and pending.details, kwargs)
for priority, xclass, pending, kwargs
in self.document.transformer.applied])
if self.settings.dump_pseudo_xml:
print >>sys.stderr, '\n::: Pseudo-XML:'
print >>sys.stderr, self.document.pformat().encode(
'raw_unicode_escape')
def report_Exception(self, error):
if isinstance(error, utils.SystemMessage):
self.report_SystemMessage(error)
elif isinstance(error, UnicodeError):
self.report_UnicodeError(error)
else:
print >>sys.stderr, '%s: %s' % (error.__class__.__name__, error)
print >>sys.stderr, ("""\
Exiting due to error. Use "--traceback" to diagnose.
Please report errors to <docutils-users@lists.sf.net>.
Include "--traceback" output, Docutils version (%s [%s]),
Python version (%s), your OS type & version, and the
command line used.""" % (__version__, __version_details__,
sys.version.split()[0]))
def report_SystemMessage(self, error):
print >>sys.stderr, ('Exiting due to level-%s (%s) system message.'
% (error.level,
utils.Reporter.levels[error.level]))
def report_UnicodeError(self, error):
sys.stderr.write(
'%s: %s\n'
'\n'
'The specified output encoding (%s) cannot\n'
'handle all of the output.\n'
'Try setting "--output-encoding-error-handler" to\n'
'\n'
'* "xmlcharrefreplace" (for HTML & XML output);\n'
% (error.__class__.__name__, error,
self.settings.output_encoding))
try:
data = error.object[error.start:error.end]
sys.stderr.write(
' the output will contain "%s" and should be usable.\n'
'* "backslashreplace" (for other output formats, Python 2.3+);\n'
' look for "%s" in the output.\n'
% (data.encode('ascii', 'xmlcharrefreplace'),
data.encode('ascii', 'backslashreplace')))
except AttributeError:
sys.stderr.write(' the output should be usable as-is.\n')
sys.stderr.write(
'* "replace"; look for "?" in the output.\n'
'\n'
'"--output-encoding-error-handler" is currently set to "%s".\n'
'\n'
'Exiting due to error. Use "--traceback" to diagnose.\n'
'If the advice above doesn\'t eliminate the error,\n'
'please report it to <docutils-users@lists.sf.net>.\n'
'Include "--traceback" output, Docutils version (%s),\n'
'Python version (%s), your OS type & version, and the\n'
'command line used.\n'
% (self.settings.output_encoding_error_handler,
__version__, sys.version.split()[0]))
default_usage = '%prog [options] [<source> [<destination>]]'
default_description = ('Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sf.net/docs/user/config.html> for '
'the full reference.')
def publish_cmdline(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=1, argv=None,
usage=default_usage, description=default_description):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
encoded string output also.
Parameters: see `publish_programmatically` for the remainder.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output
def publish_file(source=None, source_path=None,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=None):
"""
Set up & run a `Publisher` for programmatic use with file-like I/O.
Return the encoded string output also.
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.FileInput, source=source, source_path=source_path,
destination_class=io.FileOutput,
destination=destination, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
def publish_string(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
"""
Set up & run a `Publisher` for programmatic use with string I/O. Return
the encoded string or Unicode string output.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_string(..., settings_overrides={'output_encoding': 'unicode'})
Similarly for Unicode string input (`source`)::
publish_string(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.StringInput, source=source, source_path=source_path,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
def publish_parts(source, source_path=None, source_class=io.StringInput,
destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
"""
Set up & run a `Publisher`, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client. For programmatic use with string I/O.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's how::
publish_parts(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source=source, source_path=source_path, source_class=source_class,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return pub.writer.parts
def publish_doctree(source, source_path=None,
source_class=io.StringInput,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
"""
Set up & run a `Publisher` for programmatic use with string I/O.
Return the document tree.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's one way::
publish_doctree(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
pub = Publisher(reader=reader, parser=parser, writer=None,
settings=settings,
source_class=source_class,
destination_class=io.NullOutput)
pub.set_components(reader_name, parser_name, 'null')
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(None, None)
output = pub.publish(enable_exit_status=enable_exit_status)
return pub.document
def publish_from_doctree(document, destination_path=None,
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
"""
Set up & run a `Publisher` to render from an existing document
tree data structure, for programmatic use with string I/O. Return
the encoded string output.
Note that document.settings is overridden; if you want to use the settings
of the original `document`, pass settings=document.settings.
Also, new document.transformer and document.reporter objects are
generated.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_from_doctree(
..., settings_overrides={'output_encoding': 'unicode'})
Parameters: `document` is a `docutils.nodes.document` object, an existing
document tree.
Other parameters: see `publish_programmatically`.
"""
reader = docutils.readers.doctree.Reader(parser_name='null')
pub = Publisher(reader, None, writer,
source=io.DocTreeInput(document),
destination_class=io.StringOutput, settings=settings)
if not writer and writer_name:
pub.set_writer(writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_destination(None, destination_path)
return pub.publish(enable_exit_status=enable_exit_status)
def publish_programmatically(source_class, source, source_path,
destination_class, destination, destination_path,
reader, reader_name,
parser, parser_name,
writer, writer_name,
settings, settings_spec,
settings_overrides, config_section,
enable_exit_status):
"""
Set up & run a `Publisher` for custom programmatic use. Return the
encoded string output and the Publisher object.
Applications should not need to call this function directly. If it does
seem to be necessary to call this function directly, please write to the
Docutils-develop mailing list
<http://docutils.sf.net/docs/user/mailing-lists.html#docutils-develop>.
Parameters:
* `source_class` **required**: The class for dynamically created source
objects. Typically `io.FileInput` or `io.StringInput`.
* `source`: Type depends on `source_class`:
- If `source_class` is `io.FileInput`: Either a file-like object
(must have 'read' and 'close' methods), or ``None``
(`source_path` is opened). If neither `source` nor
`source_path` are supplied, `sys.stdin` is used.
- If `source_class` is `io.StringInput` **required**: The input
string, either an encoded 8-bit string (set the
'input_encoding' setting to the correct encoding) or a Unicode
string (set the 'input_encoding' setting to 'unicode').
* `source_path`: Type depends on `source_class`:
- `io.FileInput`: Path to the input file, opened if no `source`
supplied.
- `io.StringInput`: Optional. Path to the file or object that produced
`source`. Only used for diagnostic output.
* `destination_class` **required**: The class for dynamically created
destination objects. Typically `io.FileOutput` or `io.StringOutput`.
* `destination`: Type depends on `destination_class`:
- `io.FileOutput`: Either a file-like object (must have 'write' and
'close' methods), or ``None`` (`destination_path` is opened). If
neither `destination` nor `destination_path` are supplied,
`sys.stdout` is used.
- `io.StringOutput`: Not used; pass ``None``.
* `destination_path`: Type depends on `destination_class`:
- `io.FileOutput`: Path to the output file. Opened if no `destination`
supplied.
- `io.StringOutput`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
* `reader`: A `docutils.readers.Reader` object.
* `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
* `parser`: A `docutils.parsers.Parser` object.
* `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
* `writer`: A `docutils.writers.Writer` object.
* `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
* `settings`: A runtime settings (`docutils.frontend.Values`) object, for
dotted-attribute access to runtime settings. It's the end result of the
`SettingsSpec`, config file, and option processing. If `settings` is
passed, it's assumed to be complete and no further setting/config/option
processing is done.
* `settings_spec`: A `docutils.SettingsSpec` subclass or object. Provides
extra application-specific settings definitions independently of
components. In other words, the application becomes a component, and
its settings data is processed along with that of the other components.
Used only if no `settings` specified.
* `settings_overrides`: A dictionary containing application-specific
settings defaults that override the defaults of other components.
Used only if no `settings` specified.
* `config_section`: A string, the name of the configuration file section
for this application. Overrides the ``config_section`` attribute
defined by `settings_spec`. Used only if no `settings` specified.
* `enable_exit_status`: Boolean; enable exit status at end of processing?
"""
pub = Publisher(reader, parser, writer, settings=settings,
source_class=source_class,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(destination, destination_path)
output = pub.publish(enable_exit_status=enable_exit_status)
return output, pub
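# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal, hedged example of driving the function above programmatically.
# It assumes the enclosing function is docutils.core's
# ``publish_programmatically`` (its parameters match the docstring above);
# the input string and the ``report_level`` override are made-up values.
def _example_string_to_html():
    """Hypothetical helper: convert a reST string to HTML in memory."""
    from docutils import io
    output, publisher = publish_programmatically(
        source_class=io.StringInput, source='Hello, *world*!',
        source_path=None,
        destination_class=io.StringOutput, destination=None,
        destination_path=None,
        reader=None, reader_name='standalone',
        parser=None, parser_name='restructuredtext',
        writer=None, writer_name='html',
        settings=None, settings_spec=None,
        settings_overrides={'report_level': 5},
        config_section=None, enable_exit_status=False)
    return output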
| gpl-2.0 |
mSenyor/sl4a | python/gdata/src/gdata/Crypto/Protocol/Chaffing.py | 226 | 9467 | """This file implements the chaffing algorithm.
Winnowing and chaffing is a technique for enhancing privacy without requiring
strong encryption. In short, the technique takes a set of authenticated
message blocks (the wheat) and adds a number of chaff blocks which have
randomly chosen data and MAC fields. This means that to an adversary, the
chaff blocks look as valid as the wheat blocks, and so the authentication
would have to be performed on every block. By tailoring the number of chaff
blocks added to the message, the sender can make breaking the message
computationally infeasible. There are many other interesting properties of
the winnow/chaff technique.
For example, say Alice is sending a message to Bob. She packetizes the
message and performs an all-or-nothing transformation on the packets. Then
she authenticates each packet with a message authentication code (MAC). The
MAC is a hash of the data packet, and there is a secret key which she must
share with Bob (key distribution is an exercise left to the reader). She then
adds a serial number to each packet, and sends the packets to Bob.
Bob receives the packets, and using the shared secret authentication key,
authenticates the MACs for each packet. Those packets that have bad MACs are
simply discarded. The remainder are sorted by serial number, and passed
through the reverse all-or-nothing transform. The transform means that an
eavesdropper (say Eve) must acquire all the packets before any of the data can
be read. If even one packet is missing, the data is useless.
There's one twist: by adding chaff packets, Alice and Bob can make Eve's job
much harder, since Eve now has to break the shared secret key, or try every
combination of wheat and chaff packet to read any of the message. The cool
thing is that Bob doesn't need to add any additional code; the chaff packets
are already filtered out because their MACs don't match (in all likelihood --
since the data and MACs for the chaff packets are randomly chosen it is
possible, but very unlikely that a chaff MAC will match the chaff data). And
Alice need not even be the party adding the chaff! She could be completely
unaware that a third party, say Charles, is adding chaff packets to her
messages as they are transmitted.
For more information on winnowing and chaffing see this paper:
Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption"
http://theory.lcs.mit.edu/~rivest/chaffing.txt
"""
__revision__ = "$Id: Chaffing.py,v 1.7 2003/02/28 15:23:21 akuchling Exp $"
from Crypto.Util.number import bytes_to_long
class Chaff:
"""Class implementing the chaff adding algorithm.
Methods for subclasses:
_randnum(size):
Returns a randomly generated number with a byte-length equal
to size. Subclasses can use this to implement better random
data and MAC generating algorithms. The default algorithm is
probably not very cryptographically secure. It is most
important that the chaff data does not contain any patterns
that can be used to discern it from wheat data without running
the MAC.
"""
def __init__(self, factor=1.0, blocksper=1):
"""Chaff(factor:float, blocksper:int)
        factor is the number of message blocks to add chaff to,
        expressed as a fraction between 0.0 and 1.0.  blocksper is
the number of chaff blocks to include for each block being
chaffed. Thus the defaults add one chaff block to every
message block. By changing the defaults, you can adjust how
computationally difficult it could be for an adversary to
brute-force crack the message. The difficulty is expressed
as:
pow(blocksper, int(factor * number-of-blocks))
For ease of implementation, when factor < 1.0, only the first
int(factor*number-of-blocks) message blocks are chaffed.
"""
if not (0.0<=factor<=1.0):
raise ValueError, "'factor' must be between 0.0 and 1.0"
if blocksper < 0:
raise ValueError, "'blocksper' must be zero or more"
self.__factor = factor
self.__blocksper = blocksper
def chaff(self, blocks):
"""chaff( [(serial-number:int, data:string, MAC:string)] )
: [(int, string, string)]
Add chaff to message blocks. blocks is a list of 3-tuples of the
form (serial-number, data, MAC).
Chaff is created by choosing a random number of the same
byte-length as data, and another random number of the same
byte-length as MAC. The message block's serial number is
placed on the chaff block and all the packet's chaff blocks
are randomly interspersed with the single wheat block. This
method then returns a list of 3-tuples of the same form.
Chaffed blocks will contain multiple instances of 3-tuples
with the same serial number, but the only way to figure out
which blocks are wheat and which are chaff is to perform the
MAC hash and compare values.
"""
chaffedblocks = []
# count is the number of blocks to add chaff to. blocksper is the
# number of chaff blocks to add per message block that is being
# chaffed.
count = len(blocks) * self.__factor
blocksper = range(self.__blocksper)
for i, wheat in map(None, range(len(blocks)), blocks):
# it shouldn't matter which of the n blocks we add chaff to, so for
# ease of implementation, we'll just add them to the first count
# blocks
if i < count:
serial, data, mac = wheat
datasize = len(data)
macsize = len(mac)
addwheat = 1
# add chaff to this block
for j in blocksper:
chaffdata = self._randnum(datasize)
chaffmac = self._randnum(macsize)
chaff = (serial, chaffdata, chaffmac)
                    # mix up the order: if the 0x40 bit of a fresh random
                    # value is set, put the wheat on the list first
if addwheat and bytes_to_long(self._randnum(16)) & 0x40:
chaffedblocks.append(wheat)
addwheat = 0
chaffedblocks.append(chaff)
if addwheat:
chaffedblocks.append(wheat)
else:
# just add the wheat
chaffedblocks.append(wheat)
return chaffedblocks
def _randnum(self, size):
# TBD: Not a very secure algorithm.
# TBD: size * 2 to work around possible bug in RandomPool
from Crypto.Util import randpool
import time
pool = randpool.RandomPool(size * 2)
while size > pool.entropy:
pass
# we now have enough entropy in the pool to get size bytes of random
# data... well, probably
return pool.get_bytes(size)
if __name__ == '__main__':
text = """\
We hold these truths to be self-evident, that all men are created equal, that
they are endowed by their Creator with certain unalienable Rights, that among
these are Life, Liberty, and the pursuit of Happiness. That to secure these
rights, Governments are instituted among Men, deriving their just powers from
the consent of the governed. That whenever any Form of Government becomes
destructive of these ends, it is the Right of the People to alter or to
abolish it, and to institute new Government, laying its foundation on such
principles and organizing its powers in such form, as to them shall seem most
likely to effect their Safety and Happiness.
"""
print 'Original text:\n=========='
print text
print '=========='
# first transform the text into packets
blocks = [] ; size = 40
for i in range(0, len(text), size):
blocks.append( text[i:i+size] )
# now get MACs for all the text blocks. The key is obvious...
print 'Calculating MACs...'
from Crypto.Hash import HMAC, SHA
key = 'Jefferson'
macs = [HMAC.new(key, block, digestmod=SHA).digest()
for block in blocks]
assert len(blocks) == len(macs)
# put these into a form acceptable as input to the chaffing procedure
source = []
m = map(None, range(len(blocks)), blocks, macs)
for i, data, mac in m:
source.append((i, data, mac))
# now chaff these
print 'Adding chaff...'
c = Chaff(factor=0.5, blocksper=2)
chaffed = c.chaff(source)
from base64 import encodestring
# print the chaffed message blocks. meanwhile, separate the wheat from
# the chaff
wheat = []
print 'chaffed message blocks:'
for i, data, mac in chaffed:
# do the authentication
h = HMAC.new(key, data, digestmod=SHA)
pmac = h.digest()
if pmac == mac:
tag = '-->'
wheat.append(data)
else:
tag = ' '
# base64 adds a trailing newline
print tag, '%3d' % i, \
repr(data), encodestring(mac)[:-1]
# now decode the message packets and check it against the original text
print 'Undigesting wheat...'
newtext = "".join(wheat)
if newtext == text:
print 'They match!'
else:
print 'They differ!'
| apache-2.0 |
drjeep/django | tests/model_formsets/models.py | 143 | 7751 | from __future__ import unicode_literals
import datetime
import uuid
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class BetterAuthor(Author):
write_speed = models.IntegerField()
@python_2_unicode_compatible
class Book(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=100)
class Meta:
unique_together = (
('author', 'title'),
)
ordering = ['id']
def __str__(self):
return self.title
@python_2_unicode_compatible
class BookWithCustomPK(models.Model):
my_pk = models.DecimalField(max_digits=5, decimal_places=0, primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=100)
def __str__(self):
return '%s: %s' % (self.my_pk, self.title)
class Editor(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class BookWithOptionalAltEditor(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
# Optional secondary author
alt_editor = models.ForeignKey(Editor, models.SET_NULL, blank=True, null=True)
title = models.CharField(max_length=100)
class Meta:
unique_together = (
('author', 'title', 'alt_editor'),
)
def __str__(self):
return self.title
@python_2_unicode_compatible
class AlternateBook(Book):
notes = models.CharField(max_length=100)
def __str__(self):
return '%s - %s' % (self.title, self.notes)
@python_2_unicode_compatible
class AuthorMeeting(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author)
created = models.DateField(editable=False)
def __str__(self):
return self.name
class CustomPrimaryKey(models.Model):
my_pk = models.CharField(max_length=10, primary_key=True)
some_field = models.CharField(max_length=100)
# models for inheritance tests.
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
city = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Owner(models.Model):
auto_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
place = models.ForeignKey(Place, models.CASCADE)
def __str__(self):
return "%s at %s" % (self.name, self.place)
class Location(models.Model):
place = models.ForeignKey(Place, models.CASCADE, unique=True)
    # this is purely for testing; the data doesn't matter here :)
lat = models.CharField(max_length=100)
lon = models.CharField(max_length=100)
@python_2_unicode_compatible
class OwnerProfile(models.Model):
owner = models.OneToOneField(Owner, models.CASCADE, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %d" % (self.owner.name, self.age)
@python_2_unicode_compatible
class Restaurant(Place):
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
@python_2_unicode_compatible
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __str__(self):
return "%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class MexicanRestaurant(Restaurant):
serves_tacos = models.BooleanField(default=False)
class ClassyMexicanRestaurant(MexicanRestaurant):
restaurant = models.OneToOneField(MexicanRestaurant, models.CASCADE, parent_link=True, primary_key=True)
tacos_are_yummy = models.BooleanField(default=False)
# models for testing unique_together validation when a FK is involved,
# using inlineformset_factory.
@python_2_unicode_compatible
class Repository(models.Model):
name = models.CharField(max_length=25)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Revision(models.Model):
repository = models.ForeignKey(Repository, models.CASCADE)
revision = models.CharField(max_length=40)
class Meta:
unique_together = (("repository", "revision"),)
def __str__(self):
return "%s (%s)" % (self.revision, six.text_type(self.repository))
# models for testing callable defaults (see bug #7975). If you define a model
# with a callable default value, you cannot rely on the initial value in a
# form.
class Person(models.Model):
name = models.CharField(max_length=128)
class Membership(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
date_joined = models.DateTimeField(default=datetime.datetime.now)
karma = models.IntegerField()
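# Editor's note (illustrative): because ``default=datetime.datetime.now`` is
# passed as a callable, it is re-evaluated each time a form for an unsaved
# Membership is built, so two renderings of the "same" form can show
# different ``date_joined`` values -- hence the warning above about not
# relying on the initial value in a form.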
# models for testing a null=True fk to a parent
class Team(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class Player(models.Model):
team = models.ForeignKey(Team, models.SET_NULL, null=True)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
# Models for testing custom ModelForm save methods in formsets and inline formsets
@python_2_unicode_compatible
class Poet(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Poem(models.Model):
poet = models.ForeignKey(Poet, models.CASCADE)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __str__(self):
        return self.title
# Models for testing UUID primary keys
class UUIDPKParent(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
class UUIDPKChild(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
parent = models.ForeignKey(UUIDPKParent, models.CASCADE)
class ChildWithEditablePK(models.Model):
name = models.CharField(max_length=255, primary_key=True)
parent = models.ForeignKey(UUIDPKParent, models.CASCADE)
class AutoPKChildOfUUIDPKParent(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(UUIDPKParent, models.CASCADE)
class AutoPKParent(models.Model):
name = models.CharField(max_length=255)
class UUIDPKChildOfAutoPKParent(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
parent = models.ForeignKey(AutoPKParent, models.CASCADE)
class ParentWithUUIDAlternateKey(models.Model):
uuid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=50)
class ChildRelatedViaAK(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(ParentWithUUIDAlternateKey, models.CASCADE, to_field='uuid')
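# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal, hedged example of how the Repository/Revision pair above is
# typically exercised with inlineformset_factory; the repository name and
# field list are made up.
def _example_revision_formset():
    """Hypothetical helper: build an inline formset for Repository/Revision."""
    from django.forms.models import inlineformset_factory
    RevisionFormSet = inlineformset_factory(
        Repository, Revision, fields=('revision',), extra=1)
    repo = Repository(name='example-repo')
    # An unsaved parent is fine for rendering; the ("repository", "revision")
    # unique_together constraint is validated when the formset is saved.
    return RevisionFormSet(instance=repo)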
| bsd-3-clause |
Flimm/linkchecker | linkcheck/parser/__init__.py | 9 | 5065 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Main functions for link parsing
"""
from .. import log, LOG_CHECK, strformat, url as urlutil
from ..htmlutil import linkparse
from ..HtmlParser import htmlsax
from ..bookmarks import firefox
def parse_url(url_data):
"""Parse a URL."""
if url_data.is_directory():
# both ftp and file links represent directories as HTML data
key = "html"
elif url_data.is_file() and firefox.has_sqlite and firefox.extension.search(url_data.url):
key = "firefox"
elif url_data.scheme == "itms-services":
key = "itms_services"
else:
# determine parse routine according to content types
mime = url_data.content_type
key = url_data.ContentMimetypes[mime]
funcname = "parse_"+key
if funcname in globals():
globals()[funcname](url_data)
else:
url_data.aggregate.plugin_manager.run_parser_plugins(url_data, pagetype=key)
def parse_html (url_data):
"""Parse into HTML content and search for URLs to check.
Found URLs are added to the URL queue.
"""
find_links(url_data, url_data.add_url, linkparse.LinkTags)
def parse_opera (url_data):
"""Parse an opera bookmark file."""
from ..bookmarks.opera import parse_bookmark_data
for url, name, lineno in parse_bookmark_data(url_data.get_content()):
url_data.add_url(url, line=lineno, name=name)
def parse_chromium (url_data):
"""Parse a Chromium or Google Chrome bookmark file."""
from ..bookmarks.chromium import parse_bookmark_data
for url, name in parse_bookmark_data(url_data.get_content()):
url_data.add_url(url, name=name)
def parse_safari (url_data):
"""Parse a Safari bookmark file."""
from ..bookmarks.safari import parse_bookmark_data
for url, name in parse_bookmark_data(url_data.get_content()):
url_data.add_url(url, name=name)
def parse_text (url_data):
"""Parse a text file with one url per line; comment and blank
lines are ignored."""
lineno = 0
for line in url_data.get_content().splitlines():
lineno += 1
line = line.strip()
if not line or line.startswith('#'):
continue
url_data.add_url(line, line=lineno)
def parse_css (url_data):
"""
Parse a CSS file for url() patterns.
"""
lineno = 0
linkfinder = linkparse.css_url_re.finditer
strip_comments = linkparse.strip_c_comments
for line in strip_comments(url_data.get_content()).splitlines():
lineno += 1
for mo in linkfinder(line):
column = mo.start("url")
url = strformat.unquote(mo.group("url").strip())
url_data.add_url(url, line=lineno, column=column)
def parse_swf (url_data):
"""Parse a SWF file for URLs."""
linkfinder = linkparse.swf_url_re.finditer
for mo in linkfinder(url_data.get_content()):
url = mo.group()
url_data.add_url(url)
def parse_wml (url_data):
"""Parse into WML content and search for URLs to check.
Found URLs are added to the URL queue.
"""
find_links(url_data, url_data.add_url, linkparse.WmlTags)
def find_links (url_data, callback, tags):
"""Parse into content and search for URLs to check.
Found URLs are added to the URL queue.
"""
# construct parser object
handler = linkparse.LinkFinder(callback, tags)
parser = htmlsax.parser(handler)
if url_data.charset:
parser.encoding = url_data.charset
handler.parser = parser
# parse
try:
parser.feed(url_data.get_content())
parser.flush()
except linkparse.StopParse as msg:
log.debug(LOG_CHECK, "Stopped parsing: %s", msg)
# break cyclic dependencies
handler.parser = None
parser.handler = None
def parse_firefox (url_data):
"""Parse a Firefox3 bookmark file."""
filename = url_data.get_os_filename()
for url, name in firefox.parse_bookmark_file(filename):
url_data.add_url(url, name=name)
def parse_itms_services(url_data):
"""Get "url" CGI parameter value as child URL."""
query = url_data.urlparts[3]
for k, v, sep in urlutil.parse_qsl(query, keep_blank_values=True):
if k == "url":
url_data.add_url(v)
break
from .sitemap import parse_sitemap, parse_sitemapindex
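# --- Illustrative note (editor's sketch, not part of the original file) ---
# parse_url() dispatches on a key derived from the content type: it looks up
# a module-global named "parse_<key>" and otherwise falls back to parser
# plugins. A hypothetical new format therefore only needs a module-level
# function whose name matches a key registered in url_data.ContentMimetypes;
# the function below is such a sketch (the Markdown key itself is assumed,
# not part of this module).
def parse_markdown(url_data):
    """Hypothetical: extract inline-link URLs from a Markdown document."""
    import re
    for lineno, line in enumerate(url_data.get_content().splitlines(), 1):
        for mo in re.finditer(r'\((https?://[^)\s]+)\)', line):
            url_data.add_url(mo.group(1), line=lineno, column=mo.start(1))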
| gpl-2.0 |
Zarthus/CloudBotRefresh | plugins/lyrics.py | 2 | 1747 | from cloudbot import hook
from cloudbot.util import http, web
url = "http://search.azlyrics.com/search.php?q="
@hook.command()
def lyrics(text):
"""<search> - search AZLyrics.com for song lyrics"""
if "pastelyrics" in text:
dopaste = True
text = text.replace("pastelyrics", "").strip()
else:
dopaste = False
soup = http.get_soup(url + text.replace(" ", "+"))
if "Try to compose less restrictive search query" in soup.find('div', {'id': 'inn'}).text:
return "No results. Check spelling."
div = None
for i in soup.findAll('div', {'class': 'sen'}):
if "/lyrics/" in i.find('a')['href']:
div = i
break
if div:
title = div.find('a').text
link = div.find('a')['href']
if dopaste:
newsoup = http.get_soup(link)
try:
lyrics = newsoup.find('div', {'style': 'margin-left:10px;margin-right:10px;'}).text.strip()
pasteurl = " " + web.paste(lyrics)
except Exception as e:
pasteurl = " (\x02Unable to paste lyrics\x02 [{}])".format(str(e))
else:
pasteurl = ""
artist = div.find('b').text.title()
lyricsum = div.find('div').text
if "\r\n" in lyricsum.strip():
lyricsum = " / ".join(lyricsum.strip().split("\r\n")[0:4]) # truncate, format
else:
lyricsum = " / ".join(lyricsum.strip().split("\n")[0:4]) # truncate, format
return "\x02{}\x02 by \x02{}\x02 {}{} - {}".format(title, artist, web.try_shorten(link), pasteurl,
lyricsum[:-3])
else:
return "No song results. " + url + text.replace(" ", "+")
| gpl-3.0 |
sinbazhou/odoo | openerp/addons/base/workflow/workflow_report.py | 314 | 9001 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import itemgetter
import os
from openerp import report, tools
_logger = logging.getLogger(__name__)
def graph_get(cr, graph, wkf_ids, nested, workitem, witm_trans, processed_subflows):
import pydot
cr.execute('select * from wkf_activity where wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
nodes = cr.dictfetchall()
activities = {}
actfrom = {}
actto = {}
for n in nodes:
activities[n['id']] = n
if n['subflow_id'] and nested and n['subflow_id'] not in processed_subflows:
processed_subflows.add(n['subflow_id']) # don't create multiple times the same cluster.
cr.execute('select * from wkf where id=%s', (n['subflow_id'],))
wkfinfo = cr.dictfetchone()
graph2 = pydot.Cluster('subflow'+str(n['subflow_id']), fontsize='12', label = "\"Subflow: %s\\nOSV: %s\"" % ( n['name'], wkfinfo['osv']) )
(s1,s2) = graph_get(cr, graph2, [n['subflow_id']], True, workitem, witm_trans, processed_subflows)
graph.add_subgraph(graph2)
actfrom[n['id']] = s2
actto[n['id']] = s1
else:
args = {}
if n['flow_start'] or n['flow_stop']:
args['style']='filled'
args['color']='lightgrey'
args['label']=n['name']
workitems = ''
if n['id'] in workitem:
workitems = '\\nx ' + str(workitem[n['id']])
args['label'] += workitems
args['color'] = "red"
args['style']='filled'
if n['subflow_id']:
args['shape'] = 'box'
if nested and n['subflow_id'] in processed_subflows:
cr.execute('select * from wkf where id=%s', (n['subflow_id'],))
wkfinfo = cr.dictfetchone()
args['label'] = \
'\"Subflow: %s\\nOSV: %s\\n(already expanded)%s\"' % \
(n['name'], wkfinfo['osv'], workitems)
args['color'] = 'green'
args['style'] ='filled'
graph.add_node(pydot.Node(n['id'], **args))
actfrom[n['id']] = (n['id'],{})
actto[n['id']] = (n['id'],{})
node_ids = tuple(map(itemgetter('id'), nodes))
cr.execute('select * from wkf_transition where act_from IN %s ORDER BY sequence,id', (node_ids,))
transitions = cr.dictfetchall()
for t in transitions:
if not t['act_to'] in activities:
continue
args = {
'label': str(t['condition']).replace(' or ', '\\nor ')
.replace(' and ','\\nand ')
}
if t['signal']:
args['label'] += '\\n'+str(t['signal'])
args['style'] = 'bold'
if activities[t['act_from']]['split_mode']=='AND':
args['arrowtail']='box'
elif str(activities[t['act_from']]['split_mode'])=='OR ':
args['arrowtail']='inv'
if activities[t['act_to']]['join_mode']=='AND':
args['arrowhead']='crow'
if t['id'] in witm_trans:
args['color'] = 'red'
activity_from = actfrom[t['act_from']][1].get(t['signal'], actfrom[t['act_from']][0])
activity_to = actto[t['act_to']][1].get(t['signal'], actto[t['act_to']][0])
graph.add_edge(pydot.Edge( str(activity_from) ,str(activity_to), fontsize='10', **args))
cr.execute('select * from wkf_activity where flow_start=True and wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
start = cr.fetchone()[0]
cr.execute("select 'subflow.'||name,id from wkf_activity where flow_stop=True and wkf_id in ("+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
stop = cr.fetchall()
if stop:
stop = (stop[0][1], dict(stop))
else:
stop = ("stop",{})
return (start, {}), stop
def graph_instance_get(cr, graph, inst_id, nested=False):
cr.execute('select wkf_id from wkf_instance where id=%s', (inst_id,))
inst = cr.fetchall()
def workitem_get(instance):
cr.execute('select act_id,count(*) from wkf_workitem where inst_id=%s group by act_id', (instance,))
workitems = dict(cr.fetchall())
cr.execute('select subflow_id from wkf_workitem where inst_id=%s', (instance,))
for (subflow_id,) in cr.fetchall():
workitems.update(workitem_get(subflow_id))
return workitems
def witm_get(instance):
cr.execute("select trans_id from wkf_witm_trans where inst_id=%s", (instance,))
return set(t[0] for t in cr.fetchall())
processed_subflows = set()
graph_get(cr, graph, [x[0] for x in inst], nested, workitem_get(inst_id), witm_get(inst_id), processed_subflows)
#
# TODO: not clean: this is not safe for concurrent use!
#
class report_graph_instance(object):
def __init__(self, cr, uid, ids, data):
try:
import pydot
except Exception,e:
_logger.warning(
'Import Error for pydot, you will not be able to render workflows.\n'
'Consider Installing PyDot or dependencies: http://dkbza.org/pydot.html.')
raise e
self.done = False
try:
cr.execute('select * from wkf where osv=%s limit 1',
(data['model'],))
wkfinfo = cr.dictfetchone()
if not wkfinfo:
ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def
/Times-Roman findfont 50 scalefont setfont
1.5 inch 15 inch moveto
(No workflow defined) show
showpage'''
else:
cr.execute('select i.id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where res_id=%s and osv=%s',(data['id'],data['model']))
inst_ids = cr.fetchall()
if not inst_ids:
ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def
/Times-Roman findfont 50 scalefont setfont
1.5 inch 15 inch moveto
(No workflow instance defined) show
showpage'''
else:
graph = pydot.Dot(graph_name=data['model'].replace('.','_'),
fontsize='16',
label="""\\\n\\nWorkflow: %s\\n OSV: %s""" % (wkfinfo['name'],wkfinfo['osv']),
size='7.3, 10.1', center='1', ratio='auto', rotate='0', rankdir='TB',
)
for inst_id in inst_ids:
inst_id = inst_id[0]
graph_instance_get(cr, graph, inst_id, data.get('nested', False))
ps_string = graph.create(prog='dot', format='ps')
except Exception:
_logger.exception('Exception in call:')
# string is in PS, like the success message would have been
ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def
/Times-Roman findfont 50 scalefont setfont
1.5 inch 15 inch moveto
(No workflow available) show
showpage'''
if os.name == "nt":
prog = 'ps2pdf.bat'
else:
prog = 'ps2pdf'
args = (prog, '-', '-')
input, output = tools.exec_command_pipe(*args)
input.write(ps_string)
input.close()
self.result = output.read()
output.close()
self.done = True
def is_done(self):
return self.done
def get(self):
if self.done:
return self.result
else:
return None
class report_graph(report.interface.report_int):
def __init__(self, name, table):
report.interface.report_int.__init__(self, name)
self.table = table
def result(self):
if self.obj.is_done():
return True, self.obj.get(), 'pdf'
else:
return False, False, False
def create(self, cr, uid, ids, data, context=None):
self.obj = report_graph_instance(cr, uid, ids, data)
return self.obj.get(), 'pdf'
report_graph('report.workflow.instance.graph', 'ir.workflow')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ms-azure-cloudbroker/cloudbridge | cloudbridge/cloud/providers/aws/services.py | 1 | 27260 | """Services implemented by the AWS provider."""
import string
from botocore.exceptions import ClientError
from cloudbridge.cloud.base.resources import ClientPagedResultList
from cloudbridge.cloud.base.services import BaseBlockStoreService
from cloudbridge.cloud.base.services import BaseComputeService
from cloudbridge.cloud.base.services import BaseGatewayService
from cloudbridge.cloud.base.services import BaseImageService
from cloudbridge.cloud.base.services import BaseInstanceService
from cloudbridge.cloud.base.services import BaseInstanceTypesService
from cloudbridge.cloud.base.services import BaseKeyPairService
from cloudbridge.cloud.base.services import BaseNetworkService
from cloudbridge.cloud.base.services import BaseNetworkingService
from cloudbridge.cloud.base.services import BaseObjectStoreService
from cloudbridge.cloud.base.services import BaseRegionService
from cloudbridge.cloud.base.services import BaseRouterService
from cloudbridge.cloud.base.services import BaseSecurityGroupService
from cloudbridge.cloud.base.services import BaseSecurityService
from cloudbridge.cloud.base.services import BaseSnapshotService
from cloudbridge.cloud.base.services import BaseSubnetService
from cloudbridge.cloud.base.services import BaseVolumeService
from cloudbridge.cloud.interfaces.exceptions \
import InvalidConfigurationException
from cloudbridge.cloud.interfaces.resources import InstanceType
from cloudbridge.cloud.interfaces.resources import KeyPair
from cloudbridge.cloud.interfaces.resources import MachineImage
from cloudbridge.cloud.interfaces.resources import PlacementZone
from cloudbridge.cloud.interfaces.resources import SecurityGroup
from cloudbridge.cloud.interfaces.resources import Snapshot
from cloudbridge.cloud.interfaces.resources import Volume
import requests
from .helpers import BotoEC2Service
from .helpers import BotoS3Service
from .resources import AWSBucket
from .resources import AWSFloatingIP
from .resources import AWSInstance
from .resources import AWSInstanceType
from .resources import AWSInternetGateway
from .resources import AWSKeyPair
from .resources import AWSLaunchConfig
from .resources import AWSMachineImage
from .resources import AWSNetwork
from .resources import AWSRegion
from .resources import AWSRouter
from .resources import AWSSecurityGroup
from .resources import AWSSnapshot
from .resources import AWSSubnet
from .resources import AWSVolume
class AWSSecurityService(BaseSecurityService):
def __init__(self, provider):
super(AWSSecurityService, self).__init__(provider)
# Initialize provider services
self._key_pairs = AWSKeyPairService(provider)
self._security_groups = AWSSecurityGroupService(provider)
@property
def key_pairs(self):
return self._key_pairs
@property
def security_groups(self):
return self._security_groups
class AWSKeyPairService(BaseKeyPairService):
def __init__(self, provider):
super(AWSKeyPairService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSKeyPair,
boto_collection_name='key_pairs')
def get(self, key_pair_id):
return self.svc.get(key_pair_id)
def list(self, limit=None, marker=None):
return self.svc.list(limit=limit, marker=marker)
def find(self, name, limit=None, marker=None):
return self.svc.find(filter_name='key-name', filter_value=name,
limit=limit, marker=marker)
def create(self, name):
AWSKeyPair.assert_valid_resource_name(name)
return self.svc.create('create_key_pair', KeyName=name)
class AWSSecurityGroupService(BaseSecurityGroupService):
def __init__(self, provider):
super(AWSSecurityGroupService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSSecurityGroup,
boto_collection_name='security_groups')
def get(self, sg_id):
return self.svc.get(sg_id)
def list(self, limit=None, marker=None):
return self.svc.list(limit=limit, marker=marker)
def create(self, name, description, network_id):
AWSSecurityGroup.assert_valid_resource_name(name)
return self.svc.create('create_security_group', GroupName=name,
Description=description, VpcId=network_id)
def find(self, name, limit=None, marker=None):
return self.svc.find(filter_name='group-name', filter_value=name,
limit=limit, marker=marker)
def delete(self, group_id):
sg = self.svc.get(group_id)
if sg:
sg.delete()
class AWSBlockStoreService(BaseBlockStoreService):
def __init__(self, provider):
super(AWSBlockStoreService, self).__init__(provider)
# Initialize provider services
self._volume_svc = AWSVolumeService(self.provider)
self._snapshot_svc = AWSSnapshotService(self.provider)
@property
def volumes(self):
return self._volume_svc
@property
def snapshots(self):
return self._snapshot_svc
class AWSVolumeService(BaseVolumeService):
def __init__(self, provider):
super(AWSVolumeService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSVolume,
boto_collection_name='volumes')
def get(self, volume_id):
return self.svc.get(volume_id)
def find(self, name, limit=None, marker=None):
return self.svc.find(filter_name='tag:Name', filter_value=name,
limit=limit, marker=marker)
def list(self, limit=None, marker=None):
return self.svc.list(limit=limit, marker=marker)
def create(self, name, size, zone, snapshot=None, description=None):
AWSVolume.assert_valid_resource_name(name)
zone_id = zone.id if isinstance(zone, PlacementZone) else zone
snapshot_id = snapshot.id if isinstance(
snapshot, AWSSnapshot) and snapshot else snapshot
cb_vol = self.svc.create('create_volume', Size=size,
AvailabilityZone=zone_id,
SnapshotId=snapshot_id)
# Wait until ready to tag instance
cb_vol.wait_till_ready()
cb_vol.name = name
if description:
cb_vol.description = description
return cb_vol
class AWSSnapshotService(BaseSnapshotService):
def __init__(self, provider):
super(AWSSnapshotService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSSnapshot,
boto_collection_name='snapshots')
def get(self, snapshot_id):
return self.svc.get(snapshot_id)
def find(self, name, limit=None, marker=None):
return self.svc.find(filter_name='tag:Name', filter_value=name,
limit=limit, marker=marker)
def list(self, limit=None, marker=None):
return self.svc.list(limit=limit, marker=marker)
def create(self, name, volume, description=None):
"""
Creates a new snapshot of a given volume.
"""
AWSSnapshot.assert_valid_resource_name(name)
volume_id = volume.id if isinstance(volume, AWSVolume) else volume
cb_snap = self.svc.create('create_snapshot', VolumeId=volume_id)
# Wait until ready to tag instance
cb_snap.wait_till_ready()
cb_snap.name = name
        if description:
cb_snap.description = description
return cb_snap
class AWSObjectStoreService(BaseObjectStoreService):
def __init__(self, provider):
super(AWSObjectStoreService, self).__init__(provider)
self.svc = BotoS3Service(provider=self.provider,
cb_resource=AWSBucket,
boto_collection_name='buckets')
def get(self, bucket_id):
"""
Returns a bucket given its ID. Returns ``None`` if the bucket
does not exist.
"""
try:
# Make a call to make sure the bucket exists. There's an edge case
# where a 403 response can occur when the bucket exists but the
# user simply does not have permissions to access it. See below.
self.provider.s3_conn.meta.client.head_bucket(Bucket=bucket_id)
return AWSBucket(self.provider,
self.provider.s3_conn.Bucket(bucket_id))
except ClientError as e:
# If 403, it means the bucket exists, but the user does not have
# permissions to access the bucket. However, limited operations
# may be permitted (with a session token for example), so return a
# Bucket instance to allow further operations.
# http://stackoverflow.com/questions/32331456/using-boto-upload-file-to-s3-
# sub-folder-when-i-have-no-permissions-on-listing-fo
            if e.response['Error']['Code'] == '403':
                # boto3 reports error codes as strings; build the Bucket
                # resource lazily (no validation round-trip happens here).
                bucket = self.provider.s3_conn.Bucket(bucket_id)
                return AWSBucket(self.provider, bucket)
# For all other responses, it's assumed that the bucket does not exist.
return None
def find(self, name, limit=None, marker=None):
buckets = [bucket
for bucket in self
if name == bucket.name]
return ClientPagedResultList(self.provider, buckets,
limit=limit, marker=marker)
def list(self, limit=None, marker=None):
return self.svc.list(limit=limit, marker=marker)
def create(self, name, location=None):
AWSBucket.assert_valid_resource_name(name)
loc_constraint = location or self.provider.region_name
# Due to an API issue in S3, specifying us-east-1 as a
# LocationConstraint results in an InvalidLocationConstraint.
# Therefore, it must be special-cased and omitted altogether.
# See: https://github.com/boto/boto3/issues/125
if loc_constraint == 'us-east-1':
return self.svc.create('create_bucket', Bucket=name)
else:
return self.svc.create('create_bucket', Bucket=name,
CreateBucketConfiguration={
'LocationConstraint': loc_constraint
})
class AWSImageService(BaseImageService):
def __init__(self, provider):
super(AWSImageService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSMachineImage,
boto_collection_name='images')
def get(self, image_id):
return self.svc.get(image_id)
def find(self, name, limit=None, marker=None):
return self.svc.find(filter_name='name', filter_value=name,
limit=limit, marker=marker)
def list(self, limit=None, marker=None):
return self.svc.list(limit=limit, marker=marker)
class AWSComputeService(BaseComputeService):
def __init__(self, provider):
super(AWSComputeService, self).__init__(provider)
self._instance_type_svc = AWSInstanceTypesService(self.provider)
self._instance_svc = AWSInstanceService(self.provider)
self._region_svc = AWSRegionService(self.provider)
self._images_svc = AWSImageService(self.provider)
@property
def images(self):
return self._images_svc
@property
def instance_types(self):
return self._instance_type_svc
@property
def instances(self):
return self._instance_svc
@property
def regions(self):
return self._region_svc
class AWSInstanceService(BaseInstanceService):
def __init__(self, provider):
super(AWSInstanceService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSInstance,
boto_collection_name='instances')
def create(self, name, image, instance_type, subnet, zone=None,
key_pair=None, security_groups=None, user_data=None,
launch_config=None, **kwargs):
AWSInstance.assert_valid_resource_name(name)
image_id = image.id if isinstance(image, MachineImage) else image
instance_size = instance_type.id if \
isinstance(instance_type, InstanceType) else instance_type
subnet = (self.provider.networking.subnets.get(subnet)
if isinstance(subnet, str) else subnet)
zone_id = zone.id if isinstance(zone, PlacementZone) else zone
key_pair_name = key_pair.name if isinstance(
key_pair,
KeyPair) else key_pair
if launch_config:
bdm = self._process_block_device_mappings(launch_config, zone_id)
else:
bdm = None
subnet_id, zone_id, security_group_ids = \
self._resolve_launch_options(subnet, zone_id, security_groups)
placement = {'AvailabilityZone': zone_id} if zone_id else None
inst = self.svc.create('create_instances',
ImageId=image_id,
MinCount=1,
MaxCount=1,
KeyName=key_pair_name,
SecurityGroupIds=security_group_ids or None,
UserData=user_data,
InstanceType=instance_size,
Placement=placement,
BlockDeviceMappings=bdm,
SubnetId=subnet_id
)
if inst and len(inst) == 1:
# Wait until the resource exists
inst[0]._wait_till_exists()
# Tag the instance w/ the name
inst[0].name = name
return inst[0]
raise ValueError(
'Expected a single object response, got a list: %s' % inst)
def _resolve_launch_options(self, subnet=None, zone_id=None,
security_groups=None):
"""
Work out interdependent launch options.
Some launch options are required and interdependent so make sure
they conform to the interface contract.
:type subnet: ``Subnet``
:param subnet: Subnet object within which to launch.
:type zone_id: ``str``
:param zone_id: ID of the zone where the launch should happen.
:type security_groups: ``list`` of ``id``
        :param security_groups: List of security group IDs.
:rtype: triplet of ``str``
:return: Subnet ID, zone ID and security group IDs for launch.
:raise ValueError: In case a conflicting combination is found.
"""
if subnet:
# subnet's zone takes precedence
zone_id = subnet.zone.id
if isinstance(security_groups, list) and isinstance(
security_groups[0], SecurityGroup):
security_group_ids = [sg.id for sg in security_groups]
else:
security_group_ids = security_groups
return subnet.id, zone_id, security_group_ids
def _process_block_device_mappings(self, launch_config, zone=None):
"""
Processes block device mapping information
and returns a Boto BlockDeviceMapping object. If new volumes
are requested (source is None and destination is VOLUME), they will be
created and the relevant volume ids included in the mapping.
"""
bdml = []
# Assign letters from f onwards
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
        next_letter = iter(list(string.ascii_lowercase[5:]))
# assign ephemeral devices from 0 onwards
ephemeral_counter = 0
for device in launch_config.block_devices:
bdm = {}
if device.is_volume:
# Generate the device path
bdm['DeviceName'] = \
'/dev/sd' + ('a1' if device.is_root else next(next_letter))
ebs_def = {}
if isinstance(device.source, Snapshot):
ebs_def['SnapshotId'] = device.source.id
elif isinstance(device.source, Volume):
# TODO: We could create a snapshot from the volume
# and use that instead.
# Not supported
pass
elif isinstance(device.source, MachineImage):
# Not supported
pass
else:
# source is None, but destination is volume, therefore
# create a blank volume. This requires a size though.
if not device.size:
raise InvalidConfigurationException(
"The source is none and the destination is a"
" volume. Therefore, you must specify a size.")
ebs_def['DeleteOnTermination'] = device.delete_on_terminate
if device.size:
ebs_def['VolumeSize'] = device.size
if ebs_def:
bdm['Ebs'] = ebs_def
else: # device is ephemeral
bdm['VirtualName'] = 'ephemeral%s' % ephemeral_counter
# Append the config
bdml.append(bdm)
return bdml
def create_launch_config(self):
return AWSLaunchConfig(self.provider)
def get(self, instance_id):
return self.svc.get(instance_id)
def find(self, name, limit=None, marker=None):
return self.svc.find(filter_name='tag:Name', filter_value=name,
limit=limit, marker=marker)
def list(self, limit=None, marker=None):
return self.svc.list(limit=limit, marker=marker)
class AWSInstanceTypesService(BaseInstanceTypesService):
def __init__(self, provider):
super(AWSInstanceTypesService, self).__init__(provider)
@property
def instance_data(self):
"""
Fetch info about the available instances.
To update this information, update the file pointed to by the
``provider.AWS_INSTANCE_DATA_DEFAULT_URL`` above. The content for this
file should be obtained from this repo:
https://github.com/powdahound/ec2instances.info (in particular, this
file: https://raw.githubusercontent.com/powdahound/ec2instances.info/
master/www/instances.json).
TODO: Needs a caching function with timeout
"""
r = requests.get(self.provider.config.get(
"aws_instance_info_url",
self.provider.AWS_INSTANCE_DATA_DEFAULT_URL))
return r.json()
def list(self, limit=None, marker=None):
inst_types = [AWSInstanceType(self.provider, inst_type)
for inst_type in self.instance_data]
return ClientPagedResultList(self.provider, inst_types,
limit=limit, marker=marker)
class AWSRegionService(BaseRegionService):
def __init__(self, provider):
super(AWSRegionService, self).__init__(provider)
def get(self, region_id):
region = [r for r in self if r.id == region_id]
if region:
return region[0]
else:
return None
def list(self, limit=None, marker=None):
regions = [
AWSRegion(self.provider, region) for region in
self.provider.ec2_conn.meta.client.describe_regions()
.get('Regions', [])]
return ClientPagedResultList(self.provider, regions,
limit=limit, marker=marker)
@property
def current(self):
return self.get(self._provider.region_name)
class AWSNetworkingService(BaseNetworkingService):
def __init__(self, provider):
super(AWSNetworkingService, self).__init__(provider)
self._network_service = AWSNetworkService(self.provider)
self._subnet_service = AWSSubnetService(self.provider)
self._router_service = AWSRouterService(self.provider)
self._gateway_service = AWSGatewayService(self.provider)
@property
def networks(self):
return self._network_service
@property
def subnets(self):
return self._subnet_service
@property
def routers(self):
return self._router_service
@property
def gateways(self):
return self._gateway_service
class AWSNetworkService(BaseNetworkService):
def __init__(self, provider):
super(AWSNetworkService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSNetwork,
boto_collection_name='vpcs')
def get(self, network_id):
return self.svc.get(network_id)
def list(self, limit=None, marker=None):
return self.svc.list(limit=limit, marker=marker)
def find(self, name, limit=None, marker=None):
return self.svc.find(filter_name='tag:Name', filter_value=name,
limit=limit, marker=marker)
def create(self, name, cidr_block):
AWSNetwork.assert_valid_resource_name(name)
cb_net = self.svc.create('create_vpc', CidrBlock=cidr_block)
# Wait until ready to tag instance
cb_net.wait_till_ready()
if name:
cb_net.name = name
return cb_net
@property
def floating_ips(self):
self.svc_fip = BotoEC2Service(provider=self.provider,
cb_resource=AWSFloatingIP,
boto_collection_name='vpc_addresses')
return self.svc_fip.list()
def create_floating_ip(self):
ip = self.provider.ec2_conn.meta.client.allocate_address(
Domain='vpc')
return AWSFloatingIP(
self.provider,
self.provider.ec2_conn.VpcAddress(ip.get('AllocationId')))
class AWSSubnetService(BaseSubnetService):
def __init__(self, provider):
super(AWSSubnetService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSSubnet,
boto_collection_name='subnets')
def get(self, subnet_id):
return self.svc.get(subnet_id)
def list(self, network=None, limit=None, marker=None):
network_id = network.id if isinstance(network, AWSNetwork) else network
if network_id:
return self.svc.find(
filter_name='VpcId', filter_value=network_id,
limit=limit, marker=marker)
else:
return self.svc.list(limit=limit, marker=marker)
def find(self, name, limit=None, marker=None):
return self.svc.find(filter_name='tag:Name', filter_value=name,
limit=limit, marker=marker)
def create(self, name, network, cidr_block, zone=None):
AWSSubnet.assert_valid_resource_name(name)
network_id = network.id if isinstance(network, AWSNetwork) else network
subnet = self.svc.create('create_subnet',
VpcId=network_id,
CidrBlock=cidr_block,
AvailabilityZone=zone)
if name:
subnet.name = name
return subnet
def get_or_create_default(self, zone=None):
if zone:
snl = self.svc.find('availabilityZone', zone)
else:
snl = self.svc.list()
for sn in snl:
# pylint:disable=protected-access
if sn._subnet.default_for_az:
return sn
# No provider-default Subnet exists, look for a library-default one
for sn in snl:
# pylint:disable=protected-access
for tag in sn._subnet.tags or {}:
if (tag.get('Key') == 'Name' and
tag.get('Value') == AWSSubnet.CB_DEFAULT_SUBNET_NAME):
return sn
# No provider-default Subnet exists, try to create it (net + subnets)
default_net = self.provider.networking.networks.create(
name=AWSNetwork.CB_DEFAULT_NETWORK_NAME, cidr_block='10.0.0.0/16')
# Create a subnet in each of the region's zones
region = self.provider.compute.regions.get(self.provider.region_name)
default_sn = None
for i, z in enumerate(region.zones):
sn = self.create(AWSSubnet.CB_DEFAULT_SUBNET_NAME, default_net,
'10.0.{0}.0/24'.format(i), z.name)
if zone and zone == z.name:
default_sn = sn
# No specific zone was supplied; return the last created subnet
if not default_sn:
default_sn = sn
return default_sn
def delete(self, subnet):
subnet_id = subnet.id if isinstance(subnet, AWSSubnet) else subnet
return self.svc.delete(subnet_id)
class AWSRouterService(BaseRouterService):
"""For AWS, a CloudBridge router corresponds to an AWS Route Table."""
def __init__(self, provider):
super(AWSRouterService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSRouter,
boto_collection_name='route_tables')
def get(self, router_id):
return self.svc.get(router_id)
def find(self, name, limit=None, marker=None):
return self.svc.find(filter_name='tag:Name', filter_value=name,
limit=limit, marker=marker)
def list(self, limit=None, marker=None):
return self.svc.list(limit=limit, marker=marker)
def create(self, name, network):
AWSRouter.assert_valid_resource_name(name)
network_id = network.id if isinstance(network, AWSNetwork) else network
cb_router = self.svc.create('create_route_table', VpcId=network_id)
if name:
cb_router.name = name
return cb_router
class AWSGatewayService(BaseGatewayService):
def __init__(self, provider):
super(AWSGatewayService, self).__init__(provider)
self.svc = BotoEC2Service(provider=self.provider,
cb_resource=AWSInternetGateway,
boto_collection_name='internet_gateways')
def get_or_create_inet_gateway(self, name):
AWSInternetGateway.assert_valid_resource_name(name)
cb_gateway = self.svc.create('create_internet_gateway')
cb_gateway.name = name
return cb_gateway
def delete(self, gateway_id):
gateway = self.svc.get(gateway_id)
if gateway:
gateway.delete()
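# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal, hedged sketch wiring the services above together. It assumes a
# configured AWS provider instance is passed in; the resource names, CIDR
# blocks, AMI id and instance type are made up, and real calls would create
# billable AWS resources.
def _example_launch(provider):
    """Hypothetical helper: create a network, subnet and instance."""
    net = provider.networking.networks.create(
        name='cb-demo-net', cidr_block='10.0.0.0/16')
    sn = provider.networking.subnets.create(
        name='cb-demo-sn', network=net, cidr_block='10.0.0.0/24')
    img = provider.compute.images.get('ami-12345678')  # hypothetical AMI id
    return provider.compute.instances.create(
        name='cb-demo-inst', image=img, instance_type='t2.micro',
        subnet=sn, key_pair=None, security_groups=None)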
| mit |
MichaelDoyle/Diamond | src/collectors/postqueue/test/testpostqueue.py | 31 | 2029 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from postqueue import PostqueueCollector
##########################################################################
class TestPostqueueCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('PostqueueCollector', {
})
self.collector = PostqueueCollector(config, {})
def test_import(self):
self.assertTrue(PostqueueCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_emails_in_queue(self, publish_mock):
patch_collector = patch.object(
PostqueueCollector,
'get_postqueue_output',
Mock(return_value=self.getFixture(
'postqueue_emails').getvalue()))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
metrics = {
'count': 3
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_empty_queue(self, publish_mock):
patch_collector = patch.object(
PostqueueCollector,
'get_postqueue_output',
Mock(return_value=self.getFixture(
'postqueue_empty').getvalue()))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
metrics = {
'count': 0
}
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
| mit |
frank10704/DF_GCS_W | MissionPlanner-master/Lib/xml/dom/minicompat.py | 62 | 3440 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
try:
unicode
except NameError:
StringTypes = type(''),
else:
StringTypes = type(''), type(unicode(''))
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name)).im_func
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
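# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal example of the defproperty() pattern documented in the comment
# block at the top of this module; the class and attribute are made up.
class _ExampleNode(object):
    def _get_childNodes(self):
        return EmptyNodeList()

defproperty(_ExampleNode, "childNodes",
            doc="Read-only, always-empty child list (illustrative).")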
| gpl-3.0 |
memtoko/django | tests/bulk_create/tests.py | 58 | 6659 | from __future__ import unicode_literals
from operator import attrgetter
from django.db import connection
from django.test import (
TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import Country, Pizzeria, Restaurant, State, TwoFields
class BulkCreateTests(TestCase):
def setUp(self):
self.data = [
Country(name="United States of America", iso_two_letter="US"),
Country(name="The Netherlands", iso_two_letter="NL"),
Country(name="Germany", iso_two_letter="DE"),
Country(name="Czech Republic", iso_two_letter="CZ")
]
def test_simple(self):
created = Country.objects.bulk_create(self.data)
self.assertEqual(len(created), 4)
self.assertQuerysetEqual(Country.objects.order_by("-name"), [
"United States of America", "The Netherlands", "Germany", "Czech Republic"
], attrgetter("name"))
created = Country.objects.bulk_create([])
self.assertEqual(created, [])
self.assertEqual(Country.objects.count(), 4)
@skipUnlessDBFeature('has_bulk_insert')
def test_efficiency(self):
with self.assertNumQueries(1):
Country.objects.bulk_create(self.data)
def test_inheritance(self):
Restaurant.objects.bulk_create([
Restaurant(name="Nicholas's")
])
self.assertQuerysetEqual(Restaurant.objects.all(), [
"Nicholas's",
], attrgetter("name"))
with self.assertRaises(ValueError):
Pizzeria.objects.bulk_create([
Pizzeria(name="The Art of Pizza")
])
self.assertQuerysetEqual(Pizzeria.objects.all(), [])
self.assertQuerysetEqual(Restaurant.objects.all(), [
"Nicholas's",
], attrgetter("name"))
def test_non_auto_increment_pk(self):
State.objects.bulk_create([
State(two_letter_code=s)
for s in ["IL", "NY", "CA", "ME"]
])
self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [
"CA", "IL", "ME", "NY",
], attrgetter("two_letter_code"))
@skipUnlessDBFeature('has_bulk_insert')
def test_non_auto_increment_pk_efficiency(self):
with self.assertNumQueries(1):
State.objects.bulk_create([
State(two_letter_code=s)
for s in ["IL", "NY", "CA", "ME"]
])
self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [
"CA", "IL", "ME", "NY",
], attrgetter("two_letter_code"))
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
"""
        Zero as the id of an AutoField should raise an exception in MySQL,
        because MySQL does not allow zero as an automatic primary key.
"""
valid_country = Country(name='Germany', iso_two_letter='DE')
invalid_country = Country(id=0, name='Poland', iso_two_letter='PL')
with self.assertRaises(ValueError):
Country.objects.bulk_create([valid_country, invalid_country])
def test_batch_same_vals(self):
# SQLite had a problem where all the same-valued models were
# collapsed to one insert.
Restaurant.objects.bulk_create([
Restaurant(name='foo') for i in range(0, 2)
])
self.assertEqual(Restaurant.objects.count(), 2)
def test_large_batch(self):
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create([
TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)
])
self.assertEqual(TwoFields.objects.count(), 1001)
self.assertEqual(
TwoFields.objects.filter(f1__gte=450, f1__lte=550).count(),
101)
self.assertEqual(TwoFields.objects.filter(f2__gte=901).count(), 101)
@skipUnlessDBFeature('has_bulk_insert')
def test_large_single_field_batch(self):
# SQLite had a problem with more than 500 UNIONed selects in single
# query.
Restaurant.objects.bulk_create([
Restaurant() for i in range(0, 501)
])
@skipUnlessDBFeature('has_bulk_insert')
def test_large_batch_efficiency(self):
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create([
TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)
])
self.assertLess(len(connection.queries), 10)
def test_large_batch_mixed(self):
"""
Test inserting a large batch where objects with their primary key set
are mixed together with objects whose PK is unset.
"""
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create([
TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
for i in range(100000, 101000)])
self.assertEqual(TwoFields.objects.count(), 1000)
# We can't assume much about the ID's created, except that the above
# created IDs must exist.
id_range = range(100000, 101000, 2)
self.assertEqual(TwoFields.objects.filter(id__in=id_range).count(), 500)
self.assertEqual(TwoFields.objects.exclude(id__in=id_range).count(), 500)
@skipUnlessDBFeature('has_bulk_insert')
def test_large_batch_mixed_efficiency(self):
"""
Test inserting a large batch where objects with their primary key set
are mixed together with objects whose PK is unset.
"""
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create([
TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
for i in range(100000, 101000)])
self.assertLess(len(connection.queries), 10)
def test_explicit_batch_size(self):
objs = [TwoFields(f1=i, f2=i) for i in range(0, 4)]
TwoFields.objects.bulk_create(objs, 2)
self.assertEqual(TwoFields.objects.count(), len(objs))
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, len(objs))
self.assertEqual(TwoFields.objects.count(), len(objs))
@skipUnlessDBFeature('has_bulk_insert')
def test_explicit_batch_size_efficiency(self):
objs = [TwoFields(f1=i, f2=i) for i in range(0, 100)]
with self.assertNumQueries(2):
TwoFields.objects.bulk_create(objs, 50)
TwoFields.objects.all().delete()
with self.assertNumQueries(1):
TwoFields.objects.bulk_create(objs, len(objs))
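# Illustrative note, not part of the original tests: with 100 objects and
# batch_size=50, the block above expects ceil(100/50) == 2 INSERT statements;
# batch_size=len(objs) collapses the same load into a single query.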
| bsd-3-clause |
ldoktor/autotest | client/tests/lsb_dtk/lsb_dtk.py | 2 | 8109 | import os, glob, re, logging
from autotest.client import test, utils, package
from autotest.client.test_config import config_loader
from autotest.client.shared import error
class lsb_dtk(test.test):
"""
This autotest module runs the LSB test suite.
@copyright: IBM 2008
@author: Pavan Naregundi (pnaregun@in.ibm.com)
@author: Lucas Meneghel Rodrigues (lucasmr@br.ibm.com)
"""
version = 1
def initialize(self, config):
arch = utils.get_current_kernel_arch()
if arch in ['i386', 'i486', 'i586', 'i686', 'athlon']:
self.arch = 'ia32'
elif arch == 'ppc':
self.arch = 'ppc32'
elif arch in ['s390', 's390x', 'ia64', 'x86_64', 'ppc64']:
self.arch = arch
else:
e_msg = 'Architecture %s not supported by LSB' % arch
raise error.TestError(e_msg)
self.config = config_loader(config, self.tmpdir)
self.cachedir = os.path.join(self.bindir, 'cache')
if not os.path.isdir(self.cachedir):
os.makedirs(self.cachedir)
self.packages_installed = False
self.libraries_linked = False
def install_lsb_packages(self):
if not self.packages_installed:
# First, we download the LSB DTK manager package, worry about
# installing it later
dtk_manager_arch = self.config.get('dtk-manager', 'arch-%s' % self.arch)
dtk_manager_url = self.config.get('dtk-manager',
'tarball_url') % dtk_manager_arch
if not dtk_manager_url:
raise error.TestError('Could not get DTK manager URL from'
' configuration file')
dtk_md5 = self.config.get('dtk-manager', 'md5-%s' % self.arch)
if dtk_md5:
logging.info('Caching LSB DTK manager RPM')
dtk_manager_pkg = utils.unmap_url_cache(self.cachedir,
dtk_manager_url,
dtk_md5)
else:
raise error.TestError('Could not find DTK manager package md5,'
' cannot cache DTK manager tarball')
# Get LSB tarball, cache it and uncompress under autotest srcdir
if self.config.get('lsb', 'override_default_url') == 'no':
lsb_url = self.config.get('lsb', 'tarball_url') % self.arch
else:
lsb_url = self.config.get('lsb', 'tarball_url_alt') % self.arch
if not lsb_url:
raise error.TestError('Could not get LSB URL from configuration'
' file')
md5_key = 'md5-%s' % self.arch
lsb_md5 = self.config.get('lsb', md5_key)
if lsb_md5:
logging.info('Caching LSB tarball')
lsb_pkg = utils.unmap_url_cache(self.cachedir, lsb_url, lsb_md5)
else:
raise error.TestError('Could not find LSB package md5, cannot'
' cache LSB tarball')
utils.extract_tarball_to_dir(lsb_pkg, self.srcdir)
# Lets load a file that contains the list of RPMs
os.chdir(self.srcdir)
if not os.path.isfile('inst-config'):
raise IOError('Could not find file with package info,'
' inst-config')
rpm_file_list = open('inst-config', 'r')
pkg_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
lsb_pkg_list = []
for line in rpm_file_list.readlines():
try:
# We will install lsb-dtk-manager separately, so we can remove
# it from the list of packages
if 'lsb-dtk-manager' not in line:
line = re.findall(pkg_pattern, line)[0]
lsb_pkg_list.append(line)
except Exception:
# If we don't get a match, no problem
pass
# Lets figure out the host distro
distro_pkg_support = package.os_support()
if os.path.isfile('/etc/debian_version') and \
distro_pkg_support['dpkg']:
logging.debug('Debian based distro detected')
if distro_pkg_support['conversion']:
logging.debug('Package conversion supported')
distro_type = 'debian-based'
else:
raise EnvironmentError('Package conversion not supported. '
'Cannot handle LSB package'
' installation')
elif distro_pkg_support['rpm']:
logging.debug('Red Hat based distro detected')
distro_type = 'redhat-based'
else:
logging.error('OS does not seem to be red hat or debian based')
raise EnvironmentError('Cannot handle LSB package installation')
# According to the host distro detection, we can install the packages
# using the list previously assembled
if distro_type == 'redhat-based':
logging.info('Installing LSB RPM packages')
package.install(dtk_manager_pkg)
for lsb_rpm in lsb_pkg_list:
package.install(lsb_rpm, nodeps=True)
elif distro_type == 'debian-based':
logging.info('Remember that you must have the following lsb'
' compliance packages installed:')
logging.info('lsb-core lsb-cxx lsb-graphics lsb-desktop lsb-qt4'
' lsb-languages lsb-multimedia lsb-printing')
logging.info('Converting and installing LSB packages')
dtk_manager_dpkg = package.convert(dtk_manager_pkg, 'dpkg')
package.install(dtk_manager_dpkg)
for lsb_rpm in lsb_pkg_list:
lsb_dpkg = package.convert(lsb_rpm, 'dpkg')
package.install(lsb_dpkg, nodeps=True)
self.packages_installed = True
def link_lsb_libraries(self):
if not self.libraries_linked:
logging.info('Linking LSB libraries')
libdir_key = 'libdir-%s' % self.arch
os_libdir = self.config.get('lib', libdir_key)
if not os_libdir:
raise TypeError('Could not find OS lib dir from conf file')
lib_key = 'lib-%s' % self.arch
lib_list_raw = self.config.get('lib', lib_key)
if not lib_list_raw:
raise TypeError('Could not find library list from conf file')
lib_list = eval(lib_list_raw)
# Remove any previous ld-lsb*.so symbolic links
lsb_libs = glob.glob('%s/ld-lsb*.so*' % os_libdir)
for lib in lsb_libs:
os.remove(lib)
# Get the base library that we'll use to recreate the symbolic links
system_lib = glob.glob('%s/ld-2*.so*' % os_libdir)[0]
# Now just link the system lib that we just found to each one of the
# needed LSB libraries that we provided on the conf file
for lsb_lib in lib_list:
# Get the library absolute path
lsb_lib = os.path.join(os_libdir, lsb_lib)
# Link the library system_lib -> lsb_lib
os.symlink(system_lib, lsb_lib)
self.libraries_linked = True
def run_once(self, args='all'):
self.install_lsb_packages()
self.link_lsb_libraries()
main_script_path = self.config.get('lsb', 'main_script_path')
logfile = os.path.join(self.resultsdir, 'lsb.log')
log_arg = '-r %s' % (logfile)
args = args + ' ' + log_arg
cmd = os.path.join(self.srcdir, main_script_path) + ' ' + args
logging.info('Executing LSB main test script')
utils.system(cmd)
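# Illustrative note, not part of the original test: pkg_pattern in
# install_lsb_packages() pulls bare RPM file names out of free-form lines,
# e.g. re.findall(pkg_pattern, 'Install lsb-core-4.0-2.ia32.rpm now')
# returns ['lsb-core-4.0-2.ia32.rpm'] (package name is made up).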
| gpl-2.0 |
ykaneko/neutron | neutron/plugins/nicira/api_client/request_eventlet.py | 12 | 9190 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
import eventlet
import httplib
import json
import logging
import urllib
from neutron.plugins.nicira.api_client import request
eventlet.monkey_patch()
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
USER_AGENT = "NVP Neutron eventlet client/2.0"
class NvpApiRequestEventlet(request.NvpApiRequest):
'''Eventlet-based ApiRequest class.
This class will form the basis for eventlet-based ApiRequest classes
(e.g. those used by the Neutron NVP Plugin).
'''
# Maximum number of green threads present in the system at one time.
API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE
# Pool of green threads. One green thread is allocated per incoming
# request. Incoming requests will block when the pool is empty.
API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE)
# A unique id is assigned to each incoming request. When the current
# request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0.
MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID
# The request id for the next incoming request.
CURRENT_REQUEST_ID = 0
def __init__(self, nvp_api_client, url, method="GET", body=None,
headers=None,
request_timeout=request.DEFAULT_REQUEST_TIMEOUT,
retries=request.DEFAULT_RETRIES,
auto_login=True,
redirects=request.DEFAULT_REDIRECTS,
http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None):
'''Constructor.'''
self._api_client = nvp_api_client
self._url = url
self._method = method
self._body = body
self._headers = headers or {}
self._request_timeout = request_timeout
self._retries = retries
self._auto_login = auto_login
self._redirects = redirects
self._http_timeout = http_timeout
self._client_conn = client_conn
self._abort = False
self._request_error = None
if "User-Agent" not in self._headers:
self._headers["User-Agent"] = USER_AGENT
self._green_thread = None
# Retrieve and store this instance's unique request id.
self._request_id = NvpApiRequestEventlet.CURRENT_REQUEST_ID
# Update the class variable that tracks request id.
# Request IDs wrap around at MAXIMUM_REQUEST_ID
next_request_id = self._request_id + 1
next_request_id %= NvpApiRequestEventlet.MAXIMUM_REQUEST_ID
NvpApiRequestEventlet.CURRENT_REQUEST_ID = next_request_id
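        # e.g. request ids run 0 .. MAXIMUM_REQUEST_ID - 1; the request after
        # id MAXIMUM_REQUEST_ID - 1 is assigned id 0 again.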
@classmethod
def _spawn(cls, func, *args, **kwargs):
'''Allocate a green thread from the class pool.'''
return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs)
def spawn(self, func, *args, **kwargs):
'''Spawn a new green thread with the supplied function and args.'''
return self.__class__._spawn(func, *args, **kwargs)
@classmethod
def joinall(cls):
'''Wait for all outstanding requests to complete.'''
return cls.API_REQUEST_POOL.waitall()
def join(self):
'''Wait for instance green thread to complete.'''
if self._green_thread is not None:
return self._green_thread.wait()
return Exception(_('Joining an invalid green thread'))
def start(self):
'''Start request processing.'''
self._green_thread = self.spawn(self._run)
def copy(self):
'''Return a copy of this request instance.'''
return NvpApiRequestEventlet(
self._api_client, self._url, self._method, self._body,
self._headers, self._request_timeout, self._retries,
self._auto_login, self._redirects, self._http_timeout)
def _run(self):
'''Method executed within green thread.'''
if self._request_timeout:
# No timeout exception escapes the with block.
with eventlet.timeout.Timeout(self._request_timeout, False):
return self._handle_request()
LOG.info(_('[%d] Request timeout.'), self._rid())
self._request_error = Exception(_('Request timeout'))
return None
else:
return self._handle_request()
def _handle_request(self):
'''First level request handling.'''
attempt = 0
response = None
while response is None and attempt <= self._retries:
attempt += 1
req = self.spawn(self._issue_request).wait()
# automatically raises any exceptions returned.
if isinstance(req, httplib.HTTPResponse):
if attempt <= self._retries and not self._abort:
if (req.status == httplib.UNAUTHORIZED
or req.status == httplib.FORBIDDEN):
continue
# else fall through to return the error code
LOG.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'"
": %(status)s"),
{'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': req.status})
self._request_error = None
response = req
else:
LOG.info(_('[%(rid)d] Error while handling request: %(req)s'),
{'rid': self._rid(), 'req': req})
self._request_error = req
response = None
return response
class NvpLoginRequestEventlet(NvpApiRequestEventlet):
'''Process a login request.'''
def __init__(self, nvp_client, user, password, client_conn=None,
headers=None):
if headers is None:
headers = {}
headers.update({"Content-Type": "application/x-www-form-urlencoded"})
body = urllib.urlencode({"username": user, "password": password})
NvpApiRequestEventlet.__init__(
self, nvp_client, "/ws.v1/login", "POST", body, headers,
auto_login=False, client_conn=client_conn)
def session_cookie(self):
if self.successful():
return self.value.getheader("Set-Cookie")
return None
class NvpGetApiProvidersRequestEventlet(NvpApiRequestEventlet):
'''Get a list of API providers.'''
def __init__(self, nvp_client):
url = "/ws.v1/control-cluster/node?fields=roles"
NvpApiRequestEventlet.__init__(
self, nvp_client, url, "GET", auto_login=True)
def api_providers(self):
"""Parse api_providers from response.
Returns: api_providers in [(host, port, is_ssl), ...] format
"""
def _provider_from_listen_addr(addr):
# (pssl|ptcp):<ip>:<port> => (host, port, is_ssl)
parts = addr.split(':')
return (parts[1], int(parts[2]), parts[0] == 'pssl')
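            # e.g. _provider_from_listen_addr('pssl:10.0.0.5:443') returns
            # ('10.0.0.5', 443, True) (address is illustrative).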
try:
if self.successful():
ret = []
body = json.loads(self.value.body)
for node in body.get('results', []):
for role in node.get('roles', []):
if role.get('role') == 'api_provider':
addr = role.get('listen_addr')
if addr:
ret.append(_provider_from_listen_addr(addr))
return ret
except Exception as e:
LOG.warn(_("[%(rid)d] Failed to parse API provider: %(e)s"),
{'rid': self._rid(), 'e': e})
# intentionally fall through
return None
class NvpGenericRequestEventlet(NvpApiRequestEventlet):
'''Handle a generic request.'''
def __init__(self, nvp_client, method, url, body, content_type,
auto_login=False,
request_timeout=request.DEFAULT_REQUEST_TIMEOUT,
http_timeout=request.DEFAULT_HTTP_TIMEOUT,
retries=request.DEFAULT_RETRIES,
redirects=request.DEFAULT_REDIRECTS):
headers = {"Content-Type": content_type}
NvpApiRequestEventlet.__init__(
self, nvp_client, url, method, body, headers,
request_timeout=request_timeout, retries=retries,
auto_login=auto_login, redirects=redirects,
http_timeout=http_timeout)
def session_cookie(self):
if self.successful():
return self.value.getheader("Set-Cookie")
return None
# Register subclasses
request.NvpApiRequest.register(NvpApiRequestEventlet)
| apache-2.0 |
eunchong/build | third_party/jinja2/ext/inlinegettext.py | 85 | 2623 | # -*- coding: utf-8 -*-
"""
Inline Gettext
~~~~~~~~~~~~~~
An example extension for Jinja2 that supports inline gettext calls.
Requires the i18n extension to be loaded.
:copyright: (c) 2009 by the Jinja Team.
:license: BSD.
"""
import re
from jinja2.ext import Extension
from jinja2.lexer import Token, count_newlines
from jinja2.exceptions import TemplateSyntaxError
_outside_re = re.compile(r'\\?(gettext|_)\(')
_inside_re = re.compile(r'\\?[()]')
class InlineGettext(Extension):
"""This extension implements support for inline gettext blocks::
<h1>_(Welcome)</h1>
<p>_(This is a paragraph)</p>
Requires the i18n extension to be loaded and configured.
"""
def filter_stream(self, stream):
paren_stack = 0
for token in stream:
if token.type != 'data':
yield token
continue
pos = 0
lineno = token.lineno
while 1:
if not paren_stack:
match = _outside_re.search(token.value, pos)
else:
match = _inside_re.search(token.value, pos)
if match is None:
break
new_pos = match.start()
if new_pos > pos:
preval = token.value[pos:new_pos]
yield Token(lineno, 'data', preval)
lineno += count_newlines(preval)
gtok = match.group()
if gtok[0] == '\\':
yield Token(lineno, 'data', gtok[1:])
elif not paren_stack:
yield Token(lineno, 'block_begin', None)
yield Token(lineno, 'name', 'trans')
yield Token(lineno, 'block_end', None)
paren_stack = 1
else:
if gtok == '(' or paren_stack > 1:
yield Token(lineno, 'data', gtok)
paren_stack += gtok == ')' and -1 or 1
if not paren_stack:
yield Token(lineno, 'block_begin', None)
yield Token(lineno, 'name', 'endtrans')
yield Token(lineno, 'block_end', None)
pos = match.end()
if pos < len(token.value):
yield Token(lineno, 'data', token.value[pos:])
if paren_stack:
raise TemplateSyntaxError('unclosed gettext expression',
token.lineno, stream.name,
stream.filename)
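if __name__ == '__main__':
    # Minimal usage sketch, not part of the original example: wire the
    # extension into an environment alongside the required i18n extension.
    # The template string is made up.
    from jinja2 import Environment
    env = Environment(extensions=['jinja2.ext.i18n', InlineGettext])
    env.install_null_translations()
    # filter_stream above rewrites _(Hello World) into a trans block, so
    # with null translations this prints '<h1>Hello World</h1>'.
    print env.from_string('<h1>_(Hello World)</h1>').render()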
| bsd-3-clause |
sajuptpm/neutron-ipam | neutron/extensions/vpnaas.py | 8 | 18111 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
import abc
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron.plugins.common import constants
from neutron.services.service_base import ServicePluginBase
class VPNServiceNotFound(qexception.NotFound):
message = _("VPNService %(vpnservice_id)s could not be found")
class IPsecSiteConnectionNotFound(qexception.NotFound):
message = _("ipsec_site_connection %(ipsecsite_conn_id)s not found")
class IPsecSiteConnectionDpdIntervalValueError(qexception.InvalidInput):
message = _("ipsec_site_connection %(attr)s is "
"equal to or less than dpd_interval")
class IPsecSiteConnectionMtuError(qexception.InvalidInput):
message = _("ipsec_site_connection MTU %(mtu)d is too small "
"for ipv%(version)s")
class IKEPolicyNotFound(qexception.NotFound):
message = _("IKEPolicy %(ikepolicy_id)s could not be found")
class IPsecPolicyNotFound(qexception.NotFound):
message = _("IPsecPolicy %(ipsecpolicy_id)s could not be found")
class IKEPolicyInUse(qexception.InUse):
message = _("IKEPolicy %(ikepolicy_id)s is still in use")
class VPNServiceInUse(qexception.InUse):
message = _("VPNService %(vpnservice_id)s is still in use")
class RouterInUseByVPNService(qexception.InUse):
message = _("Router %(router_id)s is used by VPNService %(vpnservice_id)s")
class VPNStateInvalidToUpdate(qexception.BadRequest):
message = _("Invalid state %(state)s of vpnaas resource %(id)s"
" for updating")
class IPsecPolicyInUse(qexception.InUse):
message = _("IPsecPolicy %(ipsecpolicy_id)s is still in use")
class DeviceDriverImportError(qexception.NeutronException):
message = _("Can not load driver :%(device_driver)s")
class SubnetIsNotConnectedToRouter(qexception.BadRequest):
message = _("Subnet %(subnet_id)s is not "
"connected to Router %(router_id)s")
class RouterIsNotExternal(qexception.BadRequest):
message = _("Router %(router_id)s has no external network gateway set")
vpn_supported_initiators = ['bi-directional', 'response-only']
vpn_supported_encryption_algorithms = ['3des', 'aes-128',
'aes-192', 'aes-256']
vpn_dpd_supported_actions = [
'hold', 'clear', 'restart', 'restart-by-peer', 'disabled'
]
vpn_supported_transform_protocols = ['esp', 'ah', 'ah-esp']
vpn_supported_encapsulation_mode = ['tunnel', 'transport']
#TODO(nati) add kilobytes when we support it
vpn_supported_lifetime_units = ['seconds']
vpn_supported_pfs = ['group2', 'group5', 'group14']
vpn_supported_ike_versions = ['v1', 'v2']
vpn_supported_auth_mode = ['psk']
vpn_supported_auth_algorithms = ['sha1']
vpn_supported_phase1_negotiation_mode = ['main']
vpn_lifetime_limits = (60, attr.UNLIMITED)
positive_int = (0, attr.UNLIMITED)
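# e.g. with vpn_lifetime_limits == (60, attr.UNLIMITED), a lifetime 'value'
# of 59 fails the 'type:range' validation used below while 3600 passes
# (illustrative numbers).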
RESOURCE_ATTRIBUTE_MAP = {
'vpnservices': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'router_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'ipsec_site_connections': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'peer_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'peer_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'peer_cidrs': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_to_list,
'validate': {'type:subnet_list': None},
'is_visible': True},
'route_mode': {'allow_post': False, 'allow_put': False,
'default': 'static',
'is_visible': True},
'mtu': {'allow_post': True, 'allow_put': True,
'default': '1500',
'validate': {'type:range': positive_int},
'convert_to': attr.convert_to_int,
'is_visible': True},
'initiator': {'allow_post': True, 'allow_put': True,
'default': 'bi-directional',
'validate': {'type:values': vpn_supported_initiators},
'is_visible': True},
'auth_mode': {'allow_post': False, 'allow_put': False,
'default': 'psk',
'validate': {'type:values': vpn_supported_auth_mode},
'is_visible': True},
'psk': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'dpd': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'is_visible': True,
'default': {},
'validate': {
'type:dict_or_empty': {
'actions': {
'type:values': vpn_dpd_supported_actions,
},
'interval': {
'type:range': positive_int
},
'timeout': {
'type:range': positive_int
}}}},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'vpnservice_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'ikepolicy_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'ipsecpolicy_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True}
},
'ipsecpolicies': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'transform_protocol': {
'allow_post': True,
'allow_put': True,
'default': 'esp',
'validate': {
'type:values': vpn_supported_transform_protocols},
'is_visible': True},
'auth_algorithm': {
'allow_post': True,
'allow_put': True,
'default': 'sha1',
'validate': {
'type:values': vpn_supported_auth_algorithms
},
'is_visible': True},
'encryption_algorithm': {
'allow_post': True,
'allow_put': True,
'default': 'aes-128',
'validate': {
'type:values': vpn_supported_encryption_algorithms
},
'is_visible': True},
'encapsulation_mode': {
'allow_post': True,
'allow_put': True,
'default': 'tunnel',
'validate': {
'type:values': vpn_supported_encapsulation_mode
},
'is_visible': True},
'lifetime': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'default': {},
'validate': {
'type:dict_or_empty': {
'units': {
'type:values': vpn_supported_lifetime_units,
},
'value': {
'type:range': vpn_lifetime_limits
}}},
'is_visible': True},
'pfs': {'allow_post': True, 'allow_put': True,
'default': 'group5',
'validate': {'type:values': vpn_supported_pfs},
'is_visible': True}
},
'ikepolicies': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'auth_algorithm': {'allow_post': True, 'allow_put': True,
'default': 'sha1',
'validate': {
'type:values': vpn_supported_auth_algorithms},
'is_visible': True},
'encryption_algorithm': {
'allow_post': True, 'allow_put': True,
'default': 'aes-128',
'validate': {'type:values': vpn_supported_encryption_algorithms},
'is_visible': True},
'phase1_negotiation_mode': {
'allow_post': True, 'allow_put': True,
'default': 'main',
'validate': {
'type:values': vpn_supported_phase1_negotiation_mode
},
'is_visible': True},
'lifetime': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'default': {},
'validate': {
'type:dict_or_empty': {
'units': {
'type:values': vpn_supported_lifetime_units,
},
'value': {
'type:range': vpn_lifetime_limits,
}}},
'is_visible': True},
'ike_version': {'allow_post': True, 'allow_put': True,
'default': 'v1',
'validate': {
'type:values': vpn_supported_ike_versions},
'is_visible': True},
'pfs': {'allow_post': True, 'allow_put': True,
'default': 'group5',
'validate': {'type:values': vpn_supported_pfs},
'is_visible': True}
}
}
class Vpnaas(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "VPN service"
@classmethod
def get_alias(cls):
return "vpnaas"
@classmethod
def get_description(cls):
return "Extension for VPN service"
@classmethod
def get_namespace(cls):
return "https://wiki.openstack.org/Neutron/VPNaaS"
@classmethod
def get_updated(cls):
return "2013-05-29T10:00:00-00:00"
@classmethod
def get_resources(cls):
special_mappings = {'ikepolicies': 'ikepolicy',
'ipsecpolicies': 'ipsecpolicy'}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, RESOURCE_ATTRIBUTE_MAP)
plural_mappings['peer_cidrs'] = 'peer_cidr'
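        # e.g. plural_mappings now maps 'vpnservices' -> 'vpnservice',
        # 'ikepolicies' -> 'ikepolicy', 'ipsecpolicies' -> 'ipsecpolicy',
        # 'ipsec_site_connections' -> 'ipsec_site_connection', and
        # 'peer_cidrs' -> 'peer_cidr'.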
attr.PLURALS.update(plural_mappings)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.VPN,
register_quota=True,
translate_name=True)
@classmethod
def get_plugin_interface(cls):
return VPNPluginBase
def update_attributes_map(self, attributes):
super(Vpnaas, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class VPNPluginBase(ServicePluginBase):
def get_plugin_name(self):
return constants.VPN
def get_plugin_type(self):
return constants.VPN
def get_plugin_description(self):
return 'VPN service plugin'
@abc.abstractmethod
def get_vpnservices(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_vpnservice(self, context, vpnservice_id, fields=None):
pass
@abc.abstractmethod
def create_vpnservice(self, context, vpnservice):
pass
@abc.abstractmethod
def update_vpnservice(self, context, vpnservice_id, vpnservice):
pass
@abc.abstractmethod
def delete_vpnservice(self, context, vpnservice_id):
pass
@abc.abstractmethod
def get_ipsec_site_connections(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_ipsec_site_connection(self, context,
ipsecsite_conn_id, fields=None):
pass
@abc.abstractmethod
def create_ipsec_site_connection(self, context, ipsec_site_connection):
pass
@abc.abstractmethod
def update_ipsec_site_connection(self, context,
ipsecsite_conn_id, ipsec_site_connection):
pass
@abc.abstractmethod
def delete_ipsec_site_connection(self, context, ipsecsite_conn_id):
pass
@abc.abstractmethod
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
pass
@abc.abstractmethod
def get_ikepolicies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def create_ikepolicy(self, context, ikepolicy):
pass
@abc.abstractmethod
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
pass
@abc.abstractmethod
def delete_ikepolicy(self, context, ikepolicy_id):
pass
@abc.abstractmethod
def get_ipsecpolicies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
pass
@abc.abstractmethod
def create_ipsecpolicy(self, context, ipsecpolicy):
pass
@abc.abstractmethod
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
pass
@abc.abstractmethod
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
pass
| apache-2.0 |
kidburglar/youtube-dl | youtube_dl/extractor/lcp.py | 80 | 2953 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .arkena import ArkenaIE
class LcpPlayIE(ArkenaIE):
_VALID_URL = r'https?://play\.lcp\.fr/embed/(?P<id>[^/]+)/(?P<account_id>[^/]+)/[^/]+/[^/]+'
_TESTS = [{
'url': 'http://play.lcp.fr/embed/327336/131064/darkmatter/0',
'md5': 'b8bd9298542929c06c1c15788b1f277a',
'info_dict': {
'id': '327336',
'ext': 'mp4',
'title': '327336',
'timestamp': 1456391602,
'upload_date': '20160225',
},
'params': {
'skip_download': True,
},
}]
class LcpIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?lcp\.fr/(?:[^/]+/)*(?P<id>[^/]+)'
_TESTS = [{
# arkena embed
'url': 'http://www.lcp.fr/la-politique-en-video/schwartzenberg-prg-preconise-francois-hollande-de-participer-une-primaire',
'md5': 'b8bd9298542929c06c1c15788b1f277a',
'info_dict': {
'id': 'd56d03e9',
'ext': 'mp4',
'title': 'Schwartzenberg (PRG) préconise à François Hollande de participer à une primaire à gauche',
'description': 'md5:96ad55009548da9dea19f4120c6c16a8',
'timestamp': 1456488895,
'upload_date': '20160226',
},
'params': {
'skip_download': True,
},
}, {
# dailymotion live stream
'url': 'http://www.lcp.fr/le-direct',
'info_dict': {
'id': 'xji3qy',
'ext': 'mp4',
'title': 'La Chaine Parlementaire (LCP), Live TNT',
'description': 'md5:5c69593f2de0f38bd9a949f2c95e870b',
'uploader': 'LCP',
'uploader_id': 'xbz33d',
'timestamp': 1308923058,
'upload_date': '20110624',
},
'params': {
# m3u8 live stream
'skip_download': True,
},
}, {
'url': 'http://www.lcp.fr/emissions/277792-les-volontaires',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
play_url = self._search_regex(
r'<iframe[^>]+src=(["\'])(?P<url>%s?(?:(?!\1).)*)\1' % LcpPlayIE._VALID_URL,
webpage, 'play iframe', default=None, group='url')
if not play_url:
return self.url_result(url, 'Generic')
title = self._og_search_title(webpage, default=None) or self._html_search_meta(
'twitter:title', webpage, fatal=True)
description = self._html_search_meta(
('description', 'twitter:description'), webpage)
return {
'_type': 'url_transparent',
'ie_key': LcpPlayIE.ie_key(),
'url': play_url,
'display_id': display_id,
'title': title,
'description': description,
}
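# Illustrative note, not part of the original extractor: a '_type' of
# 'url_transparent' hands the embed URL to LcpPlayIE for the actual
# extraction while keeping the title/description scraped from this page.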
| unlicense |
zhan-xiong/buck | programs/buck_tool.py | 1 | 24014 | from __future__ import print_function
import errno
import glob
import json
import logging
import os
import platform
import shlex
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
import traceback
import uuid
from pynailgun import NailgunConnection, NailgunException
from timing import monotonic_time_nanos
from tracing import Tracing
from subprocutils import check_output, which
BUCKD_CLIENT_TIMEOUT_MILLIS = 60000
GC_MAX_PAUSE_TARGET = 15000
JAVA_MAX_HEAP_SIZE_MB = 1000
# While waiting for the daemon to terminate, print a message at most
# every DAEMON_BUSY_MESSAGE_SECONDS seconds.
DAEMON_BUSY_MESSAGE_SECONDS = 1.0
class Resource(object):
"""Describes a resource used by this driver.
:ivar name: logical name of the resource
:ivar executable: whether the resource should/needs execute permissions
:ivar basename: required basename of the resource
"""
def __init__(self, name, executable=False, basename=None):
self.name = name
self.executable = executable
self.basename = name if basename is None else basename
# Resource that get propagated to buck via system properties.
EXPORTED_RESOURCES = [
Resource("testrunner_classes"),
Resource("path_to_asm_jar"),
Resource("logging_config_file"),
Resource("path_to_rawmanifest_py", basename='rawmanifest.py'),
Resource("path_to_pathlib_py", basename='pathlib.py'),
Resource("path_to_intellij_py"),
Resource("path_to_pex"),
Resource("path_to_pywatchman"),
Resource("path_to_typing"),
Resource("path_to_sh_binary_template"),
Resource("jacoco_agent_jar"),
Resource("report_generator_jar"),
Resource("path_to_static_content"),
Resource("path_to_pex", executable=True),
Resource("dx"),
Resource("android_agent_path"),
Resource("buck_build_type_info"),
Resource("native_exopackage_fake_path"),
]
class CommandLineArgs:
def __init__(self, cmdline):
self.args = cmdline[1:]
self.buck_options = []
self.command = None
self.command_options = []
for arg in self.args:
if (self.command is not None):
self.command_options.append(arg)
elif arg[:1] == "-":
self.buck_options.append(arg)
else:
self.command = arg
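        # e.g. for `buck --verbose build //foo:bar --deep` this yields
        # buck_options == ['--verbose'], command == 'build', and
        # command_options == ['//foo:bar', '--deep'] (target name made up).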
# Whether this is a help command that doesn't run a build
# n.b. 'buck --help clean' is *not* currently a help command
# n.b. 'buck --version' *is* a help command
def is_help(self):
return self.command is None or "--help" in self.command_options
def is_version(self):
return self.command is None and "--version" in self.buck_options
class RestartBuck(Exception):
pass
class BuckToolException(Exception):
pass
class ExecuteTarget(Exception):
def __init__(self, path, argv, envp, cwd):
self._path = path
self._argv = argv
self._envp = envp
self._cwd = cwd
def execve(self):
# Restore default handling of SIGPIPE. See https://bugs.python.org/issue1652.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
os.chdir(self._cwd)
os.execvpe(self._path, self._argv, self._envp)
class JvmCrashLogger(object):
def __init__(self, buck_tool, project_root):
self._buck_tool = buck_tool
self._project_root = project_root
def __enter__(self):
self.jvm_errors_before = self._get_jvm_errors()
def __exit__(self, type, value, traceback):
new_errors = self._get_jvm_errors() - self.jvm_errors_before
if new_errors:
self._log_jvm_errors(new_errors)
def _get_jvm_errors(self):
return set(glob.glob(os.path.join(self._project_root, 'hs_err_pid*.log')))
@staticmethod
def _format_jvm_errors(fp):
errors = []
keep = False
for line in fp:
if line.startswith('Stack:'):
keep = True
if line.startswith('--------------- P R O C E S S ---------------'):
break
if keep:
errors.append(line)
message = 'JVM native crash: ' + (errors[2] if len(errors) > 2 else '')
return message, ''.join(errors)
def _log_jvm_errors(self, errors):
for file in errors:
with open(file, 'r') as f:
message, loglines = self._format_jvm_errors(f)
logging.error(loglines)
class BuckTool(object):
def __init__(self, buck_project):
self._init_timestamp = int(round(time.time() * 1000))
self._command_line = CommandLineArgs(sys.argv)
self._buck_project = buck_project
self._tmp_dir = platform_path(buck_project.tmp_dir)
self._stdout_file = os.path.join(self._tmp_dir, "stdout")
self._stderr_file = os.path.join(self._tmp_dir, "stderr")
self._fake_buck_version = os.environ.get('BUCK_FAKE_VERSION')
if self._fake_buck_version:
logging.info("Using fake buck version: {}".format(self._fake_buck_version))
self._pathsep = os.pathsep
if sys.platform == 'cygwin':
self._pathsep = ';'
def _has_resource(self, resource):
"""Check whether the given resource exists."""
raise NotImplementedError()
def _get_resource(self, resource):
"""Return an on-disk path to the given resource.
This may cause implementations to unpack the resource at this point.
"""
raise NotImplementedError()
def _get_resource_lock_path(self):
"""Return the path to the file used to determine if another buck process is using the same
resources folder (used for cleanup coordination).
"""
raise NotImplementedError()
def _get_buck_version_uid(self):
raise NotImplementedError()
def _get_buck_version_timestamp(self):
raise NotImplementedError()
def _get_bootstrap_classpath(self):
raise NotImplementedError()
def _get_java_classpath(self):
raise NotImplementedError()
def _is_buck_production(self):
raise NotImplementedError()
def _get_extra_java_args(self):
return []
def _get_exported_resources(self):
return EXPORTED_RESOURCES
@property
def _use_buckd(self):
return not os.environ.get('NO_BUCKD')
def _environ_for_buck(self):
env = os.environ.copy()
env['CLASSPATH'] = str(self._get_bootstrap_classpath())
env['BUCK_CLASSPATH'] = str(self._get_java_classpath())
env['BUCK_TTY'] = str(int(sys.stdin.isatty()))
return env
def _run_with_nailgun(self, argv, env):
'''
Run the command using nailgun. If the daemon is busy, block until it becomes free.
'''
exit_code = 2
busy_diagnostic_displayed = False
while exit_code == 2:
with NailgunConnection(
self._buck_project.get_buckd_transport_address(),
cwd=self._buck_project.root) as c:
now = int(round(time.time() * 1000))
env['BUCK_PYTHON_SPACE_INIT_TIME'] = \
str(now - self._init_timestamp)
exit_code = c.send_command(
'com.facebook.buck.cli.Main',
argv,
env=env,
cwd=self._buck_project.root)
if exit_code == 2:
env['BUCK_BUILD_ID'] = str(uuid.uuid4())
now = time.time()
if not busy_diagnostic_displayed:
logging.info("Buck daemon is busy with another command. " +
"Waiting for it to become free...\n" +
"You can use 'buck kill' to kill buck " +
"if you suspect buck is stuck.")
busy_diagnostic_displayed = True
time.sleep(1)
return exit_code
def _run_with_buckd(self, env):
'''
Run the buck command using buckd. If the command is "run", get the path, args, etc. from
the daemon, and raise an exception that tells __main__ to run that binary
'''
with Tracing('buck', args={'command': sys.argv[1:]}):
argv = sys.argv[1:]
if len(argv) == 0 or argv[0] != 'run':
return self._run_with_nailgun(argv, env)
else:
with tempfile.NamedTemporaryFile(dir=self._tmp_dir) as argsfile:
# Splice in location of command file to run outside buckd
argv = [argv[0]] + ['--command-args-file', argsfile.name] + argv[1:]
exit_code = self._run_with_nailgun(argv, env)
if exit_code != 0:
# Build failed, so there's nothing to run. Exit normally.
return exit_code
cmd = json.load(argsfile)
path = cmd['path'].encode('utf8')
argv = [arg.encode('utf8') for arg in cmd['argv']]
envp = {k.encode('utf8'): v.encode('utf8') for k, v in cmd['envp'].iteritems()}
cwd = cmd['cwd'].encode('utf8')
raise ExecuteTarget(path, argv, envp, cwd)
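    # Illustrative shape of the JSON command file the daemon writes for
    # `buck run` (keys match the parsing above; path and values are made up):
    #   {"path": "/repo/buck-out/gen/app/app", "argv": ["app", "--flag"],
    #    "envp": {"PATH": "/usr/bin"}, "cwd": "/repo"}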
def launch_buck(self, build_id):
with Tracing('BuckTool.launch_buck'):
with JvmCrashLogger(self, self._buck_project.root):
if self._command_line.command == "clean" and \
not self._command_line.is_help():
self.kill_buckd()
buck_version_uid = self._get_buck_version_uid()
if self._command_line.is_version():
print("buck version {}".format(buck_version_uid))
return 0
use_buckd = self._use_buckd
if not self._command_line.is_help():
has_watchman = bool(which('watchman'))
if use_buckd and has_watchman:
running_version = self._buck_project.get_running_buckd_version()
if running_version != buck_version_uid:
self.kill_buckd()
if not self._is_buckd_running():
self.launch_buckd(buck_version_uid=buck_version_uid)
elif use_buckd and not has_watchman:
logging.warning("Not using buckd because watchman isn't installed.")
elif not use_buckd:
logging.warning("Not using buckd because NO_BUCKD is set.")
env = self._environ_for_buck()
env['BUCK_BUILD_ID'] = build_id
if use_buckd and self._is_buckd_running():
return self._run_with_buckd(env)
command = ["buck"]
extra_default_options = [
"-Djava.io.tmpdir={0}".format(self._tmp_dir),
"-Dfile.encoding=UTF-8",
"-XX:SoftRefLRUPolicyMSPerMB=0",
"-XX:+UseG1GC",
]
command.extend(self._get_java_args(buck_version_uid, extra_default_options))
command.append("com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper")
command.append("com.facebook.buck.cli.Main")
command.extend(sys.argv[1:])
now = int(round(time.time() * 1000))
env['BUCK_PYTHON_SPACE_INIT_TIME'] = str(now - self._init_timestamp)
java = which("java")
if java is None:
raise BuckToolException('Could not find java on $PATH')
with Tracing('buck', args={'command': command}):
buck_exit_code = subprocess.call(command,
cwd=self._buck_project.root,
env=env,
executable=java)
return buck_exit_code
def _generate_log_entry(self, message, logs_array):
import socket
import getpass
traits = {
"severity": "SEVERE",
"logger": "com.facebook.buck.python.buck_tool.py",
"buckGitCommit": self._get_buck_version_uid(),
"os": platform.system(),
"osVersion": platform.release(),
"user": getpass.getuser(),
"hostname": socket.gethostname(),
"isSuperConsoleEnabled": "false",
"isDaemon": "false",
}
entry = {
"logs": logs_array,
"traits": traits,
"message": message,
"category": message,
"time": int(time.time()),
"logger": "com.facebook.buck.python.buck_tool.py",
}
return entry
def launch_buckd(self, buck_version_uid=None):
with Tracing('BuckTool.launch_buckd'):
setup_watchman_watch()
if buck_version_uid is None:
buck_version_uid = self._get_buck_version_uid()
# Override self._tmp_dir to a long lived directory.
buckd_tmp_dir = self._buck_project.create_buckd_tmp_dir()
ngserver_output_path = os.path.join(buckd_tmp_dir, 'ngserver-out')
'''
Use SoftRefLRUPolicyMSPerMB for immediate GC of javac output.
Set timeout to 60s (longer than the biggest GC pause seen for a 2GB
heap) and GC target to 15s. This means that the GC has to miss its
target by 100% or many 500ms heartbeats must be missed before a client
disconnection occurs. Specify port 0 to allow Nailgun to find an
available port, then parse the port number out of the first log entry.
'''
command = ["buckd"]
extra_default_options = [
"-Dbuck.buckd_launch_time_nanos={0}".format(monotonic_time_nanos()),
"-Dfile.encoding=UTF-8",
"-XX:MaxGCPauseMillis={0}".format(GC_MAX_PAUSE_TARGET),
"-XX:SoftRefLRUPolicyMSPerMB=0",
# Stop Java waking up every 50ms to collect thread
# statistics; doing it once every five seconds is much
# saner for a long-lived daemon.
"-XX:PerfDataSamplingInterval=5000",
# Do not touch most signals
"-Xrs",
# Likewise, waking up once per second just in case
# there's some rebalancing to be done is silly.
"-XX:+UnlockDiagnosticVMOptions",
"-XX:GuaranteedSafepointInterval=5000",
"-Djava.io.tmpdir={0}".format(buckd_tmp_dir),
"-Dcom.martiansoftware.nailgun.NGServer.outputPath={0}".format(
ngserver_output_path),
"-XX:+UseG1GC",
"-XX:MaxHeapFreeRatio=40",
# See https://www.infoq.com/news/2015/08/JVM-Option-mixed-mode-profiles for details
"-XX:+PreserveFramePointer",
]
command.extend(self._get_java_args(buck_version_uid, extra_default_options))
command.append("com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper")
command.append("com.facebook.buck.cli.Main$DaemonBootstrap")
command.append(self._buck_project.get_buckd_transport_address())
command.append("{0}".format(BUCKD_CLIENT_TIMEOUT_MILLIS))
buckd_transport_file_path = self._buck_project.get_buckd_transport_file_path()
if os.name == 'nt':
preexec_fn = None
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx#DETACHED_PROCESS
DETACHED_PROCESS = 0x00000008
creationflags = DETACHED_PROCESS
else:
# Make sure the Unix domain socket doesn't exist before this call.
try:
os.unlink(buckd_transport_file_path)
except OSError as e:
if e.errno == errno.ENOENT:
# Socket didn't previously exist.
pass
else:
raise e
'''
Change the process group of the child buckd process so that when this
script is interrupted, it does not kill buckd.
'''
def preexec_fn():
# Close any open file descriptors to further separate buckd from its
# invoking context (e.g. otherwise we'd hang when running things like
# `ssh localhost buck clean`).
dev_null_fd = os.open("/dev/null", os.O_RDWR)
os.dup2(dev_null_fd, 0)
os.dup2(dev_null_fd, 1)
os.dup2(dev_null_fd, 2)
os.close(dev_null_fd)
creationflags = 0
process = subprocess.Popen(
command,
executable=which("java"),
cwd=self._buck_project.root,
close_fds=True,
preexec_fn=preexec_fn,
env=self._environ_for_buck(),
creationflags=creationflags)
self._buck_project.save_buckd_version(buck_version_uid)
# Give Java some time to create the listening socket.
for i in range(0, 300):
if not transport_exists(buckd_transport_file_path):
time.sleep(0.01)
returncode = process.poll()
# If the process hasn't exited yet, everything is working as expected
if returncode is None:
return 0
return returncode
def kill_buckd(self):
with Tracing('BuckTool.kill_buckd'):
buckd_transport_file_path = self._buck_project.get_buckd_transport_file_path()
if transport_exists(buckd_transport_file_path):
logging.debug("Shutting down buck daemon.")
try:
with NailgunConnection(self._buck_project.get_buckd_transport_address(),
cwd=self._buck_project.root) as c:
c.send_command('ng-stop')
except NailgunException as e:
if e.code not in (NailgunException.CONNECT_FAILED,
NailgunException.CONNECTION_BROKEN,
NailgunException.UNEXPECTED_CHUNKTYPE):
raise BuckToolException(
'Unexpected error shutting down nailgun server: ' +
str(e))
self._buck_project.clean_up_buckd()
def _is_buckd_running(self):
with Tracing('BuckTool._is_buckd_running'):
transport_file_path = self._buck_project.get_buckd_transport_file_path()
if not transport_exists(transport_file_path):
return False
try:
with NailgunConnection(
self._buck_project.get_buckd_transport_address(),
stdin=None,
stdout=None,
stderr=None,
cwd=self._buck_project.root) as c:
c.send_command('ng-stats')
except NailgunException as e:
if e.code == NailgunException.CONNECT_FAILED:
return False
else:
raise
return True
def _get_java_args(self, version_uid, extra_default_options=[]):
with Tracing('BuckTool._get_java_args'):
java_args = [
"-Xmx{0}m".format(JAVA_MAX_HEAP_SIZE_MB),
"-Djava.awt.headless=true",
"-Djava.util.logging.config.class=com.facebook.buck.cli.bootstrapper.LogConfig",
"-Dbuck.test_util_no_tests_dir=true",
"-Dbuck.version_uid={0}".format(version_uid),
"-Dbuck.buckd_dir={0}".format(self._buck_project.buckd_dir),
"-Dorg.eclipse.jetty.util.log.class=org.eclipse.jetty.util.log.JavaUtilLog",
]
resource_lock_path = self._get_resource_lock_path()
if resource_lock_path is not None:
java_args.append("-Dbuck.resource_lock_path={0}".format(resource_lock_path))
for resource in self._get_exported_resources():
if self._has_resource(resource):
java_args.append(
"-Dbuck.{0}={1}".format(
resource.name, self._get_resource(resource)))
if sys.platform == "darwin":
java_args.append("-Dbuck.enable_objc=true")
java_args.append("-Djava.library.path=" + os.path.dirname(
self._get_resource(
Resource("libjcocoa.dylib"))))
if os.environ.get("BUCK_DEBUG_MODE"):
java_args.append("-agentlib:jdwp=transport=dt_socket,"
"server=y,suspend=y,address=8888")
if os.environ.get("BUCK_DEBUG_SOY"):
java_args.append("-Dbuck.soy.debug=true")
java_args.extend(extra_default_options)
if self._buck_project.buck_javaargs:
java_args.extend(shlex.split(self._buck_project.buck_javaargs))
if self._buck_project.buck_javaargs_local:
java_args.extend(shlex.split(self._buck_project.buck_javaargs_local))
java_args.extend(self._get_extra_java_args())
extra_java_args = os.environ.get("BUCK_EXTRA_JAVA_ARGS")
if extra_java_args:
java_args.extend(shlex.split(extra_java_args))
return java_args
def install_signal_handlers():
if os.name == 'posix':
signal.signal(
signal.SIGUSR1,
lambda sig, frame: traceback.print_stack(frame))
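# e.g. on POSIX, `kill -USR1 <wrapper pid>` makes this launcher dump its
# current Python stack (traceback.print_stack writes to stderr), which is
# handy when the wrapper appears hung.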
def platform_path(path):
if sys.platform != 'cygwin':
return path
return check_output(['cygpath', '-w', path]).strip()
def truncate_logs_pretty(logs):
NUMBER_OF_LINES_BEFORE = 100
NUMBER_OF_LINES_AFTER = 100
if len(logs) <= NUMBER_OF_LINES_BEFORE + NUMBER_OF_LINES_AFTER:
return logs
new_logs = logs[:NUMBER_OF_LINES_BEFORE]
new_logs.append('...<truncated>...')
new_logs.extend(logs[-NUMBER_OF_LINES_AFTER:])
return new_logs
def setup_watchman_watch():
with Tracing('BuckTool._setup_watchman_watch'):
if not which('watchman'):
message = textwrap.dedent("""\
Watchman not found, please install when using buckd.
See https://github.com/facebook/watchman for details.""")
if sys.platform == "darwin":
message += "\n(brew install watchman on OS X)"
# Bail if watchman isn't installed as we know java's
# FileSystemWatcher will take too long to process events.
raise BuckToolException(message)
logging.debug("Using watchman.")
def transport_exists(path):
return os.path.exists(path)
if os.name == 'nt':
import ctypes
from ctypes.wintypes import WIN32_FIND_DATAW as WIN32_FIND_DATA
INVALID_HANDLE_VALUE = -1
FindFirstFile = ctypes.windll.kernel32.FindFirstFileW
FindClose = ctypes.windll.kernel32.FindClose
# On Windows, os.path.exists can't reliably check that a named pipe exists
# (os.path.exists tries to open a connection to the pipe).
def transport_exists(transport_path):
wfd = WIN32_FIND_DATA()
handle = FindFirstFile(transport_path, ctypes.byref(wfd))
result = handle != INVALID_HANDLE_VALUE
FindClose(handle)
return result
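# e.g. transport_exists(r'\\.\pipe\buckd_xyz') reports whether the named pipe
# exists without opening a connection to it (pipe name is illustrative).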
| apache-2.0 |
nmcspadden/Homepass-Relay | MACAddflip.py | 1 | 1888 | #!/usr/bin/python
import subprocess
# Important note: this doesn't do anything about the actual Internet Sharing. That
# will still need to be enabled independently with the correct "attwifi" SSID.
# The goal here is to rotate the MAC address between 4E:53:50:4F:4F:40 and 4E:53:50:4F:4F:4F.
# So we check the current value of the last chunk; if it's between 64 and 78
# decimal we increase it by one and commit that, and if it's at 79 or above
# (or below 64) we roll back to 64.
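# Worked example: a current tail of 0x4E becomes 0x4F on this run, while a
# run that finds 0x4F (or anything outside 0x40-0x4E) resets the tail to 0x40.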
MAC_base = '4E:53:50:4F:4F:'
WiFi = 'en0' # assumes you're on an MBA/rMBP; otherwise this will be 'en1'
# First, find out the current MAC address using ifconfig en0 ether
p1 = ['/sbin/ifconfig',WiFi,'ether']
p2 = ['/usr/bin/tail','-n','1']
p3 = ['/usr/bin/cut','-f','6','-d:']
result1 = subprocess.Popen(p1, stdout=subprocess.PIPE)
result2 = subprocess.Popen(p2, stdin=result1.stdout, stdout=subprocess.PIPE)
result1.stdout.close()
result3 = subprocess.Popen(p3, stdin=result2.stdout, stdout=subprocess.PIPE)
result2.stdout.close()
output = result3.communicate()[0].rstrip()
FinalCurrentGroup = int(output, 16)
if (FinalCurrentGroup < 64) or (FinalCurrentGroup >= 79):
FinalGroup = hex(64).lstrip("0x")
# If it's under 0x40, or at 0x4F or above, roll it back to 0x40.
else:
FinalGroup = hex(FinalCurrentGroup + 1).lstrip("0x")
# Otherwise, increment by one.
MAC = MAC_base + FinalGroup
# Now we commit this MAC address
p1 = ['/sbin/ifconfig',WiFi,'ether',MAC]
result1 = subprocess.Popen(p1, stdout=subprocess.PIPE)
output = result1.communicate()
# Flip the WiFi power
p1 = ['/usr/sbin/networksetup','-setairportpower',WiFi,'off']
p2 = ['/usr/sbin/networksetup','-setairportpower',WiFi,'on']
result1 = subprocess.Popen(p1, stdout=subprocess.PIPE)
output = result1.communicate()
result2 = subprocess.Popen(p2, stdout=subprocess.PIPE)
output = result2.communicate()
| apache-2.0 |
alanquillin/ryu | ryu/controller/network.py | 14 | 17355 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from ryu.base import app_manager
import ryu.exception as ryu_exc
from ryu.app.rest_nw_id import NW_ID_UNKNOWN
from ryu.controller import event
from ryu.exception import NetworkNotFound, NetworkAlreadyExist
from ryu.exception import PortAlreadyExist, PortNotFound, PortUnknown
class MacAddressAlreadyExist(ryu_exc.RyuException):
message = 'port (%(dpid)s, %(port)s) has already mac %(mac_address)s'
class EventNetworkDel(event.EventBase):
def __init__(self, network_id):
super(EventNetworkDel, self).__init__()
self.network_id = network_id
class EventNetworkPort(event.EventBase):
def __init__(self, network_id, dpid, port_no, add_del):
super(EventNetworkPort, self).__init__()
self.network_id = network_id
self.dpid = dpid
self.port_no = port_no
self.add_del = add_del
class EventMacAddress(event.EventBase):
def __init__(self, dpid, port_no, network_id, mac_address, add_del):
super(EventMacAddress, self).__init__()
assert network_id is not None
assert mac_address is not None
self.dpid = dpid
self.port_no = port_no
self.network_id = network_id
self.mac_address = mac_address
self.add_del = add_del
class Networks(dict):
"network_id -> set of (dpid, port_no)"
def __init__(self, f):
super(Networks, self).__init__()
self.send_event = f
def list_networks(self):
return list(self.keys())
def has_network(self, network_id):
return network_id in self
def update_network(self, network_id):
self.setdefault(network_id, set())
def create_network(self, network_id):
if network_id in self:
raise NetworkAlreadyExist(network_id=network_id)
self[network_id] = set()
def remove_network(self, network_id):
try:
ports = self[network_id]
except KeyError:
raise NetworkNotFound(network_id=network_id)
while ports:
(dpid, port_no) = ports.pop()
self._remove_event(network_id, dpid, port_no)
if self.pop(network_id, None) is not None:
self.send_event(EventNetworkDel(network_id))
def list_ports(self, network_id):
try:
# use list() to keep compatibility for output
# set() isn't json serializable
return list(self[network_id])
except KeyError:
raise NetworkNotFound(network_id=network_id)
def add_raw(self, network_id, dpid, port_no):
self[network_id].add((dpid, port_no))
def add_event(self, network_id, dpid, port_no):
self.send_event(
EventNetworkPort(network_id, dpid, port_no, True))
# def add(self, network_id, dpid, port_no):
# self.add_raw(network_id, dpid, port_no)
# self.add_event(network_id, dpid, port_no)
def _remove_event(self, network_id, dpid, port_no):
self.send_event(EventNetworkPort(network_id, dpid, port_no, False))
def remove_raw(self, network_id, dpid, port_no):
ports = self[network_id]
if (dpid, port_no) in ports:
ports.remove((dpid, port_no))
self._remove_event(network_id, dpid, port_no)
def remove(self, network_id, dpid, port_no):
try:
self.remove_raw(network_id, dpid, port_no)
except KeyError:
raise NetworkNotFound(network_id=network_id)
except ValueError:
raise PortNotFound(network_id=network_id, dpid=dpid, port=port_no)
def has_port(self, network_id, dpid, port):
return (dpid, port) in self[network_id]
def get_dpids(self, network_id):
try:
ports = self[network_id]
except KeyError:
return set()
# python 2.6 doesn't support set comprehension
# port = (dpid, port_no)
return set([port[0] for port in ports])
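# Illustrative usage sketch for Networks (commented out; the lambda is a
# stand-in for Network.send_event_to_observers, and the ids are
# hypothetical):
#
#   nets = Networks(lambda ev: None)   # drop events in this sketch
#   nets.create_network('net-1')
#   nets.add_raw('net-1', 1, 2)        # (dpid, port_no)
#   assert nets.has_port('net-1', 1, 2)
#   assert nets.get_dpids('net-1') == set([1])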
class Port(object):
def __init__(self, port_no, network_id, mac_address=None):
super(Port, self).__init__()
self.port_no = port_no
self.network_id = network_id
self.mac_address = mac_address
class DPIDs(dict):
"""dpid -> port_no -> Port(port_no, network_id, mac_address)"""
def __init__(self, f, nw_id_unknown):
super(DPIDs, self).__init__()
self.send_event = f
self.nw_id_unknown = nw_id_unknown
def setdefault_dpid(self, dpid):
return self.setdefault(dpid, {})
def _setdefault_network(self, dpid, port_no, default_network_id):
dp = self.setdefault_dpid(dpid)
return dp.setdefault(port_no, Port(port_no=port_no,
network_id=default_network_id))
def setdefault_network(self, dpid, port_no):
self._setdefault_network(dpid, port_no, self.nw_id_unknown)
def update_port(self, dpid, port_no, network_id):
port = self._setdefault_network(dpid, port_no, network_id)
port.network_id = network_id
def remove_port(self, dpid, port_no):
try:
# self.dpids[dpid][port_no] may already have been deleted by
# port_deleted()
port = self[dpid].pop(port_no, None)
if port and port.network_id and port.mac_address:
self.send_event(EventMacAddress(dpid, port_no,
port.network_id,
port.mac_address,
False))
except KeyError:
raise PortNotFound(dpid=dpid, port=port_no, network_id=None)
def get_ports(self, dpid, network_id=None, mac_address=None):
if network_id is None:
return list(self.get(dpid, {}).values())
if mac_address is None:
return [p for p in self.get(dpid, {}).values()
if p.network_id == network_id]
# live-migration: There can be two ports that have the same mac address.
return [p for p in self.get(dpid, {}).values()
if p.network_id == network_id and p.mac_address == mac_address]
def get_port(self, dpid, port_no):
try:
return self[dpid][port_no]
except KeyError:
raise PortNotFound(dpid=dpid, port=port_no, network_id=None)
def get_network(self, dpid, port_no):
try:
return self[dpid][port_no].network_id
except KeyError:
raise PortUnknown(dpid=dpid, port=port_no)
def get_networks(self, dpid):
return set(self[dpid].values())
def get_network_safe(self, dpid, port_no):
port = self.get(dpid, {}).get(port_no)
if port is None:
return self.nw_id_unknown
return port.network_id
def get_mac(self, dpid, port_no):
port = self.get_port(dpid, port_no)
return port.mac_address
def _set_mac(self, network_id, dpid, port_no, port, mac_address):
if not (port.network_id is None or
port.network_id == network_id or
port.network_id == self.nw_id_unknown):
raise PortNotFound(network_id=network_id, dpid=dpid, port=port_no)
port.network_id = network_id
port.mac_address = mac_address
if port.network_id and port.mac_address:
self.send_event(EventMacAddress(
dpid, port_no, port.network_id, port.mac_address,
True))
def set_mac(self, network_id, dpid, port_no, mac_address):
port = self.get_port(dpid, port_no)
if port.mac_address is not None:
raise MacAddressAlreadyExist(dpid=dpid, port=port_no,
mac_address=mac_address)
self._set_mac(network_id, dpid, port_no, port, mac_address)
def update_mac(self, network_id, dpid, port_no, mac_address):
port = self.get_port(dpid, port_no)
if port.mac_address is None:
self._set_mac(network_id, dpid, port_no, port, mac_address)
return
# For now, we don't allow changing mac address.
if port.mac_address != mac_address:
raise MacAddressAlreadyExist(dpid=dpid, port=port_no,
mac_address=port.mac_address)
MacPort = collections.namedtuple('MacPort', ('dpid', 'port_no'))
class MacToPort(collections.defaultdict):
"""mac_address -> set of MacPort(dpid, port_no)"""
def __init__(self):
super(MacToPort, self).__init__(set)
def add_port(self, dpid, port_no, mac_address):
self[mac_address].add(MacPort(dpid, port_no))
def remove_port(self, dpid, port_no, mac_address):
ports = self[mac_address]
ports.discard(MacPort(dpid, port_no))
if not ports:
del self[mac_address]
def get_ports(self, mac_address):
return self[mac_address]
class MacAddresses(dict):
"""network_id -> mac_address -> set of (dpid, port_no)"""
def add_port(self, network_id, dpid, port_no, mac_address):
mac2port = self.setdefault(network_id, MacToPort())
mac2port.add_port(dpid, port_no, mac_address)
def remove_port(self, network_id, dpid, port_no, mac_address):
mac2port = self.get(network_id)
if mac2port is None:
return
mac2port.remove_port(dpid, port_no, mac_address)
if not mac2port:
del self[network_id]
def get_ports(self, network_id, mac_address):
mac2port = self.get(network_id)
if not mac2port:
return set()
return mac2port.get_ports(mac_address)
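# Illustrative usage sketch for MacAddresses/MacToPort (commented out;
# the ids and mac address are hypothetical):
#
#   macs = MacAddresses()
#   macs.add_port('net-1', 1, 2, '08:60:6e:7f:74:e7')
#   assert macs.get_ports('net-1', '08:60:6e:7f:74:e7') == \
#       set([MacPort(dpid=1, port_no=2)])
#   macs.remove_port('net-1', 1, 2, '08:60:6e:7f:74:e7')
#   assert macs.get_ports('net-1', '08:60:6e:7f:74:e7') == set()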
class Network(app_manager.RyuApp):
def __init__(self, nw_id_unknown=NW_ID_UNKNOWN):
super(Network, self).__init__()
self.name = 'network'
self.nw_id_unknown = nw_id_unknown
self.networks = Networks(self.send_event_to_observers)
self.dpids = DPIDs(self.send_event_to_observers, nw_id_unknown)
self.mac_addresses = MacAddresses()
def _check_nw_id_unknown(self, network_id):
if network_id == self.nw_id_unknown:
raise NetworkAlreadyExist(network_id=network_id)
def list_networks(self):
return self.networks.list_networks()
def update_network(self, network_id):
self._check_nw_id_unknown(network_id)
self.networks.update_network(network_id)
def create_network(self, network_id):
self._check_nw_id_unknown(network_id)
self.networks.create_network(network_id)
def remove_network(self, network_id):
self.networks.remove_network(network_id)
def list_ports(self, network_id):
return self.networks.list_ports(network_id)
def list_ports_noraise(self, network_id):
try:
return self.list_ports(network_id)
except NetworkNotFound:
return []
def _update_port(self, network_id, dpid, port, port_may_exist):
def _known_nw_id(nw_id):
return nw_id is not None and nw_id != self.nw_id_unknown
queue_add_event = False
self._check_nw_id_unknown(network_id)
try:
old_network_id = self.dpids.get_network_safe(dpid, port)
if (self.networks.has_port(network_id, dpid, port) or
_known_nw_id(old_network_id)):
if not port_may_exist:
raise PortAlreadyExist(network_id=network_id,
dpid=dpid, port=port)
if old_network_id != network_id:
queue_add_event = True
self.networks.add_raw(network_id, dpid, port)
if _known_nw_id(old_network_id):
self.networks.remove_raw(old_network_id, dpid, port)
except KeyError:
raise NetworkNotFound(network_id=network_id)
self.dpids.update_port(dpid, port, network_id)
if queue_add_event:
self.networks.add_event(network_id, dpid, port)
def create_port(self, network_id, dpid, port):
self._update_port(network_id, dpid, port, False)
def update_port(self, network_id, dpid, port):
self._update_port(network_id, dpid, port, True)
def _get_old_mac(self, network_id, dpid, port_no):
try:
port = self.dpids.get_port(dpid, port_no)
except PortNotFound:
pass
else:
if port.network_id == network_id:
return port.mac_address
return None
def remove_port(self, network_id, dpid, port_no):
# generate event first, then do the real task
old_mac_address = self._get_old_mac(network_id, dpid, port_no)
self.dpids.remove_port(dpid, port_no)
try:
self.networks.remove(network_id, dpid, port_no)
except NetworkNotFound:
# port deletion can be called after network deletion
# because OpenStack auto-deletes some ports (dhcp/router ports)
pass
if old_mac_address is not None:
self.mac_addresses.remove_port(network_id, dpid, port_no,
old_mac_address)
#
# methods for gre tunnel
#
def get_dpids(self, network_id):
return self.networks.get_dpids(network_id)
def has_network(self, network_id):
return self.networks.has_network(network_id)
def get_networks(self, dpid):
return self.dpids.get_networks(dpid)
def create_mac(self, network_id, dpid, port_no, mac_address):
self.mac_addresses.add_port(network_id, dpid, port_no, mac_address)
self.dpids.set_mac(network_id, dpid, port_no, mac_address)
def update_mac(self, network_id, dpid, port_no, mac_address):
old_mac_address = self._get_old_mac(network_id, dpid, port_no)
self.dpids.update_mac(network_id, dpid, port_no, mac_address)
if old_mac_address is not None:
self.mac_addresses.remove_port(network_id, dpid, port_no,
old_mac_address)
self.mac_addresses.add_port(network_id, dpid, port_no, mac_address)
def get_mac(self, dpid, port_no):
return self.dpids.get_mac(dpid, port_no)
def list_mac(self, dpid, port_no):
mac_address = self.dpids.get_mac(dpid, port_no)
if mac_address is None:
return []
return [mac_address]
def get_ports(self, dpid, network_id=None, mac_address=None):
return self.dpids.get_ports(dpid, network_id, mac_address)
def get_port(self, dpid, port_no):
return self.dpids.get_port(dpid, port_no)
def get_ports_with_mac(self, network_id, mac_address):
return self.mac_addresses.get_ports(network_id, mac_address)
#
# methods for simple_isolation
#
def same_network(self, dpid, nw_id, out_port, allow_nw_id_external=None):
assert nw_id != self.nw_id_unknown
out_nw = self.dpids.get_network_safe(dpid, out_port)
if nw_id == out_nw:
return True
if (allow_nw_id_external is not None and
(allow_nw_id_external == nw_id or
allow_nw_id_external == out_nw)):
# allow external network -> known network id
return True
self.logger.debug('blocked dpid %s nw_id %s out_port %d out_nw %s '
'external %s',
dpid, nw_id, out_port, out_nw, allow_nw_id_external)
return False
def get_network(self, dpid, port):
return self.dpids.get_network(dpid, port)
def add_datapath(self, ofp_switch_features):
datapath = ofp_switch_features.datapath
dpid = ofp_switch_features.datapath_id
ports = ofp_switch_features.ports
self.dpids.setdefault_dpid(dpid)
for port_no in ports:
self.port_added(datapath, port_no)
def port_added(self, datapath, port_no):
if port_no == 0 or port_no >= datapath.ofproto.OFPP_MAX:
# skip fake output ports
return
self.dpids.setdefault_network(datapath.id, port_no)
def port_deleted(self, dpid, port_no):
self.dpids.remove_port(dpid, port_no)
def filter_ports(self, dpid, in_port, nw_id, allow_nw_id_external=None):
assert nw_id != self.nw_id_unknown
ret = []
for port in self.get_ports(dpid):
nw_id_ = port.network_id
if port.port_no == in_port:
continue
if nw_id_ == nw_id:
ret.append(port.port_no)
elif (allow_nw_id_external is not None and
nw_id_ == allow_nw_id_external):
ret.append(port.port_no)
return ret
| apache-2.0 |
eranchetz/nupic | tests/unit/nupic/data/filters_test.py | 35 | 5588 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for filters module.
NOTE: This test was migrated from the old repo and could use some refactoring.
"""
from datetime import datetime
import numpy
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.regions.RecordSensor import RecordSensor
from nupic.data.file_record_stream import FileRecordStream
from nupic.encoders import MultiEncoder
from nupic.data.filters import DeltaFilter
class FiltersTest(unittest.TestCase):
@unittest.skip("Disabled until we figure out why it is failing in internal"
" tests")
def testDeltaFilter(self):
"""
data looks like: should generate deltas
"t" "s" "dt" "ds"
t 10 X
t+1s 20 1s 10
t+1d 50 86399 30
r t+1d+1s 60 X
r+1d+3s 65 2s 5
"""
r = RecordSensor()
filename = resource_filename("nupic.datafiles", "extra/qa/delta.csv")
datasource = FileRecordStream(filename)
r.dataSource = datasource
n = 50
encoder = MultiEncoder({'blah': dict(fieldname="s", type='ScalarEncoder',
n=n, w=11, minval=0, maxval=100)})
r.encoder = encoder
# Test #1 -- no deltas
# Make sure we get a reset when the gym changes
resetOut = numpy.zeros((1,), dtype='float')
sequenceIdOut = numpy.zeros((1,), dtype='float')
dataOut = numpy.zeros((n,), dtype='float')
sourceOut = numpy.zeros((1,), dtype='float')
categoryOut = numpy.zeros((1,), dtype='float')
outputs = dict(resetOut=resetOut,
sourceOut = sourceOut,
sequenceIdOut = sequenceIdOut,
dataOut = dataOut,
categoryOut = categoryOut)
inputs = dict()
r.verbosity=0
r.compute(inputs, outputs)
lr = r.lastRecord
self.assertEqual(lr['t'], datetime(year=2011, month=2, day=24, hour=16,
minute=8, second=0))
self.assertEqual(lr['s'], 10)
self.assertEqual(lr['_reset'], 1)
self.assertTrue('dt' not in lr)
self.assertTrue('ds' not in lr)
r.compute(inputs, outputs)
lr = r.lastRecord
self.assertEqual(lr['t'], datetime(year=2011, month=2, day=24, hour=16,
minute=8, second=1))
self.assertEqual(lr['s'], 20)
self.assertEqual(lr['_reset'], 0)
r.compute(inputs, outputs)
lr = r.lastRecord
self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
minute=8, second=0))
self.assertEqual(lr['s'], 50)
self.assertEqual(lr['_reset'], 0)
r.compute(inputs, outputs)
lr = r.lastRecord
self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
minute=8, second=1))
self.assertEqual(lr['s'], 60)
self.assertEqual(lr['_reset'], 1)
r.compute(inputs, outputs)
lr = r.lastRecord
self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
minute=8, second=3))
self.assertEqual(lr['s'], 65)
self.assertEqual(lr['_reset'], 0)
# Add filters
r.preEncodingFilters = [DeltaFilter("s", "ds"), DeltaFilter("t", "dt")]
r.rewind()
# skip first record, which has a reset
r.compute(inputs, outputs)
lr = r.lastRecord
self.assertEqual(lr['t'], datetime(year=2011, month=2, day=24, hour=16,
minute=8, second=1))
self.assertEqual(lr['s'], 20)
self.assertEqual(lr['_reset'], 1) # this record should have a reset since
# it is first of a sequence
self.assertEqual(lr['dt'], 1)
self.assertEqual(lr['ds'], 10)
r.compute(inputs, outputs)
lr = r.lastRecord
self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
minute=8, second=0))
self.assertEqual(lr['s'], 50)
self.assertEqual(lr['_reset'], 0)
self.assertEqual(lr['dt'], 3600 * 24 - 1)
self.assertEqual(lr['ds'], 30)
# next reset record is skipped
r.compute(inputs, outputs)
lr = r.lastRecord
self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16,
minute=8, second=3))
self.assertEqual(lr['s'], 65)
self.assertEqual(lr['_reset'], 1)
self.assertEqual(lr['dt'], 2)
self.assertEqual(lr['ds'], 5)
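# For reference, the delta computation this test exercises can be
# sketched in plain Python (illustrative only -- DeltaFilter itself
# lives in nupic.data.filters and also handles resets and field
# metadata):
#
#   def deltas(records):
#       prev = None
#       for t, s in records:       # (datetime, scalar) pairs
#           if prev is not None:
#               yield (t - prev[0]).total_seconds(), s - prev[1]
#           prev = (t, s)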
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
Benrflanders/Pytris | pyglet/pyglet/input/base.py | 33 | 26060 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Interface classes for `pyglet.input`.
:since: pyglet 1.2
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import sys
from pyglet.event import EventDispatcher
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
class DeviceException(Exception):
pass
class DeviceOpenException(DeviceException):
pass
class DeviceExclusiveException(DeviceException):
pass
class Device(object):
'''Input device.
:Ivariables:
`display` : `Display`
Display this device is connected to.
`name` : str
Name of the device, as described by the device firmware.
`manufacturer` : str
Name of the device manufacturer, or ``None`` if the information is
not available.
'''
def __init__(self, display, name):
self.display = display
self.name = name
self.manufacturer = None
# TODO: make private
self.is_open = False
def open(self, window=None, exclusive=False):
'''Open the device to begin receiving input from it.
:Parameters:
`window` : Window
Optional window to associate with the device. The behaviour
of this parameter is device and operating system dependant.
It can usually be omitted for most devices.
`exclusive` : bool
If ``True`` the device will be opened exclusively so that no
other application can use it. The method will raise
`DeviceExclusiveException` if the device cannot be opened this
way (for example, because another application has already
opened it).
'''
if self.is_open:
raise DeviceOpenException('Device is already open.')
self.is_open = True
def close(self):
'''Close the device.
'''
self.is_open = False
def get_controls(self):
'''Get a list of controls provided by the device.
:rtype: list of `Control`
'''
raise NotImplementedError('abstract')
def __repr__(self):
return '%s(name=%s)' % (self.__class__.__name__, self.name)
class Control(EventDispatcher):
'''Single value input provided by a device.
A control's value can be queried when the device is open. Event handlers
can be attached to the control to be called when the value changes.
The `min` and `max` properties are provided as advertised by the
device; in some cases the control's value will be outside this range.
:Ivariables:
`name` : str
Name of the control, or ``None`` if unknown
`raw_name` : str
Unmodified name of the control, as presented by the operating
system; or ``None`` if unknown.
`inverted` : bool
If ``True``, the value reported is actually inverted from what the
device reported; usually this is to provide consistency across
operating systems.
'''
_value = None
def __init__(self, name, raw_name=None):
self.name = name
self.raw_name = raw_name
self.inverted = False
def _get_value(self):
return self._value
def _set_value(self, value):
if value == self._value:
return
self._value = value
self.dispatch_event('on_change', value)
value = property(_get_value, doc='''Current value of the control.
The range of the value is device-dependent; for absolute controls
the range is given by ``min`` and ``max`` (however the value may exceed
this range); for relative controls the range is undefined.
:type: float''')
def __repr__(self):
if self.name:
return '%s(name=%s, raw_name=%s)' % (
self.__class__.__name__, self.name, self.raw_name)
else:
return '%s(raw_name=%s)' % (self.__class__.__name__, self.raw_name)
if _is_epydoc:
def on_change(self, value):
'''The value changed.
:Parameters:
`value` : float
Current value of the control.
:event:
'''
Control.register_event_type('on_change')
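# Illustrative sketch (commented out): any Control dispatches 'on_change'
# when its value is updated; _set_value is used here purely for
# demonstration, since real values normally come from the device driver.
#
#   throttle = Control('throttle')
#   @throttle.event
#   def on_change(value):
#       print 'throttle ->', value
#   throttle._set_value(0.5)   # fires on_change(0.5)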
class RelativeAxis(Control):
'''An axis whose value represents a relative change from the previous
value.
'''
#: Name of the horizontal axis control
X = 'x'
#: Name of the vertical axis control
Y = 'y'
#: Name of the Z axis control.
Z = 'z'
#: Name of the rotational-X axis control
RX = 'rx'
#: Name of the rotational-Y axis control
RY = 'ry'
#: Name of the rotational-Z axis control
RZ = 'rz'
#: Name of the scroll wheel control
WHEEL = 'wheel'
def _get_value(self):
return self._value
def _set_value(self, value):
self._value = value
self.dispatch_event('on_change', value)
value = property(_get_value)
class AbsoluteAxis(Control):
'''An axis whose value represents a physical measurement from the device.
The value is advertised to range over ``min`` and ``max``.
:Ivariables:
`min` : float
Minimum advertised value.
`max` : float
Maximum advertised value.
'''
#: Name of the horizontal axis control
X = 'x'
#: Name of the vertical axis control
Y = 'y'
#: Name of the Z axis control.
Z = 'z'
#: Name of the rotational-X axis control
RX = 'rx'
#: Name of the rotational-Y axis control
RY = 'ry'
#: Name of the rotational-Z axis control
RZ = 'rz'
#: Name of the hat (POV) control, when a single control enumerates all of
#: the hat's positions.
HAT = 'hat'
#: Name of the hat's (POV's) horizontal control, when the hat position is
#: described by two orthogonal controls.
HAT_X = 'hat_x'
#: Name of the hat's (POV's) vertical control, when the hat position is
#: described by two orthogonal controls.
HAT_Y = 'hat_y'
def __init__(self, name, min, max, raw_name=None):
super(AbsoluteAxis, self).__init__(name, raw_name)
self.min = min
self.max = max
class Button(Control):
'''A control whose value is boolean.
'''
def _get_value(self):
return bool(self._value)
def _set_value(self, value):
if value == self._value:
return
self._value = value
self.dispatch_event('on_change', bool(value))
if value:
self.dispatch_event('on_press')
else:
self.dispatch_event('on_release')
value = property(_get_value)
if _is_epydoc:
def on_press(self):
'''The button was pressed.
:event:
'''
def on_release(self):
'''The button was released.
:event:
'''
Button.register_event_type('on_press')
Button.register_event_type('on_release')
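# Illustrative sketch (commented out): a Button additionally dispatches
# 'on_press'/'on_release' around the underlying 'on_change' event.
#
#   fire = Button('fire')
#   @fire.event
#   def on_press():
#       print 'fire pressed'
#   @fire.event
#   def on_release():
#       print 'fire released'
#   fire._set_value(1)   # on_change(True) then on_press()
#   fire._set_value(0)   # on_change(False) then on_release()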
class Joystick(EventDispatcher):
'''High-level interface for joystick-like devices. This includes analogue
and digital joysticks, gamepads, game controllers, and possibly even
steering wheels and other input devices. There is unfortunately no way to
distinguish between these different device types.
To use a joystick, first call `open`, then in your game loop examine
the values of `x`, `y`, and so on. These values are normalized to the
range [-1.0, 1.0].
To receive events when the value of an axis changes, attach an
on_joyaxis_motion event handler to the joystick. The `Joystick` instance,
axis name, and current value are passed as parameters to this event.
To handle button events, you should attach on_joybutton_press and
on_joybutton_release event handlers to the joystick. Both the `Joystick`
instance and the index of the changed button are passed as parameters to
these events.
Alternately, you may attach event handlers to each individual button in
`button_controls` to receive on_press or on_release events.
To use the hat switch, attach an on_joyhat_motion event handler to the
joystick. The handler will be called with both the hat_x and hat_y values
whenever the value of the hat switch changes.
The device name can be queried to get the name of the joystick.
:Ivariables:
`device` : `Device`
The underlying device used by this joystick interface.
`x` : float
Current X (horizontal) value ranging from -1.0 (left) to 1.0
(right).
`y` : float
Current y (vertical) value ranging from -1.0 (top) to 1.0
(bottom).
`z` : float
Current Z value ranging from -1.0 to 1.0. On joysticks the Z
value is usually the throttle control. On game controllers the Z
value is usually the secondary thumb vertical axis.
`rx` : float
Current rotational X value ranging from -1.0 to 1.0.
`ry` : float
Current rotational Y value ranging from -1.0 to 1.0.
`rz` : float
Current rotational Z value ranging from -1.0 to 1.0. On joysticks
the RZ value is usually the twist of the stick. On game
controllers the RZ value is usually the secondary thumb horizontal
axis.
`hat_x` : int
Current hat (POV) horizontal position; one of -1 (left), 0
(centered) or 1 (right).
`hat_y` : int
Current hat (POV) vertical position; one of -1 (bottom), 0
(centered) or 1 (top).
`buttons` : list of bool
List of boolean values representing current states of the buttons.
These are in order, so that button 1 has value at ``buttons[0]``,
and so on.
`x_control` : `AbsoluteAxis`
Underlying control for `x` value, or ``None`` if not available.
`y_control` : `AbsoluteAxis`
Underlying control for `y` value, or ``None`` if not available.
`z_control` : `AbsoluteAxis`
Underlying control for `z` value, or ``None`` if not available.
`rx_control` : `AbsoluteAxis`
Underlying control for `rx` value, or ``None`` if not available.
`ry_control` : `AbsoluteAxis`
Underlying control for `ry` value, or ``None`` if not available.
`rz_control` : `AbsoluteAxis`
Underlying control for `rz` value, or ``None`` if not available.
`hat_x_control` : `AbsoluteAxis`
Underlying control for `hat_x` value, or ``None`` if not available.
`hat_y_control` : `AbsoluteAxis`
Underlying control for `hat_y` value, or ``None`` if not available.
`button_controls` : list of `Button`
Underlying controls for `buttons` values.
'''
def __init__(self, device):
self.device = device
self.x = 0
self.y = 0
self.z = 0
self.rx = 0
self.ry = 0
self.rz = 0
self.hat_x = 0
self.hat_y = 0
self.buttons = []
self.x_control = None
self.y_control = None
self.z_control = None
self.rx_control = None
self.ry_control = None
self.rz_control = None
self.hat_x_control = None
self.hat_y_control = None
self.button_controls = []
def add_axis(control):
name = control.name
scale = 2.0 / (control.max - control.min)
bias = -1.0 - control.min * scale
if control.inverted:
scale = -scale
bias = -bias
setattr(self, name + '_control', control)
@control.event
def on_change(value):
normalized_value = value * scale + bias
setattr(self, name, normalized_value)
self.dispatch_event('on_joyaxis_motion', self, name, normalized_value)
def add_button(control):
i = len(self.buttons)
self.buttons.append(False)
self.button_controls.append(control)
@control.event
def on_change(value):
self.buttons[i] = value
@control.event
def on_press():
self.dispatch_event('on_joybutton_press', self, i)
@control.event
def on_release():
self.dispatch_event('on_joybutton_release', self, i)
def add_hat(control):
# 8-directional hat encoded as a single control (Windows/Mac)
self.hat_x_control = control
self.hat_y_control = control
@control.event
def on_change(value):
if value & 0xffff == 0xffff:
self.hat_x = self.hat_y = 0
else:
if control.max > 8: # DirectInput: scale value
value //= 0xfff
if 0 <= value < 8:
self.hat_x, self.hat_y = (
( 0, 1),
( 1, 1),
( 1, 0),
( 1, -1),
( 0, -1),
(-1, -1),
(-1, 0),
(-1, 1),
)[value]
else:
# Out of range
self.hat_x = self.hat_y = 0
self.dispatch_event('on_joyhat_motion', self, self.hat_x, self.hat_y)
for control in device.get_controls():
if isinstance(control, AbsoluteAxis):
if control.name in ('x', 'y', 'z', 'rx', 'ry', 'rz',
'hat_x', 'hat_y'):
add_axis(control)
elif control.name == 'hat':
add_hat(control)
elif isinstance(control, Button):
add_button(control)
def open(self, window=None, exclusive=False):
'''Open the joystick device. See `Device.open`.
'''
self.device.open(window, exclusive)
def close(self):
'''Close the joystick device. See `Device.close`.
'''
self.device.close()
def on_joyaxis_motion(self, joystick, axis, value):
'''The value of a joystick axis changed.
:Parameters:
`joystick` : `Joystick`
The joystick device whose axis changed.
`axis` : string
The name of the axis that changed.
`value` : float
The current value of the axis, normalized to [-1, 1].
'''
def on_joybutton_press(self, joystick, button):
'''A button on the joystick was pressed.
:Parameters:
`joystick` : `Joystick`
The joystick device whose button was pressed.
`button` : int
The index (in `button_controls`) of the button that was pressed.
'''
def on_joybutton_release(self, joystick, button):
'''A button on the joystick was released.
:Parameters:
`joystick` : `Joystick`
The joystick device whose button was released.
`button` : int
The index (in `button_controls`) of the button that was released.
'''
def on_joyhat_motion(self, joystick, hat_x, hat_y):
'''The value of the joystick hat switch changed.
:Parameters:
`joystick` : `Joystick`
The joystick device whose hat control changed.
`hat_x` : int
Current hat (POV) horizontal position; one of -1 (left), 0
(centered) or 1 (right).
`hat_y` : int
Current hat (POV) vertical position; one of -1 (bottom), 0
(centered) or 1 (top).
'''
Joystick.register_event_type('on_joyaxis_motion')
Joystick.register_event_type('on_joybutton_press')
Joystick.register_event_type('on_joybutton_release')
Joystick.register_event_type('on_joyhat_motion')
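# Typical usage sketch (commented out; get_joysticks() is provided by
# pyglet.input on supported platforms):
#
#   import pyglet
#   joysticks = pyglet.input.get_joysticks()
#   if joysticks:
#       joystick = joysticks[0]
#       joystick.open()
#       @joystick.event
#       def on_joybutton_press(joystick, button):
#           print 'button %d pressed' % button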
class AppleRemote(EventDispatcher):
'''High-level interface for Apple remote control.
This interface provides access to the 6 button controls on the remote.
Pressing and holding certain buttons on the remote is interpreted as
a separate control.
:Ivariables:
`device` : `Device`
The underlying device used by this interface.
`left_control` : `Button`
Button control for the left (prev) button.
`left_hold_control` : `Button`
Button control for holding the left button (rewind).
`right_control` : `Button`
Button control for the right (next) button.
`right_hold_control` : `Button`
Button control for holding the right button (fast forward).
`up_control` : `Button`
Button control for the up (volume increase) button.
`down_control` : `Button`
Button control for the down (volume decrease) button.
`select_control` : `Button`
Button control for the select (play/pause) button.
`select_hold_control` : `Button`
Button control for holding the select button.
`menu_control` : `Button`
Button control for the menu button.
`menu_hold_control` : `Button`
Button control for holding the menu button.
'''
def __init__(self, device):
def add_button(control):
setattr(self, control.name + '_control', control)
@control.event
def on_press():
self.dispatch_event('on_button_press', control.name)
@control.event
def on_release():
self.dispatch_event('on_button_release', control.name)
self.device = device
for control in device.get_controls():
if control.name in ('left', 'left_hold', 'right', 'right_hold', 'up', 'down',
'menu', 'select', 'menu_hold', 'select_hold'):
add_button(control)
def open(self, window=None, exclusive=False):
'''Open the device. See `Device.open`.
'''
self.device.open(window, exclusive)
def close(self):
'''Close the device. See `Device.close`.
'''
self.device.close()
def on_button_press(self, button):
"""A button on the remote was pressed.
Only the 'up' and 'down' buttons will generate an event when the
button is first pressed. All other buttons on the remote will wait
until the button is released and then send both the press and release
events at the same time.
:Parameters:
`button` : unicode
The name of the button that was pressed. The valid names are
'up', 'down', 'left', 'right', 'left_hold', 'right_hold',
'menu', 'menu_hold', 'select', and 'select_hold'
:event:
"""
def on_button_release(self, button):
"""A button on the remote was released.
The 'select_hold' and 'menu_hold' button release events are sent
immediately after the corresponding press events regardless of
whether or not the user has released the button.
:Parameters:
`button` : unicode
The name of the button that was released. The valid names are
'up', 'down', 'left', 'right', 'left_hold', 'right_hold',
'menu', 'menu_hold', 'select', and 'select_hold'
:event:
"""
AppleRemote.register_event_type('on_button_press')
AppleRemote.register_event_type('on_button_release')
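# Typical usage sketch (commented out; get_apple_remote() is provided by
# pyglet.input on Mac OS X, and `window` is an existing pyglet window):
#
#   import pyglet
#   remote = pyglet.input.get_apple_remote()
#   if remote:
#       remote.open(window, exclusive=True)
#       @remote.event
#       def on_button_press(button):
#           print 'remote button:', button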
class Tablet(object):
'''High-level interface to tablet devices.
Unlike other devices, tablets must be opened for a specific window,
and cannot be opened exclusively. The `open` method returns a
`TabletCanvas` object, which supports the events provided by the tablet.
Currently only one tablet device can be used, though it can be opened on
multiple windows. If more than one tablet is connected, the behaviour is
undefined.
'''
def open(self, window):
'''Open a tablet device for a window.
:Parameters:
`window` : `Window`
The window on which the tablet will be used.
:rtype: `TabletCanvas`
'''
raise NotImplementedError('abstract')
class TabletCanvas(EventDispatcher):
'''Event dispatcher for tablets.
Use `Tablet.open` to obtain this object for a particular tablet device and
window. Events may be generated even if the tablet stylus is outside of
the window; this is operating-system dependent.
The events each provide the `TabletCursor` that was used to generate the
event; for example, to distinguish between a stylus and an eraser. Only
one cursor can be used at a time, otherwise the results are undefined.
:Ivariables:
`window` : Window
The window on which this tablet was opened.
'''
# OS X: Active window receives tablet events only when cursor is in window
# Windows: Active window receives all tablet events
#
# Note that this means enter/leave pairs are not always consistent (normal
# usage).
def __init__(self, window):
self.window = window
def close(self):
'''Close the tablet device for this window.
'''
raise NotImplementedError('abstract')
if _is_epydoc:
def on_enter(self, cursor):
'''A cursor entered the proximity of the window. The cursor may
be hovering above the tablet surface, but outside of the window
bounds, or it may have entered the window bounds.
Note that you cannot rely on `on_enter` and `on_leave` events to
be generated in pairs; some events may be lost if the cursor was
out of the window bounds at the time.
:Parameters:
`cursor` : `TabletCursor`
The cursor that entered proximity.
:event:
'''
def on_leave(self, cursor):
'''A cursor left the proximity of the window. The cursor may have
moved too high above the tablet surface to be detected, or it may
have left the bounds of the window.
Note that you cannot rely on `on_enter` and `on_leave` events to
be generated in pairs; some events may be lost if the cursor was
out of the window bounds at the time.
:Parameters:
`cursor` : `TabletCursor`
The cursor that left proximity.
:event:
'''
def on_motion(self, cursor, x, y, pressure):
'''The cursor moved on the tablet surface.
If `pressure` is 0, then the cursor is actually hovering above the
tablet surface, not in contact.
:Parameters:
`cursor` : `TabletCursor`
The cursor that moved.
`x` : int
The X position of the cursor, in window coordinates.
`y` : int
The Y position of the cursor, in window coordinates.
`pressure` : float
The pressure applied to the cursor, in range 0.0 (no
pressure) to 1.0 (full pressure).
`tilt_x` : float
Currently undefined.
`tilt_y` : float
Currently undefined.
:event:
'''
TabletCanvas.register_event_type('on_enter')
TabletCanvas.register_event_type('on_leave')
TabletCanvas.register_event_type('on_motion')
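# Typical usage sketch (commented out; get_tablets() is provided by
# pyglet.input, and `window` is an existing pyglet window):
#
#   import pyglet
#   tablets = pyglet.input.get_tablets()
#   if tablets:
#       canvas = tablets[0].open(window)
#       @canvas.event
#       def on_motion(cursor, x, y, pressure):
#           print cursor.name, x, y, pressure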
class TabletCursor(object):
'''A distinct cursor used on a tablet.
Most tablets support at least a *stylus* and an *eraser* cursor; this
object is used to distinguish them when tablet events are generated.
:Ivariables:
`name` : str
Name of the cursor.
'''
# TODO well-defined names for stylus and eraser.
def __init__(self, name):
self.name = name
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.name)
| mit |
glove747/liberty-neutron | neutron/agent/linux/interface.py | 3 | 19615 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import six
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import ipv6_utils
from neutron.i18n import _LE, _LI
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('ovs_integration_bridge',
default='br-int',
help=_('Name of Open vSwitch bridge to use')),
cfg.BoolOpt('ovs_use_veth',
default=False,
help=_('Uses veth for an interface or not')),
cfg.IntOpt('network_device_mtu',
help=_('MTU setting for device.')),
]
@six.add_metaclass(abc.ABCMeta)
class LinuxInterfaceDriver(object):
# from linux IF_NAMESIZE
DEV_NAME_LEN = 14
DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
def __init__(self, conf):
self.conf = conf
if self.conf.network_device_mtu:
self._validate_network_device_mtu()
def _validate_network_device_mtu(self):
if (ipv6_utils.is_enabled() and
self.conf.network_device_mtu < n_const.IPV6_MIN_MTU):
LOG.error(_LE("IPv6 protocol requires a minimum MTU of "
"%(min_mtu)s, while the configured value is "
"%(current_mtu)s"), {'min_mtu': n_const.IPV6_MIN_MTU,
'current_mtu': self.conf.network_device_mtu})
raise SystemExit(1)
@property
def use_gateway_ips(self):
"""Whether to use gateway IPs instead of unique IP allocations.
In each place where the DHCP agent runs, and for each subnet for
which DHCP is handling out IP addresses, the DHCP port needs -
at the Linux level - to have an IP address within that subnet.
Generally this needs to be a unique Neutron-allocated IP
address, because the subnet's underlying L2 domain is bridged
across multiple compute hosts and network nodes, and for HA
there may be multiple DHCP agents running on that same bridged
L2 domain.
However, if the DHCP ports - on multiple compute/network nodes
but for the same network - are _not_ bridged to each other,
they do not need each to have a unique IP address. Instead
they can all share the same address from the relevant subnet.
This works, without creating any ambiguity, because those
ports are not all present on the same L2 domain, and because
no data within the network is ever sent to that address.
(DHCP requests are broadcast, and it is the network's job to
ensure that such a broadcast will reach at least one of the
available DHCP servers. DHCP responses will be sent _from_
the DHCP port address.)
Specifically, for networking backends where it makes sense,
the DHCP agent allows all DHCP ports to use the subnet's
gateway IP address, and thereby to completely avoid any unique
IP address allocation. This behaviour is selected by running
the DHCP agent with a configured interface driver whose
'use_gateway_ips' property is True.
When an operator deploys Neutron with an interface driver that
makes use_gateway_ips True, they should also ensure that a
gateway IP address is defined for each DHCP-enabled subnet,
and that the gateway IP address doesn't change during the
subnet's lifetime.
"""
return False
def init_l3(self, device_name, ip_cidrs, namespace=None,
preserve_ips=None, clean_connections=False):
"""Set the L3 settings for the interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
clean_connections: Boolean to indicate if we should cleanup connections
associated to removed ips
"""
preserve_ips = preserve_ips or []
device = ip_lib.IPDevice(device_name, namespace=namespace)
# The LLA generated by the operating system is not known to
# Neutron, so it would be deleted if we added it to the 'previous'
# list here
default_ipv6_lla = ip_lib.get_ipv6_lladdr(device.link.address)
previous = {addr['cidr'] for addr in device.addr.list(
filters=['permanent'])} - {default_ipv6_lla}
# add new addresses
for ip_cidr in ip_cidrs:
net = netaddr.IPNetwork(ip_cidr)
# Convert to compact IPv6 address because the return values of
# "ip addr list" are compact.
if net.version == 6:
ip_cidr = str(net)
if ip_cidr in previous:
previous.remove(ip_cidr)
continue
device.addr.add(ip_cidr)
# clean up any old addresses
for ip_cidr in previous:
if ip_cidr not in preserve_ips:
if clean_connections:
device.delete_addr_and_conntrack_state(ip_cidr)
else:
device.addr.delete(ip_cidr)
def init_router_port(self,
device_name,
ip_cidrs,
namespace,
preserve_ips=None,
extra_subnets=None,
clean_connections=False):
"""Set the L3 settings for a router interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
clean_connections: Boolean to indicate if we should cleanup connections
associated to removed ips
extra_subnets: An iterable of cidrs to add as routes without address
"""
LOG.debug("init_router_port: device_name(%s), namespace(%s)",
device_name, namespace)
self.init_l3(device_name=device_name,
ip_cidrs=ip_cidrs,
namespace=namespace,
preserve_ips=preserve_ips or [],
clean_connections=clean_connections)
device = ip_lib.IPDevice(device_name, namespace=namespace)
# Manage on-link routes (routes without an associated address)
new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or [])
v4_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_4)
v6_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_6)
existing_onlink_cidrs = set(r['cidr'] for r in v4_onlink + v6_onlink)
for route in new_onlink_cidrs - existing_onlink_cidrs:
LOG.debug("adding onlink route(%s)", route)
device.route.add_onlink_route(route)
for route in existing_onlink_cidrs - new_onlink_cidrs:
LOG.debug("deleting onlink route(%s)", route)
device.route.delete_onlink_route(route)
def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
net = netaddr.IPNetwork(v6addr)
device.addr.add(str(net), scope)
def delete_ipv6_addr(self, device_name, v6addr, namespace):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
device.delete_addr_and_conntrack_state(v6addr)
def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace):
"""Delete the first listed IPv6 address that falls within a given
prefix.
"""
device = ip_lib.IPDevice(device_name, namespace=namespace)
net = netaddr.IPNetwork(prefix)
for address in device.addr.list(scope='global', filters=['permanent']):
ip_address = netaddr.IPNetwork(address['cidr'])
if ip_address in net:
device.delete_addr_and_conntrack_state(address['cidr'])
break
def get_ipv6_llas(self, device_name, namespace):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
return device.addr.list(scope='link', ip_version=6)
def check_bridge_exists(self, bridge):
if not ip_lib.device_exists(bridge):
raise exceptions.BridgeDoesNotExist(bridge=bridge)
def get_device_name(self, port):
return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]
@staticmethod
def configure_ipv6_ra(namespace, dev_name):
"""Configure acceptance of IPv6 route advertisements on an intf."""
# Learn the default router's IP address via RAs
ip_lib.IPWrapper(namespace=namespace).netns.execute(
['sysctl', '-w', 'net.ipv6.conf.%s.accept_ra=2' % dev_name])
@abc.abstractmethod
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plug in the interface only for new devices that don't exist yet."""
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
if not ip_lib.device_exists(device_name,
namespace=namespace):
self.plug_new(network_id, port_id, device_name, mac_address,
bridge, namespace, prefix)
else:
LOG.info(_LI("Device %s already exists"), device_name)
@abc.abstractmethod
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
@property
def bridged(self):
"""Whether the DHCP port is bridged to the VM TAP interfaces.
When the DHCP port is bridged to the TAP interfaces for the
VMs for which it is providing DHCP service - as is the case
for most Neutron network implementations - the DHCP server
only needs to listen on the DHCP port, and will still receive
DHCP requests from all the relevant VMs.
If the DHCP port is not bridged to the relevant VM TAP
interfaces, the DHCP server needs to listen explicitly on
those TAP interfaces, and to treat those as aliases of the
DHCP port where the IP subnet is defined.
"""
return True
class NullDriver(LinuxInterfaceDriver):
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
pass
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
pass
class OVSInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating an internal interface on an OVS bridge."""
DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
def __init__(self, conf):
super(OVSInterfaceDriver, self).__init__(conf)
if self.conf.ovs_use_veth:
self.DEV_NAME_PREFIX = 'ns-'
def _get_tap_name(self, dev_name, prefix=None):
if self.conf.ovs_use_veth:
dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
n_const.TAP_DEVICE_PREFIX)
return dev_name
def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
internal=True):
attrs = [('external_ids', {'iface-id': port_id,
'iface-status': 'active',
'attached-mac': mac_address})]
if internal:
attrs.insert(0, ('type', 'internal'))
ovs = ovs_lib.OVSBridge(bridge)
ovs.replace_port(device_name, *attrs)
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plug in the interface."""
if not bridge:
bridge = self.conf.ovs_integration_bridge
self.check_bridge_exists(bridge)
ip = ip_lib.IPWrapper()
tap_name = self._get_tap_name(device_name, prefix)
if self.conf.ovs_use_veth:
# Create ns_dev in a namespace if one is configured.
root_dev, ns_dev = ip.add_veth(tap_name,
device_name,
namespace2=namespace)
else:
ns_dev = ip.device(device_name)
internal = not self.conf.ovs_use_veth
self._ovs_add_port(bridge, tap_name, port_id, mac_address,
internal=internal)
ns_dev.link.set_address(mac_address)
if self.conf.network_device_mtu:
ns_dev.link.set_mtu(self.conf.network_device_mtu)
if self.conf.ovs_use_veth:
root_dev.link.set_mtu(self.conf.network_device_mtu)
# Add an interface created by ovs to the namespace.
if not self.conf.ovs_use_veth and namespace:
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
ns_dev.link.set_up()
if self.conf.ovs_use_veth:
root_dev.link.set_up()
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
if not bridge:
bridge = self.conf.ovs_integration_bridge
tap_name = self._get_tap_name(device_name, prefix)
self.check_bridge_exists(bridge)
ovs = ovs_lib.OVSBridge(bridge)
try:
ovs.delete_port(tap_name)
if self.conf.ovs_use_veth:
device = ip_lib.IPDevice(device_name, namespace=namespace)
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
LOG.error(_LE("Failed unplugging interface '%s'"),
device_name)
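# Illustrative usage sketch (commented out; conf is an oslo.config object
# providing the OPTS registered above, `network`/`port` are Neutron
# objects, and by default get_device_name() returns the truncated
# 'tap' + port.id name):
#
#   driver = OVSInterfaceDriver(conf)
#   device_name = driver.get_device_name(port)
#   driver.plug(network.id, port.id, device_name, port.mac_address,
#               namespace='qdhcp-%s' % network.id)
#   driver.init_l3(device_name, ['192.168.0.2/24'],
#                  namespace='qdhcp-%s' % network.id)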
class MidonetInterfaceDriver(LinuxInterfaceDriver):
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""This method is called by the Dhcp agent or by the L3 agent
when a new network is created
"""
ip = ip_lib.IPWrapper()
tap_name = device_name.replace(prefix or n_const.TAP_DEVICE_PREFIX,
n_const.TAP_DEVICE_PREFIX)
# Create ns_dev in a namespace if one is configured.
root_dev, ns_dev = ip.add_veth(tap_name, device_name,
namespace2=namespace)
ns_dev.link.set_address(mac_address)
# Add an interface created by ovs to the namespace.
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
ns_dev.link.set_up()
root_dev.link.set_up()
cmd = ['mm-ctl', '--bind-port', port_id, device_name]
utils.execute(cmd, run_as_root=True)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
# the port will be deleted by the dhcp agent that will call the plugin
device = ip_lib.IPDevice(device_name, namespace=namespace)
try:
device.link.delete()
except RuntimeError:
LOG.error(_LE("Failed unplugging interface '%s'"), device_name)
LOG.debug("Unplugged interface '%s'", device_name)
ip_lib.IPWrapper(namespace=namespace).garbage_collect_namespace()
class IVSInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating an internal interface on an IVS bridge."""
DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
def __init__(self, conf):
super(IVSInterfaceDriver, self).__init__(conf)
self.DEV_NAME_PREFIX = 'ns-'
def _get_tap_name(self, dev_name, prefix=None):
dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
n_const.TAP_DEVICE_PREFIX)
return dev_name
def _ivs_add_port(self, device_name, port_id, mac_address):
cmd = ['ivs-ctl', 'add-port', device_name]
utils.execute(cmd, run_as_root=True)
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plug in the interface."""
ip = ip_lib.IPWrapper()
tap_name = self._get_tap_name(device_name, prefix)
root_dev, ns_dev = ip.add_veth(tap_name, device_name)
self._ivs_add_port(tap_name, port_id, mac_address)
ns_dev = ip.device(device_name)
ns_dev.link.set_address(mac_address)
if self.conf.network_device_mtu:
ns_dev.link.set_mtu(self.conf.network_device_mtu)
root_dev.link.set_mtu(self.conf.network_device_mtu)
if namespace:
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
ns_dev.link.set_up()
root_dev.link.set_up()
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
tap_name = self._get_tap_name(device_name, prefix)
try:
cmd = ['ivs-ctl', 'del-port', tap_name]
utils.execute(cmd, run_as_root=True)
device = ip_lib.IPDevice(device_name, namespace=namespace)
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
LOG.error(_LE("Failed unplugging interface '%s'"),
device_name)
class BridgeInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating bridge interfaces."""
DEV_NAME_PREFIX = 'ns-'
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plugin the interface."""
ip = ip_lib.IPWrapper()
# Enable agent to define the prefix
tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX,
n_const.TAP_DEVICE_PREFIX)
# Create ns_veth in a namespace if one is configured.
root_veth, ns_veth = ip.add_veth(tap_name, device_name,
namespace2=namespace)
ns_veth.link.set_address(mac_address)
if self.conf.network_device_mtu:
root_veth.link.set_mtu(self.conf.network_device_mtu)
ns_veth.link.set_mtu(self.conf.network_device_mtu)
root_veth.link.set_up()
ns_veth.link.set_up()
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
device = ip_lib.IPDevice(device_name, namespace=namespace)
try:
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
LOG.error(_LE("Failed unplugging interface '%s'"),
device_name)
| apache-2.0 |
baidu/palo | tools/row_to_column/convert_row_to_column.py | 3 | 3463 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ConfigParser
import json
import os
import re
import sys
import time
from urllib import urlopen
import MySQLdb
class convert_row_to_column(object):
def connect(self, host, port, http_port, username, password):
"""
Use MySQLdb to connect to PALO
"""
self.host = host
self.port = port
self.http_port = http_port
self.username = username
self.passwd = password
try:
self.db = MySQLdb.connect(host=self.host, port=self.port,
user=self.username,
passwd=self.passwd)
self.cur = self.db.cursor()
except MySQLdb.Error as e:
print ("error %s:%s" % (str(e.args[0]), e.args[1]))
def close(self):
if self.db.open:
self.cur.close()
self.db.close()
def run(self):
url_list = "http://%s:%s@%s:%s/api/_get_ddl?db=default_cluster" % (
self.username, self.passwd, self.host, self.http_port)
url = None
show_databases_sql = "show databases"
self.cur.execute(show_databases_sql)
databases = self.cur.fetchall()
for database_tuple in databases:
#for database in ["habo_db", "tieba_recommend"]:
database = database_tuple[0]
show_tables_sql = "show tables from `" + database + "`"
self.cur.execute(show_tables_sql)
for table_tuple in self.cur:
table = table_tuple[0]
url = "%s:%s&tbl=%s" % (url_list, database, table)
try:
doc = urlopen(url).read()
doc = json.loads(doc)
except Exception as err:
print "url: %s, error: %s" % (url, err)
continue
create_table_stmt = doc["TABLE"]
ddl = create_table_stmt[0].encode("utf-8")
if ddl.find("\"storage_type\" = \"ROW\"") != -1 :
table = re.search('CREATE TABLE `(.*)`', ddl).group(1)
print "alter table " + database + "." + table + " set(\"storage_type\"=\"column\");"
def main():
cf = ConfigParser.ConfigParser()
cf.read("./conf")
host = cf.get('cluster', 'fe_host')
port = int(cf.get('cluster', 'port'))
http_port = int(cf.get('cluster', 'http_port'))
user = cf.get('cluster', 'username')
passwd = cf.get('cluster', 'password')
converter = convert_row_to_column()
converter.connect(host, port, http_port, user, passwd)
converter.run()
converter.close()
if __name__ == '__main__':
main()
| apache-2.0 |
zfil/ansible | v1/ansible/runner/poller.py | 132 | 4480 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import time
from ansible import errors
class AsyncPoller(object):
""" Manage asynchronous jobs. """
def __init__(self, results, runner):
self.runner = runner
self.results = { 'contacted': {}, 'dark': {}}
self.hosts_to_poll = []
self.completed = False
# flag to determine if at least one host was contacted
self.active = False
# True to work with the `and` below
skipped = True
jid = None
for (host, res) in results['contacted'].iteritems():
if res.get('started', False):
self.hosts_to_poll.append(host)
jid = res.get('ansible_job_id', None)
self.runner.vars_cache[host]['ansible_job_id'] = jid
self.active = True
else:
skipped = skipped and res.get('skipped', False)
self.runner.vars_cache[host]['ansible_job_id'] = ''
self.results['contacted'][host] = res
for (host, res) in results['dark'].iteritems():
self.runner.vars_cache[host]['ansible_job_id'] = ''
self.results['dark'][host] = res
if not skipped:
if jid is None:
raise errors.AnsibleError("unexpected error: unable to determine jid")
if len(self.hosts_to_poll)==0:
raise errors.AnsibleError("unexpected error: no hosts to poll")
def poll(self):
""" Poll the job status.
Returns the changes in this iteration."""
self.runner.module_name = 'async_status'
self.runner.module_args = "jid={{ansible_job_id}}"
self.runner.pattern = "*"
self.runner.background = 0
self.runner.complex_args = None
self.runner.inventory.restrict_to(self.hosts_to_poll)
results = self.runner.run()
self.runner.inventory.lift_restriction()
hosts = []
poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}}
for (host, res) in results['contacted'].iteritems():
if res.get('started',False):
hosts.append(host)
poll_results['polled'][host] = res
else:
self.results['contacted'][host] = res
poll_results['contacted'][host] = res
if res.get('failed', False) or res.get('rc', 0) != 0:
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host]['ansible_job_id'])
else:
self.runner.callbacks.on_async_ok(host, res, self.runner.vars_cache[host]['ansible_job_id'])
for (host, res) in results['dark'].iteritems():
self.results['dark'][host] = res
poll_results['dark'][host] = res
if host in self.hosts_to_poll:
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host].get('ansible_job_id','XX'))
self.hosts_to_poll = hosts
if len(hosts)==0:
self.completed = True
return poll_results
def wait(self, seconds, poll_interval):
""" Wait a certain time for job completion, check status every poll_interval. """
# jid is None when all hosts were skipped
if not self.active:
return self.results
clock = seconds - poll_interval
while (clock >= 0 and not self.completed):
time.sleep(poll_interval)
poll_results = self.poll()
for (host, res) in poll_results['polled'].iteritems():
if res.get('started'):
self.runner.callbacks.on_async_poll(host, res, self.runner.vars_cache[host]['ansible_job_id'], clock)
clock = clock - poll_interval
return self.results
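# Illustrative usage sketch (not part of the original module). Assuming a
# configured ansible.runner.Runner whose run() launched a background task,
# so that results carry 'started' and 'ansible_job_id' per host:
#
#   poller = AsyncPoller(results, runner)
#   final = poller.wait(seconds=60, poll_interval=5)
#   # final['contacted'] / final['dark'] mirror Runner result dicts;
#   # per-host progress fires the on_async_poll/on_async_ok/on_async_failed
#   # callbacks seen above.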
| gpl-3.0 |
signed/intellij-community | python/lib/Lib/site-packages/django/contrib/auth/tests/permissions.py | 231 | 1654 | try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.contrib.auth.management import create_permissions
from django.contrib.auth import models as auth_models
from django.contrib.contenttypes import models as contenttypes_models
from django.core.management import call_command
from django.test import TestCase
class TestAuthPermissions(TestCase):
def tearDown(self):
# These tests mess with content types, but content type lookups
# are cached, so we need to make sure the effects of this test
# are cleaned up.
contenttypes_models.ContentType.objects.clear_cache()
def test_permission_register_order(self):
"""Test that the order of registered permissions doesn't break"""
# Changeset 14413 introduced a regression in the ordering of
# newly created permissions for objects. When loading a fixture
# after the initial creation (such as during unit tests), the
# expected IDs for the permissions may not match up, leading to
# SQL errors. This is ticket 14731
# Start with a clean slate and build the permissions as we
# expect to see them in the fixtures.
auth_models.Permission.objects.all().delete()
contenttypes_models.ContentType.objects.all().delete()
create_permissions(auth_models, [], verbosity=0)
create_permissions(contenttypes_models, [], verbosity=0)
stderr = StringIO()
call_command('loaddata', 'test_permissions.json',
verbosity=0, commit=False, stderr=stderr)
self.assertEqual(stderr.getvalue(), '')
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/tests/conftest.py | 21 | 1124 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Py.test hooks."""
from oauth2client import _helpers
def pytest_addoption(parser):
"""Adds the --gae-sdk option to py.test.
This is used to enable the GAE tests. This has to be in this conftest.py
due to the way py.test collects conftest files."""
parser.addoption('--gae-sdk')
def pytest_configure(config):
"""Py.test hook called before loading tests."""
# Default of POSITIONAL_WARNING is too verbose for testing
_helpers.positional_parameters_enforcement = _helpers.POSITIONAL_EXCEPTION
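# Illustrative usage (not part of the original hooks): the option registered
# in pytest_addoption above would typically be supplied on the command line,
#
#   py.test --gae-sdk=/path/to/google_appengine
#
# and read back inside a test or fixture via config.getoption('--gae-sdk'),
# skipping the GAE tests when it is unset.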
| gpl-3.0 |
freakynit/vertx-web | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/unittest2/case.py | 13 | 55709 | """Test case implementation"""
import sys
import collections
import contextlib
import difflib
import logging
import pprint
import re
import traceback
import types
import unittest
import warnings
import six
from six.moves import range
from unittest2 import result
from unittest2.util import (
safe_repr, safe_str, strclass,
unorderable_list_difference, _common_shorten_repr
)
from unittest2.compatibility import (
wraps, with_context, catch_warnings, raise_from
)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ShouldStop(Exception):
"""
The test should stop.
"""
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
class _Outcome(object):
def __init__(self, result=None):
self.expecting_failure = False
self.result = result
self.result_supports_subtests = hasattr(result, "addSubTest")
self.success = True
self.skipped = []
self.expectedFailure = None
self.errors = []
@contextlib.contextmanager
def testPartExecutor(self, test_case, isTest=False):
old_success = self.success
self.success = True
try:
yield
except KeyboardInterrupt:
raise
except SkipTest as e:
self.success = False
self.skipped.append((test_case, str(e)))
except _ShouldStop:
pass
except:
exc_info = sys.exc_info()
if self.expecting_failure:
self.expectedFailure = exc_info
else:
self.success = False
self.errors.append((test_case, exc_info))
# explicitly break a reference cycle:
# exc_info -> frame -> exc_info
exc_info = None
else:
if self.result_supports_subtests and self.success:
self.errors.append((test_case, None))
finally:
self.success = self.success and old_success
def _id(obj):
return obj
class_types = [type]
if getattr(types, 'ClassType', None):
class_types.append(types.ClassType)
class_types = tuple(class_types)
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not isinstance(test_item, class_types):
@wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
def expectedFailure(test_item):
test_item.__unittest_expecting_failure__ = True
return test_item
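# Illustrative usage of the decorators above (not part of the original
# module); this mirrors the standard unittest skipping API:
#
#   class MyTestCase(TestCase):
#       @skip("demonstrating skipping")
#       def test_nothing(self):
#           self.fail("shouldn't happen")
#
#       @skipUnless(sys.platform.startswith("win"), "requires Windows")
#       def test_windows_support(self):
#           pass  # windows-specific testing code
#
#       @expectedFailure
#       def test_known_bug(self):
#           self.assertEqual(1, 0)  # recorded as an expected failure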
class _BaseTestCaseContext:
def __init__(self, test_case):
self.test_case = test_case
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
class _AssertRaisesBaseContext(_BaseTestCaseContext):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
_BaseTestCaseContext.__init__(self, test_case)
self.expected = expected
self.failureException = test_case.failureException
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if expected_regex is not None:
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"%s not raised" % (exc_name,))
#else:
# if getattr(traceback, 'clear_frames', None):
# traceback.clear_frames(tb)
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regex.pattern, str(exc_value)))
return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""
def __enter__(self):
# The __warningregistry__'s need to be in a pristine state for tests
# to work properly.
for v in sys.modules.values():
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter("always", self.expected)
return self
def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
raise self.failureException('%r does not match %r' %
(self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
raise self.failureException("%s not triggered by %s"
% (exc_name, self.obj_name))
else:
raise self.failureException("%s not triggered"
% exc_name )
class _TypeEqualityDict(object):
def __init__(self, testcase):
self.testcase = testcase
self._store = {}
def __setitem__(self, key, value):
self._store[key] = value
def __getitem__(self, key):
value = self._store[key]
if isinstance(value, six.string_types):
return getattr(self.testcase, value)
return value
def get(self, key, default=None):
if key in self._store:
return self[key]
return default
_LoggingWatcher = collections.namedtuple("_LoggingWatcher",
["records", "output"])
class _CapturingHandler(logging.Handler):
"""
A logging handler capturing all (raw and formatted) logging output.
"""
def __init__(self):
logging.Handler.__init__(self)
self.watcher = _LoggingWatcher([], [])
def flush(self):
pass
def emit(self, record):
self.watcher.records.append(record)
msg = self.format(record)
self.watcher.output.append(msg)
class _AssertLogsContext(_BaseTestCaseContext):
"""A context manager used to implement TestCase.assertLogs()."""
LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def __init__(self, test_case, logger_name, level):
_BaseTestCaseContext.__init__(self, test_case)
self.logger_name = logger_name
if level:
self.level = getattr(logging, str(level), level)
else:
self.level = logging.INFO
self.msg = None
def __enter__(self):
if isinstance(self.logger_name, logging.Logger):
logger = self.logger = self.logger_name
else:
logger = self.logger = logging.getLogger(self.logger_name)
formatter = logging.Formatter(self.LOGGING_FORMAT)
handler = _CapturingHandler()
handler.setFormatter(formatter)
self.watcher = handler.watcher
self.old_handlers = logger.handlers[:]
self.old_level = logger.level
self.old_propagate = logger.propagate
logger.handlers = [handler]
logger.setLevel(self.level)
logger.propagate = False
return handler.watcher
def __exit__(self, exc_type, exc_value, tb):
self.logger.handlers = self.old_handlers
self.logger.propagate = self.old_propagate
self.logger.setLevel(self.old_level)
if exc_type is not None:
# let unexpected exceptions pass through
return False
if len(self.watcher.records) == 0:
self._raiseFailure(
"no logs of level {0} or higher triggered on {1}"
.format(logging.getLevelName(self.level), self.logger.name))
class TestCase(unittest.TestCase):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when
the instance's assertion methods fail; test methods raising this
exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of
objects used in assert methods) will be printed on failure in *addition*
to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages
by assert methods using difflib. It is looked up as an instance
attribute so can be configured by individual tests if required.
"""
failureException = AssertionError
longMessage = True
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._outcome = None
try:
testMethod = getattr(self, methodName)
except AttributeError:
raise ValueError("no such test method in %s: %s" % \
(self.__class__, methodName))
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
self._subtest = None
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = _TypeEqualityDict(self)
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
if six.PY2:
self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
self.addTypeEqualityFunc(six.text_type, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
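    # Illustrative sketch (not part of the original module): a subclass can
    # register a richer comparison for its own type; 'Point' here is a
    # hypothetical user-defined class:
    #
    #   class PointTest(TestCase):
    #       def setUp(self):
    #           self.addTypeEqualityFunc(Point, self.assertPointEqual)
    #
    #       def assertPointEqual(self, first, second, msg=None):
    #           if (first.x, first.y) != (second.x, second.y):
    #               standardMsg = '%r != %r' % (first, second)
    #               raise self.failureException(
    #                   self._formatMessage(msg, standardMsg))
    #
    # assertEqual() then dispatches to assertPointEqual whenever both
    # arguments are Point instances.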
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
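    # Illustrative sketch (not part of the original module): cleanups run
    # in LIFO order and fire even when setUp fails partway through, e.g.
    #
    #   def setUp(self):
    #       self.tmpdir = tempfile.mkdtemp()
    #       self.addCleanup(shutil.rmtree, self.tmpdir)
    #
    # (tempfile/shutil would need importing in the test module.)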
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, test_case, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(test_case, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(test_case)
@contextlib.contextmanager
def subTest(self, msg=None, **params):
"""Return a context manager that will return the enclosed block
of code in a subtest identified by the optional message and
keyword parameters. A failure in the subtest marks the test
case as failed but resumes execution at the end of the enclosed
block, allowing further test code to be executed.
"""
if not self._outcome.result_supports_subtests:
yield
return
parent = self._subtest
if parent is None:
params_map = collections.ChainMap(params)
else:
params_map = parent.params.new_child(params)
self._subtest = _SubTest(self, msg, params_map)
try:
with self._outcome.testPartExecutor(self._subtest, isTest=True):
yield
if not self._outcome.success:
result = self._outcome.result
if result is not None and result.failfast:
raise _ShouldStop
elif self._outcome.expectedFailure:
# If the test is expecting a failure, we really want to
# stop now and register the expected failure.
raise _ShouldStop
finally:
self._subtest = parent
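    # Illustrative usage of subTest() above (not part of the original
    # module):
    #
    #   def test_even(self):
    #       for i in range(0, 6):
    #           with self.subTest(i=i):
    #               self.assertEqual(i % 2, 0)
    #
    # Each odd value is reported as a separate subtest failure while the
    # loop continues to run.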
def _feedErrorsToResult(self, result, errors):
for test, exc_info in errors:
if isinstance(test, _SubTest):
result.addSubTest(test.test_case, test, exc_info)
elif exc_info is not None:
if issubclass(exc_info[0], self.failureException):
result.addFailure(test, exc_info)
else:
result.addError(test, exc_info)
def _addExpectedFailure(self, result, exc_info):
try:
addExpectedFailure = result.addExpectedFailure
except AttributeError:
warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
RuntimeWarning)
result.addSuccess(self)
else:
addExpectedFailure(self, exc_info)
def _addUnexpectedSuccess(self, result):
try:
addUnexpectedSuccess = result.addUnexpectedSuccess
except AttributeError:
warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failure",
RuntimeWarning)
# We need to pass an actual exception and traceback to addFailure,
# otherwise the legacy result can choke.
try:
raise_from(_UnexpectedSuccess, None)
except _UnexpectedSuccess:
result.addFailure(self, sys.exc_info())
else:
addUnexpectedSuccess(self)
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, self, skip_why)
finally:
result.stopTest(self)
return
expecting_failure = getattr(testMethod,
"__unittest_expecting_failure__", False)
outcome = _Outcome(result)
try:
self._outcome = outcome
with outcome.testPartExecutor(self):
self.setUp()
if outcome.success:
outcome.expecting_failure = expecting_failure
with outcome.testPartExecutor(self, isTest=True):
testMethod()
outcome.expecting_failure = False
with outcome.testPartExecutor(self):
self.tearDown()
self.doCleanups()
for test, reason in outcome.skipped:
self._addSkip(result, test, reason)
self._feedErrorsToResult(result, outcome.errors)
if outcome.success:
if expecting_failure:
if outcome.expectedFailure:
self._addExpectedFailure(result, outcome.expectedFailure)
else:
self._addUnexpectedSuccess(result)
else:
result.addSuccess(self)
return result
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
# explicitly break reference cycles:
# outcome.errors -> frame -> outcome -> outcome.errors
# outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
del outcome.errors[:]
outcome.expectedFailure = None
# clear the outcome, no more needed
self._outcome = None
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
outcome = self._outcome or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
with outcome.testPartExecutor(self):
function(*args, **kwargs)
# return this for backwards compatibility
        # even though we no longer use it internally
return outcome.success
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"Fail the test if the expression is true."
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_str(standardMsg), safe_str(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
if callableObj is None:
return _AssertRaisesContext(excClass, self)
try:
callableObj(*args, **kwargs)
except excClass:
return
if hasattr(excClass,'__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException("%s not raised" % excName)
def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
"""Fail unless a warning of class warnClass is triggered
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of warning is
triggered, it will not be handled: depending on the other
warning filtering rules in effect, it might be silenced, printed
out, or raised as an exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertWarns(SomeWarning):
do_something()
The context manager keeps a reference to the first matching
warning as the 'warning' attribute; similarly, the 'filename'
and 'lineno' attributes give you information about the line
of Python code from which the warning was triggered.
This allows you to inspect the warning after the assertion::
with self.assertWarns(SomeWarning) as cm:
do_something()
the_warning = cm.warning
self.assertEqual(the_warning.some_attribute, 147)
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj)
if callable_obj is None:
return context
context.__enter__()
try:
callable_obj(*args, **kwargs)
except:
if not context.__exit__(*sys.exc_info()):
raise
else:
return
else:
context.__exit__(None, None, None)
def assertLogs(self, logger=None, level=None):
"""Fail unless a log message of level *level* or higher is emitted
        on *logger* or its children. If omitted, *level* defaults to
INFO and *logger* defaults to the root logger.
This method must be used as a context manager, and will yield
a recording object with two attributes: `output` and `records`.
At the end of the context manager, the `output` attribute will
be a list of the matching formatted log messages and the
`records` attribute will be a list of the corresponding LogRecord
objects.
Example::
with self.assertLogs('foo', level='INFO') as cm:
logging.getLogger('foo').info('first message')
logging.getLogger('foo.bar').error('second message')
self.assertEqual(cm.output, ['INFO:foo:first message',
'ERROR:foo.bar:second message'])
"""
return _AssertLogsContext(self, logger, level)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % _common_shorten_repr(first, second)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
           difference between the two objects is more than the given delta.
Note that decimal places (from zero) are usually not the same
           as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
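    # Illustrative sketch of the two tolerance modes above (not part of the
    # original module); 'places' and 'delta' are mutually exclusive:
    #
    #   self.assertAlmostEqual(1.00000001, 1.0)          # 1e-8 rounds to 0
    #   self.assertAlmostEqual(100.0, 101.0, delta=2.0)  # |diff| <= delta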
def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
           difference between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
           as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
differing = '%ss differ: %s != %s\n' % (
(seq_type_name.capitalize(),) +
_common_shorten_repr(seq1, seq2))
for i in range(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
        assertSetEqual uses duck typing to support
different types of sets, and is optimized for sets specifically
(parameters must support a difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError:
e = sys.exc_info()[1]
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError:
e = sys.exc_info()[1]
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError:
e = sys.exc_info()[1]
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError:
e = sys.exc_info()[1]
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % _common_shorten_repr(d1, d2)
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
missing = []
mismatched = []
for key, value in expected.items():
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(actual[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
expected_seq and actual_seq contain the same elements. It is
the equivalent of::
self.assertEqual(sorted(expected_seq), sorted(actual_seq))
Raises with an error message listing which elements of expected_seq
are missing from actual_seq and vice versa if any.
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
try:
expected = sorted(expected_seq)
actual = sorted(actual_seq)
except TypeError:
# Unsortable items (example: set(), complex(), ...)
expected = list(expected_seq)
actual = list(actual_seq)
missing, unexpected = unorderable_list_difference(
expected, actual, ignore_duplicate=False
)
else:
return self.assertSequenceEqual(expected, actual, msg=msg)
errors = []
if missing:
errors.append('Expected, but missing:\n %s' %
safe_repr(missing))
if unexpected:
errors.append('Unexpected, but present:\n %s' %
safe_repr(unexpected))
if errors:
standardMsg = '\n'.join(errors)
self.fail(self._formatMessage(msg, standardMsg))
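    # Illustrative sketch (not part of the original module), restating the
    # docstring examples above as calls:
    #
    #   self.assertItemsEqual([0, 1, 1], [1, 0, 1])  # passes: same counts
    #   self.assertItemsEqual([0, 0, 1], [0, 1])     # fails: counts differ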
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertIsInstance(first, six.string_types, (
'First argument is not a string'))
self.assertIsInstance(second, six.string_types, (
'Second argument is not a string'))
if first != second:
# don't use difflib if the strings are too long
if (len(first) > self._diffThreshold or
len(second) > self._diffThreshold):
self._baseAssertEqual(first, second, msg)
firstlines = first.splitlines(True)
secondlines = second.splitlines(True)
if len(firstlines) == 1 and first.strip('\r\n') == first:
firstlines = [first + '\n']
secondlines = [second + '\n']
standardMsg = '%s != %s' % _common_shorten_repr(first, second)
diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegex(self, expected_exception, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regex.
Args:
expected_exception: Exception class expected to be raised.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, callable_obj,
expected_regex)
return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
def assertWarnsRegex(self, expected_warning, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a triggered warning matches a regex.
Basic functioning is similar to assertWarns() with the addition
that only warnings whose messages also match the regular expression
are considered successful matches.
Args:
expected_warning: Warning class expected to be triggered.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj,
expected_regex)
return context.handle('assertWarnsRegex', callable_obj, args, kwargs)
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, six.string_types):
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
def assertNotRegex(self, text, unexpected_regex, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, six.string_types):
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
msg = msg or "Regex matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regex.pattern,
text)
raise self.failureException(msg)
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
PendingDeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
failUnlessEqual = assertEquals = _deprecate(assertEqual)
failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
failUnless = assert_ = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
assertRaisesRegexp = _deprecate(assertRaisesRegex)
assertRegexpMatches = _deprecate(assertRegex)
assertNotRegexpMatches = _deprecate(assertNotRegex)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s testFunc=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
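# Illustrative sketch (not part of the original module): wrapping an
# existing plain function as a test case:
#
#   def legacy_check():
#       assert 1 + 1 == 2
#
#   case = FunctionTestCase(legacy_check, description="legacy arithmetic")
#   result = case.run()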
class _SubTest(TestCase):
def __init__(self, test_case, message, params):
super(_SubTest, self).__init__()
self._message = message
self.test_case = test_case
self.params = params
self.failureException = test_case.failureException
def runTest(self):
raise NotImplementedError("subtests cannot be run directly")
def _subDescription(self):
parts = []
if self._message:
parts.append("[{0}]".format(self._message))
if self.params:
params_desc = ', '.join(
"{0}={1!r}".format(k, v)
for (k, v) in sorted(self.params.items()))
parts.append("({0})".format(params_desc))
return " ".join(parts) or '(<subtest>)'
def id(self):
return "{0} {1}".format(self.test_case.id(), self._subDescription())
def shortDescription(self):
"""Returns a one-line description of the subtest, or None if no
description has been provided.
"""
return self.test_case.shortDescription()
def __str__(self):
return "{0} {1}".format(self.test_case, self._subDescription())
| apache-2.0 |
watonyweng/horizon | openstack_dashboard/dashboards/project/images/snapshots/urls.py | 54 | 1048 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.images.snapshots import views
urlpatterns = patterns(
'',
url(r'^(?P<instance_id>[^/]+)/create',
views.CreateView.as_view(),
name='create')
)
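# Illustrative note (not part of the original module): once this urlconf is
# included by the parent images dashboard, a request such as
# .../project/images/snapshots/<instance_id>/create resolves to CreateView;
# the exact URL prefix and reverse() namespace depend on how the parent
# urlconf includes this module.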
| apache-2.0 |