code
stringlengths 1
199k
|
|---|
import zlib
from . import register_test
# File extensions that are never allowed inside a package; matching files
# are flagged and recorded as binary content in the error-bundle metadata.
blacklisted_extensions = ("dll", "exe", "dylib", "so", "sh", "class")

# Leading magic-number byte sequences of known binary/executable formats.
# Compared against the first bytes of each member file; the longest prefix
# checked is 4 bytes.
blacklisted_magic_numbers = (
    (0x4d, 0x5a),  # EXE/DLL
    (0x5a, 0x4d),  # Alternative for EXE/DLL
    (0x7f, 0x45, 0x4c, 0x46),  # UNIX elf
    (0x23, 0x21),  # Shebang (shell script)
    (0xca, 0xfe, 0xba, 0xbe),  # Java + Mach-O (dylib)
    (0xca, 0xfe, 0xd0, 0x0d),  # Java (packed)
    (0xfe, 0xed, 0xfa, 0xce),  # Mach-O
    (0x46, 0x57, 0x53),  # Uncompressed SWF
    (0x43, 0x57, 0x53),  # ZLIB compressed SWF
)

# Version-control directory names that must not ship inside a package.
VC_DIRS = (".git", ".svn", )
@register_test(tier=1)
def test_blacklisted_files(err, package=None):
    """Detects blacklisted files and extensions.

    Reports into `err`:
    - an error for member names with leading/trailing spaces,
    - an error (once) when a version-control directory is present,
    - a warning for members whose first bytes match a known binary
      magic number,
    - a single summary warning for members with blacklisted extensions.

    err     -- error bundle collecting errors/warnings/metadata
    package -- iterable package wrapper; package.info(name) returns member
               metadata and package.zf is the underlying zip file
    """
    if not package:
        return
    flagged_files = []
    flagged_for_vc = False
    for name in package:
        file_ = package.info(name)
        # Names beginning or ending with a space are rejected outright and
        # skip all further checks for this member.
        if (file_["name_lower"].startswith(" ") or
                file_["name_lower"].endswith(" ")):
            err.error(
                err_id=("packagelayout", "invalid_name"),
                error="Filename starts with or ends with invalid character.",
                description=["A filename within the package was found to "
                             "begin or end with a space. This is not "
                             "allowed.",
                             "Detected filename: '%s'" % name],
                filename=name)
            continue
        # Simple test to ensure that the extension isn't blacklisted
        extension = file_["extension"]
        if extension in blacklisted_extensions:
            # Note that there is a binary extension in the metadata
            err.metadata["contains_binary_extension"] = True
            flagged_files.append(name)
            continue
        # Version-control directories anywhere in the path; only the first
        # occurrence is reported (flagged_for_vc suppresses repeats).
        if any(x in VC_DIRS for x in name.lower().split("/")):
            if flagged_for_vc:
                continue
            flagged_for_vc = True
            err.error(
                err_id=("packagelayout", "version_control"),
                error="Version control detected in package",
                description=["A version control directory was detected in "
                             "your package. Version control may not be "
                             "included as part of a packaged app due to size "
                             "and potentially sensitive data.",
                             "Detected file: %s" % name],
                filename=name)
            continue
        # Perform a deep inspection to detect magic numbers for known binary
        # and executable file types.
        try:
            z = package.zf.open(name)
            # NOTE(review): map(ord, ...) assumes zf.open().read() returns a
            # Python 2 str; under Python 3 it returns bytes of ints — confirm
            # the intended interpreter.
            bytes = tuple(map(ord, z.read(4)))  # Longest is 4 bytes
            z.close()
        except zlib.error:
            # Tell the zip that there's a broken file.
            package.broken_files.add(name)
            # Validation aborts on the first unreadable member; remaining
            # members are not scanned.
            return err.error(
                err_id=("packagelayout", "blacklisted_files", "bad_zip"),
                error="ZIP could not be read",
                description="Validation failed because the ZIP package does "
                            "not seem to be valid. One or more files could not "
                            "be successfully unzipped.",
                filename=name)
        # Compare the member's first bytes against each known magic prefix.
        if any(bytes[0:len(x)] == x for x in blacklisted_magic_numbers):
            # Note that there is binary content in the metadata
            err.metadata["contains_binary_content"] = True
            err.warning(
                err_id=("testcases_packagelayout", "test_blacklisted_files",
                        "disallowed_file_type"),
                warning="Flagged file type found",
                description=["A file was found to contain flagged content "
                             "(i.e.: executable data, potentially "
                             "unauthorized scripts, etc.).",
                             u"The file \"%s\" contains flagged content" %
                             name],
                filename=name)
    # One summary warning for all extension-flagged members.
    if flagged_files:
        err.warning(
            err_id=("testcases_packagelayout", "test_blacklisted_files",
                    "disallowed_extension"),
            warning="Flagged file extensions found.",
            description=["Files whose names end with flagged extensions have "
                         "been found in the app.",
                         "The extension of these files are flagged because "
                         "they usually identify binary components, which can "
                         "contain malware.", "\n".join(flagged_files)])
@register_test(tier=1)
def test_layout_all(err, package):
"""Tests the well-formedness of apps."""
if not package:
return
package_namelist = list(package.zf.namelist())
package_nameset = set(package_namelist)
if len(package_namelist) != len(package_nameset):
err.error(
err_id=("testcases_packagelayout", "test_layout_all",
"duplicate_entries"),
error="Package contains duplicate entries",
description="The package contains multiple entries with the same "
"name. This practice has been banned. Try unzipping "
"and re-zipping your app and try again.")
if any(name.startswith('META-INF/') for name in package_nameset):
err.error(
err_id=("testcases_packagelayout", "test_layout_all",
"META-INF"),
error="Packages must not contain META-INF",
description="Packages must not contain a META-INF directory. This "
"directory prevents apps from being properly signed.")
|
import os
def limit(i):
    """Filter '.libs' entries out of a directory listing.

    i -- dict with an optional 'list' key holding path strings.
    Returns {'return': 0, 'list': entries not containing '.libs'}.
    """
    kept = [entry for entry in i.get('list', []) if '.libs' not in entry]
    return {'return': 0, 'list': kept}
def version_cmd(i):
    """Derive a version string from a versioned symlink target.

    If full_path is a symlink like 'libfoo.so' -> 'libfoo.so-1.2', the
    suffix after the base name becomes 'api-1.2'; otherwise version is ''.

    i -- dict with 'ck_kernel' and 'full_path' keys.
    Returns {'return': 0, 'cmd': '', 'version': <version or ''>}.
    """
    # Kept for interface parity: raises KeyError if the kernel is absent.
    kernel = i['ck_kernel']

    full_path = i['full_path']
    base_name = os.path.basename(full_path)
    resolved_name = os.path.basename(os.path.realpath(full_path))

    version = ''
    if resolved_name.startswith(base_name):
        version = resolved_name[len(base_name) + 1:]
        if version != '':
            version = 'api-' + version

    return {'return': 0, 'cmd': '', 'version': version}
def setup(i):
    """
    Locate the installed library/include layout and export environment vars.

    Input:  {
              cfg              - meta of this soft entry
              self_cfg         - meta of module soft
              ck_kernel        - import CK kernel module (to reuse functions)

              host_os_uoa      - host OS UOA
              host_os_uid      - host OS UID
              host_os_dict     - host OS meta

              target_os_uoa    - target OS UOA
              target_os_uid    - target OS UID
              target_os_dict   - target OS meta

              target_device_id - target device ID (if via ADB)

              tags             - list of tags used to search this entry

              env              - updated environment vars from meta
              customize        - updated customize vars from meta

              deps             - resolved dependencies for this soft

              interactive      - if 'yes', can ask questions, otherwise quiet
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0

              bat          - prepared string for bat file
            }
    """

    import os

    # Get variables
    ck=i['ck_kernel']
    s=''

    iv=i.get('interactive','')

    cus=i.get('customize',{})
    fp=cus.get('full_path','')

    hosd=i['host_os_dict']
    tosd=i['target_os_dict']

    # Check platform
    hplat=hosd.get('ck_name','')

    hproc=hosd.get('processor','')
    tproc=tosd.get('processor','')

    remote=tosd.get('remote','')
    tbits=tosd.get('bits','')

    env=i['env']

    fi=cus.get('include_file','')

    # Walk up from the installed file until a directory containing lib/ or
    # lib64/ is found — that is taken as the installation root.
    pi=fp
    found=False
    while True:
        if os.path.isdir(os.path.join(pi,'lib')) or os.path.isdir(os.path.join(pi,'lib64')):
            found=True
            break
        pix=os.path.dirname(pi)
        if pix==pi:
            # Reached the filesystem root without finding lib/lib64.
            break
        pi=pix

    if not found:
        return {'return':1, 'error':'can\'t find root dir of this installation'}

    # Derive static (.a) library name from the dynamic (.so) one.
    lb=os.path.basename(fp)
    lbs=lb
    if lbs.endswith('.so'):
        lbs=lbs[:-3]+'.a'

    pl=os.path.dirname(fp)

    cus['path_lib']=pl

    # Look for the include file one or two levels above the lib dir.
    pl1=os.path.dirname(pl)
    pl2=os.path.dirname(pl1)

    pi=''
    if os.path.isfile(os.path.join(pl1,'include',fi)):
        pi=pl1
    elif os.path.isfile(os.path.join(pl2,'include',fi)):
        pi=pl2

    if pi=='':
        return {'return':1, 'error':'can\'t find include file'}

    cus['path_include']=os.path.join(pi,'include')
    cus['include_name']=fi

    cus['static_lib']=lbs
    cus['dynamic_lib']=lb

    # Delegate generation of the library-path export script to the OS module.
    r = ck.access({'action': 'lib_path_export_script', 'module_uoa': 'os', 'host_os_dict': hosd,
                   'lib_path': cus.get('path_lib','')})
    if r['return']>0: return r
    s += r['script']

    # Publish the discovered layout through the soft entry's env prefix.
    ep=cus.get('env_prefix','')
    if pi!='':
        env[ep]=pi

    env[ep+'_INCLUDE_NAME']=cus.get('include_name','')
    env[ep+'_STATIC_NAME']=cus.get('static_lib','')
    env[ep+'_DYNAMIC_NAME']=cus.get('dynamic_lib','')

    return {'return':0, 'bat':s}
|
import os
from django.apps import apps
from django.db import connection
from django.core import management
from django.core.management import BaseCommand, CommandError, find_commands
from django.core.management.utils import find_command, popen_wrapper
from django.test import SimpleTestCase, ignore_warnings
from django.test.utils import captured_stderr, captured_stdout, extend_sys_path
from django.utils import translation
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils._os import upath
from django.utils.six import StringIO
class CommandTests(SimpleTestCase):
    """Tests for call_command() / execute_from_command_line() behaviour,
    driven by the sample management commands of this test app ('dance',
    'hal', 'transaction', ...)."""

    def test_command(self):
        # Basic invocation; output is captured via the stdout kwarg.
        out = StringIO()
        management.call_command('dance', stdout=out)
        self.assertIn("I don't feel like dancing Rock'n'Roll.\n", out.getvalue())

    def test_command_style(self):
        out = StringIO()
        management.call_command('dance', style='Jive', stdout=out)
        self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
        # Passing options as arguments also works (thanks argparse)
        management.call_command('dance', '--style', 'Jive', stdout=out)
        self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())

    def test_language_preserved(self):
        # The active translation must survive a call_command() call.
        out = StringIO()
        with translation.override('fr'):
            management.call_command('dance', stdout=out)
            self.assertEqual(translation.get_language(), 'fr')

    def test_explode(self):
        """ Test that an unknown command raises CommandError """
        self.assertRaises(CommandError, management.call_command, ('explode',))

    def test_system_exit(self):
        """ Exception raised in a command should raise CommandError with
            call_command, but SystemExit when run from command line
        """
        with self.assertRaises(CommandError):
            management.call_command('dance', example="raise")
        with captured_stderr() as stderr, self.assertRaises(SystemExit):
            management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
        self.assertIn("CommandError", stderr.getvalue())

    def test_deactivate_locale_set(self):
        # Deactivate translation when set to true
        out = StringIO()
        with translation.override('pl'):
            management.call_command('leave_locale_alone_false', stdout=out)
            self.assertEqual(out.getvalue(), "")

    def test_configured_locale_preserved(self):
        # Leaves locale from settings when set to false
        out = StringIO()
        with translation.override('pl'):
            management.call_command('leave_locale_alone_true', stdout=out)
            self.assertEqual(out.getvalue(), "pl\n")

    def test_find_command_without_PATH(self):
        """
        find_command should still work when the PATH environment variable
        doesn't exist (#22256).
        """
        current_path = os.environ.pop('PATH', None)
        try:
            self.assertIsNone(find_command('_missing_'))
        finally:
            # Restore PATH even if the assertion failed.
            if current_path is not None:
                os.environ['PATH'] = current_path

    def test_discover_commands_in_eggs(self):
        """
        Test that management commands can also be loaded from Python eggs.
        """
        egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
        egg_name = '%s/basic.egg' % egg_dir
        with extend_sys_path(egg_name):
            with self.settings(INSTALLED_APPS=['commandegg']):
                cmds = find_commands(os.path.join(apps.get_app_config('commandegg').path, 'management'))
        self.assertEqual(cmds, ['eggcommand'])

    def test_call_command_option_parsing(self):
        """
        When passing the long option name to call_command, the available option
        key is the option dest name (#22985).
        """
        out = StringIO()
        management.call_command('dance', stdout=out, opt_3=True)
        self.assertIn("option3", out.getvalue())
        self.assertNotIn("opt_3", out.getvalue())
        self.assertNotIn("opt-3", out.getvalue())

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_optparse_compatibility(self):
        """
        optparse should be supported during Django 1.8/1.9 releases.
        """
        out = StringIO()
        management.call_command('optparse_cmd', stdout=out)
        self.assertEqual(out.getvalue(), "All right, let's dance Rock'n'Roll.\n")
        # Simulate command line execution
        with captured_stdout() as stdout, captured_stderr():
            management.execute_from_command_line(['django-admin', 'optparse_cmd'])
        self.assertEqual(stdout.getvalue(), "All right, let's dance Rock'n'Roll.\n")

    def test_calling_a_command_with_only_empty_parameter_should_ends_gracefully(self):
        out = StringIO()
        management.call_command('hal', "--empty", stdout=out)
        self.assertIn("Dave, I can't do that.\n", out.getvalue())

    def test_calling_command_with_app_labels_and_parameters_should_be_ok(self):
        out = StringIO()
        management.call_command('hal', 'myapp', "--verbosity", "3", stdout=out)
        self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())

    def test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok(self):
        out = StringIO()
        management.call_command('hal', "--verbosity", "3", "myapp", stdout=out)
        self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())

    def test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error(self):
        out = StringIO()
        with self.assertRaises(CommandError):
            management.call_command('hal', stdout=out)

    def test_output_transaction(self):
        # Commands with output_transaction wrap their output in the
        # backend's transaction start/end SQL.
        out = StringIO()
        management.call_command('transaction', stdout=out, no_color=True)
        output = out.getvalue().strip()
        self.assertTrue(output.startswith(connection.ops.start_transaction_sql()))
        self.assertTrue(output.endswith(connection.ops.end_transaction_sql()))

    def test_call_command_no_checks(self):
        """
        By default, call_command should not trigger the check framework, unless
        specifically asked.
        """
        self.counter = 0

        def patched_check(self_, **kwargs):
            self.counter = self.counter + 1

        saved_check = BaseCommand.check
        BaseCommand.check = patched_check
        try:
            management.call_command("dance", verbosity=0)
            self.assertEqual(self.counter, 0)
            management.call_command("dance", verbosity=0, skip_checks=False)
            self.assertEqual(self.counter, 1)
        finally:
            # Always restore the real check method.
            BaseCommand.check = saved_check
class UtilsTests(SimpleTestCase):
    """Tests for django.core.management.utils helpers."""

    def test_no_existent_external_program(self):
        # popen_wrapper must raise CommandError for a missing executable.
        with self.assertRaises(CommandError):
            popen_wrapper(['a_42_command_that_doesnt_exist_42'])
|
'''Quantizing a continuous distribution in 2d
Author: josef-pktd
'''
import numpy as np
def prob_bv_rectangle(lower, upper, cdf):
    '''helper function for probability of a rectangle in a bivariate distribution

    Uses the standard inclusion-exclusion identity
    P(rect) = F(xu,yu) - F(xu,yl) - F(xl,yu) + F(xl,yl).

    Parameters
    ----------
    lower : array_like
        tuple of lower integration bounds
    upper : array_like
        tuple of upper integration bounds
    cdf : callable
        cdf(x,y), cumulative distribution function of bivariate distribution

    how does this generalize to more than 2 variates ?
    '''
    x_lo, y_lo = lower[0], lower[1]
    x_hi, y_hi = upper[0], upper[1]
    return (cdf(x_hi, y_hi) - cdf(x_hi, y_lo)
            - cdf(x_lo, y_hi) + cdf(x_lo, y_lo))
def prob_mv_grid(bins, cdf, axis=-1):
    '''helper function for probability of a rectangle grid in a multivariate distribution

    Evaluates the cdf on the (broadcast) grid of bin edges and differences
    it once along each axis, yielding the probability of every grid cell.

    Parameters
    ----------
    bins : tuple or ndarray
        tuple of bin edges, currently it is assumed that they broadcast
        correctly; if an ndarray, it is assumed to hold the already
        broadcast edge arrays stacked along axis 0
    cdf : callable
        cdf(bins_) evaluated on the sequence of broadcast edge arrays

    Returns
    -------
    probs : ndarray
        cell probabilities, one np.diff per dimension

    how does this generalize to more than 2 variates ?
    '''
    if not isinstance(bins, np.ndarray):
        # list() so the result supports len() and indexing on Python 3,
        # where map() returns a lazy iterator.
        bins = list(map(np.asarray, bins))
        n_dim = len(bins)
        bins_ = []
        # broadcast if binedges are 1d
        # (generator avoids the broken `map(...) == np.ones(...)` comparison)
        if all(np.ndim(b) == 1 for b in bins):
            for d in range(n_dim):
                sl = [None] * n_dim
                sl[d] = slice(None)
                # tuple index: indexing an ndarray with a list of slices
                # is deprecated/removed in modern numpy
                bins_.append(bins[d][tuple(sl)])
    else:  # assume it is already correctly broadcasted
        n_dim = bins.shape[0]
        bins_ = bins

    cdf_values = cdf(bins_)
    probs = cdf_values.copy()
    # one first-difference per dimension turns cdf values into cell masses
    for d in range(n_dim):
        probs = np.diff(probs, axis=d)
    return probs
def prob_quantize_cdf(binsx, binsy, cdf):
    '''quantize a continuous distribution given by a cdf

    The cdf is evaluated once on the full grid of bin edges; each cell
    probability is then assembled from the precomputed corner values.

    Parameters
    ----------
    binsx : array_like, 1d
        binedges
    binsy : array_like, 1d
        binedges
    cdf : callable
        cdf(x, y) of the bivariate distribution
    '''
    binsx = np.asarray(binsx)
    binsy = np.asarray(binsy)
    nx = binsx.shape[0] - 1
    ny = binsy.shape[0] - 1
    probs = np.nan * np.ones((nx, ny))  # np.empty(nx,ny)

    # precompute cdf on the edge grid; rectangles index into it afterwards
    cdf_values = cdf(binsx[:, None], binsy)
    grid_cdf = lambda ix, iy: cdf_values[ix, iy]

    for ix in range(nx):
        for iy in range(ny):
            probs[ix, iy] = prob_bv_rectangle((ix, iy), (ix + 1, iy + 1),
                                              grid_cdf)

    assert not np.isnan(probs).any()
    return probs
def prob_quantize_cdf_old(binsx, binsy, cdf):
    '''quantize a continuous distribution given by a cdf

    old version without precomputing cdf values

    Parameters
    ----------
    binsx : array_like, 1d
        binedges
    binsy : array_like, 1d
        binedges
    cdf : callable
        cdf(x, y) of the bivariate distribution
    '''
    binsx = np.asarray(binsx)
    binsy = np.asarray(binsy)
    nx = binsx.shape[0] - 1
    ny = binsy.shape[0] - 1
    probs = np.nan * np.ones((nx, ny))  # np.empty(nx,ny)

    # evaluate the cdf directly at each rectangle's corners
    for ix in range(nx):
        for iy in range(ny):
            lower = (binsx[ix], binsy[iy])
            upper = (binsx[ix + 1], binsy[iy + 1])
            probs[ix, iy] = prob_bv_rectangle(lower, upper, cdf)

    assert not np.isnan(probs).any()
    return probs
if __name__ == '__main__':
    from numpy.testing import assert_almost_equal

    unif_2d = lambda x, y: x * y

    # single rectangles under the uniform product cdf
    assert_almost_equal(prob_bv_rectangle([0, 0], [1, 0.5], unif_2d), 0.5, 14)
    assert_almost_equal(prob_bv_rectangle([0, 0], [0.5, 0.5], unif_2d), 0.25, 14)

    # 5x4 grid: every cell carries probability 0.05
    arr1a = prob_quantize_cdf(np.linspace(0, 1, 6), np.linspace(0, 1, 5), unif_2d)
    assert_almost_equal(arr1a, 0.05 * np.ones((5, 4)), 14)

    # 4x1 grid: every cell carries probability 0.25
    arr2a = prob_quantize_cdf(np.linspace(0, 1, 5), np.linspace(0, 1, 2), unif_2d)
    assert_almost_equal(arr2a, 0.25 * np.ones((4, 1)), 14)

    # 1x4 grid: transpose of the previous case
    arr3a = prob_quantize_cdf(np.linspace(0, 1, 2), np.linspace(0, 1, 5), unif_2d)
    assert_almost_equal(arr3a, 0.25 * np.ones((1, 4)), 14)
|
from os import path
from gluon import *
from gluon.storage import Storage
from s3 import *
class index():
    """ Custom Home Page

    Builds the EUROSHA front page: module menu boxes, the facility/map
    box, an organisation datatable for permitted users, login/register
    forms for anonymous visitors, and an optional RSS feed widget.
    """

    def __call__(self):

        request = current.request
        response = current.response
        response.title = current.deployment_settings.get_system_name()

        T = current.T
        db = current.db
        auth = current.auth
        s3 = response.s3
        appname = request.application
        settings = current.deployment_settings
        has_module = settings.has_module

        # Use the CR module's CRUD string for the shelters label, if enabled
        if has_module("cr"):
            table = current.s3db.cr_shelter
            SHELTERS = s3.crud_strings["cr_shelter"].title_list
        else:
            SHELTERS = ""

        # Menu Boxes
        menu_btns = [#div, label, app, function
                     #["col1", T("Staff"), "hrm", "staff"],
                     #["col1", T("Volunteers"), "vol", "volunteer"],
                     ["col1", T("Projects"), "project", "project"],
                     ["col1", T("Vehicles"), "vehicle", "vehicle"],
                     ["col2", T("Assets"), "asset", "asset"],
                     ["col2", T("Inventory Items"), "inv", "inv_item"],
                     #["facility", T("Facilities"), "org", "facility"],
                     ["facility", T("Hospitals"), "hms", "hospital"],
                     ["facility", T("Offices"), "org", "office"],
                     ["facility", SHELTERS, "cr", "shelter"],
                     ["facility", T("Transport"), "transport", "index"],
                     ["facility", T("Warehouses"), "inv", "warehouse"],
                     ]

        menu_divs = {"col1": DIV(_id="menu_div_col1", _class="menu_div"),
                     "col2": DIV(_id="menu_div_col2", _class="menu_div"),
                     "facility": DIV(H3(T("Facilities")),
                                     _id = "facility_box",
                                     _class = "menu_box"),
                     }

        # One button per enabled module, appended to its column/box
        for div, label, app, function in menu_btns:
            if has_module(app):
                # @ToDo: Also check permissions (e.g. for anonymous users)
                menu_divs[div].append(A(DIV(label,
                                            _class="menu-btn-r"),
                                        _class="menu-btn-l",
                                        _href = URL(app, function)
                                        )
                                      )

        cols_box = DIV(H3(T("Humanitarian Projects")),
                       DIV(_id="menu_div_col0"),
                       menu_divs["col1"],
                       menu_divs["col2"],
                       _id="cols_box",
                       #_class="menu_box fleft swidth"
                       _class="menu_box"
                       )

        facility_box = menu_divs["facility"]
        facility_box.append(A(IMG(_src="/%s/static/img/map_icon_128.png" % \
                                  appname),
                              _href = URL(c="gis", f="index"),
                              _title = T("Map")
                              )
                            )

        datatable_ajax_source = ""

        # Check logged in AND permissions
        roles = current.session.s3.roles
        system_roles = auth.get_system_roles()
        AUTHENTICATED = system_roles.AUTHENTICATED
        if AUTHENTICATED in roles and \
           auth.s3_has_permission("read", current.s3db.org_organisation):
            org_items = self.organisation()
            datatable_ajax_source = "/%s/default/organisation.aadata" % \
                                    appname
            s3.actions = None
            auth.permission.controller = "org"
            auth.permission.function = "site"
            permitted_facilities = auth.permitted_facilities(redirect_on_error=False)
            manage_facility_box = ""
            if permitted_facilities:
                # Build a facility chooser, sorted by facility name
                facility_list = s3_represent_facilities(db, permitted_facilities,
                                                        link=False)
                facility_list = sorted(facility_list, key=lambda fac: fac[1])
                facility_opts = [OPTION(opt[1], _value = opt[0])
                                 for opt in facility_list]
                if facility_list:
                    manage_facility_box = DIV(H3(T("Manage Your Facilities")),
                                              SELECT(_id = "manage_facility_select",
                                                     _style = "max-width:400px;",
                                                     *facility_opts
                                                     ),
                                              A(T("Go"),
                                                _href = URL(c="default", f="site",
                                                            args=[facility_list[0][0]]),
                                                #_disabled = "disabled",
                                                _id = "manage_facility_btn",
                                                _class = "action-btn"
                                                ),
                                              _id = "manage_facility_box",
                                              _class = "menu_box fleft"
                                              )
                    # Keep the Go button's href in sync with the selection
                    s3.jquery_ready.append(
'''$('#manage_facility_select').change(function(){
 $('#manage_facility_btn').attr('href',S3.Ap.concat('/default/site/',$('#manage_facility_select').val()))
})''')
                else:
                    manage_facility_box = DIV()

            org_box = DIV(H3(T("Organizations")),
                          A(T("Add Organization"),
                            _href = URL(c="org", f="organisation",
                                        args=["create"]),
                            _id = "add-btn",
                            _class = "action-btn",
                            _style = "margin-right: 10px;"),
                          org_items,
                          _id = "org_box",
                          _class = "menu_box fleft"
                          )
        else:
            manage_facility_box = ""
            org_box = ""

        # Login/Registration forms
        self_registration = settings.get_security_self_registration()
        registered = False
        login_form = None
        login_div = None
        register_form = None
        register_div = None
        if AUTHENTICATED not in roles:
            # This user isn't yet logged-in
            if request.cookies.has_key("registered"):
                # This browser has logged-in before
                registered = True

            if self_registration:
                # Provide a Registration box on front page
                register_form = auth.register()
                register_div = DIV(H3(T("Register")),
                                   P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
                                         dict(sign_up_now=B(T("sign-up now"))))))

                # Add client-side validation
                s3_register_validation()

                # After a POST (failed registration), keep the register
                # form visible instead of the login form
                if request.env.request_method == "POST":
                    post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
                else:
                    post_script = ""
                register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
 $('#register_form').removeClass('hide')
 $('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
 $('#register_form').addClass('hide')
 $('#login_form').removeClass('hide')
})''' % post_script
                s3.jquery_ready.append(register_script)

            # Provide a login box on front page
            request.args = ["login"]
            auth.messages.submit_button = T("Login")
            login_form = auth()
            login_div = DIV(H3(T("Login")),
                            P(XML(T("Registered users can %(login)s to access the system" % \
                                    dict(login=B(T("login")))))))

        # Optional Google dynamic-feed widget for the configured RSS feeds
        if settings.frontpage.rss:
            s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css")
            s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
            s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js")
            counter = 0
            feeds = ""
            for feed in settings.frontpage.rss:
                counter += 1
                feeds = "".join((feeds,
                                 "{title:'%s',\n" % feed["title"],
                                 "url:'%s'}" % feed["url"]))
                # Don't add a trailing comma for old IEs
                if counter != len(settings.frontpage.rss):
                    feeds += ",\n"
            # feedCycleTime: milliseconds before feed is reloaded (5 minutes)
            feed_control = "".join(('''
function LoadDynamicFeedControl(){
 var feeds=[
  ''', feeds, '''
 ]
 var options={
  feedCycleTime:300000,
  numResults:5,
  stacked:true,
  horizontal:false,
  title:"''', str(T("News")), '''"
 }
 new GFdynamicFeedControl(feeds,'feed-control',options)
}
google.load('feeds','1')
google.setOnLoadCallback(LoadDynamicFeedControl)'''))
            s3.js_global.append(feed_control)

        # Use the template-specific view for this page
        view = path.join(request.folder, "private", "templates",
                         "EUROSHA", "views", "index.html")
        try:
            # Pass view as file not str to work in compiled mode
            response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP("404", "Unable to open Custom View: %s" % view)

        return dict(title = response.title,
                    cols_box = cols_box,
                    facility_box = facility_box,
                    manage_facility_box = manage_facility_box,
                    org_box = org_box,
                    r = None, # Required for dataTable to work
                    datatable_ajax_source = datatable_ajax_source,
                    self_registration=self_registration,
                    registered=registered,
                    login_form=login_form,
                    login_div=login_div,
                    register_form=register_form,
                    register_div=register_div
                    )

    # -------------------------------------------------------------------------
    @staticmethod
    def organisation():
        """
        Function to handle pagination for the org list on the homepage
        """

        request = current.request
        resource = current.s3db.resource("org_organisation")
        totalrows = resource.count()
        table = resource.table

        list_fields = ["id", "name"]
        # Honour the client's page size for Ajax requests; the initial
        # HTML render fetches a single row only
        limit = int(request.get_vars["iDisplayLength"]) if request.extension == "aadata" else 1
        rfields = resource.resolve_selectors(list_fields)[0]
        (orderby, filter) = S3DataTable.getControlData(rfields, request.vars)
        resource.add_filter(filter)
        filteredrows = resource.count()
        if isinstance(orderby, bool):
            orderby = table.name
        rows = resource.select(list_fields,
                               orderby=orderby,
                               start=0,
                               limit=limit,
                               )
        data = resource.extract(rows,
                                list_fields,
                                represent=True,
                                )
        dt = S3DataTable(rfields, data)
        dt.defaultActionButtons(resource)
        current.response.s3.no_formats = True
        if request.extension == "html":
            items = dt.html(totalrows,
                            filteredrows,
                            "org_list_1",
                            dt_displayLength=10,
                            dt_ajax_url=URL(c="default",
                                            f="organisation",
                                            extension="aadata",
                                            vars={"id": "org_list_1"},
                                            ),
                            )
        elif request.extension.lower() == "aadata":
            limit = resource.count()
            if "sEcho" in request.vars:
                echo = int(request.vars.sEcho)
            else:
                echo = None
            items = dt.json(totalrows,
                            filteredrows,
                            "org_list_1",
                            echo)
        else:
            from gluon.http import HTTP
            raise HTTP(501, current.manager.ERROR.BAD_FORMAT)
        return items
|
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_hka5 as sensorObj
def main():
    # Instantiate a HKA5 sensor on uart 0.  The set and reset pins are
    # unused here, so -1 is passed for both.
    sensor = sensorObj.HKA5(0, -1, -1)

    ## Exit handlers ##
    def SIGINTHandler(signum, frame):
        # Suppress the stacktrace normally printed on control-C.
        raise SystemExit

    def exitHandler():
        # Runs when the interpreter exits.
        print("Exiting")
        sys.exit(0)

    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)

    # Poll the sensor every 2 seconds and report particulate readings.
    while True:
        sensor.update()
        print("PM 1 :", sensor.getPM1(), " ug/m3")
        print("PM 2.5:", sensor.getPM2_5(), " ug/m3")
        print("PM 10 :", sensor.getPM10(), " ug/m3")
        print()
        time.sleep(2)

if __name__ == '__main__':
    main()
|
__all__ = ['ExplorerTableElementWdg', 'ExplorerElementWdg']
from pyasm.common import TacticException, Common
from pyasm.biz import Project
from pyasm.web import Widget
from pyasm.widget import IconWdg
from tactic.ui.common import BaseTableElementWdg
from tactic.ui.widget import IconButtonWdg
class ExplorerElementWdg(BaseTableElementWdg):
    '''Table element showing a button that opens the sobject's sandbox or
    repository directory in the client-side file explorer (via the applet).'''

    ARGS_KEYS = {
        'mode': {
            'description': 'Determines which directory to go to when the explorer button is pressed.',
            'type': 'SelectWdg',
            'values': 'sandbox|repository',
            'category': 'options'
        }
    }

    def get_decrement(my):
        # Number of directory levels to strip from the sandbox base dir
        # (from the 'decrement' widget option; defaults to 0).
        decrement = my.get_option('decrement')
        if not decrement:
            decrement = 0
        else:
            decrement = int(decrement)
        return decrement

    def get_base_dir( my, sobject):
        # Sandbox base directory for this sobject.
        decrement = my.get_decrement()
        base_dir = Project.get_sandbox_base_dir(sobject, decrement=decrement)
        return base_dir

    def get_lib_dir(my, sobject):
        # Server-side (lib) repository directory.
        snapshot = None
        base_dir = Project.get_project_lib_dir(sobject, snapshot)
        return base_dir

    def get_client_repo_dir(my, sobject):
        # Client-visible repository directory.
        snapshot = None
        base_dir = Project.get_project_client_lib_dir(sobject, snapshot)
        #TODO: u could decrement the client repo dir here, if really needed
        return base_dir

    def get_title(my):
        widget = Widget()
        title = super(ExplorerElementWdg, my).get_title()
        widget.add( title )
        return widget

    def get_display(my):
        '''Build the explorer button for the current table row.'''
        sobject = my.get_current_sobject()
        mode = my.get_option('mode')
        if not mode:
            mode = 'sandbox'
        widget = Widget()
        sobject_dir = ''
        sobject_lib_dir = ''
        # find the path to open explorer
        if sobject.is_insert():
            # Unsaved sobjects have no path yet.
            button = IconWdg("No Path Found", IconWdg.CROSS, long=False)
        else:
            try:
                if mode == 'sandbox':
                    sobject_dir = my.get_base_dir(sobject)
                elif mode in ['client_repo', 'repository']:
                    sobject_dir = my.get_client_repo_dir(sobject)
                    sobject_lib_dir = my.get_lib_dir(sobject)
                sobject_dir = sobject_dir.strip()
                sobject_dir = Common.process_unicode_string(sobject_dir)
            except TacticException, e:
                print "WARNING: ", str(e)
                button = IconWdg("No Path Found", IconWdg.CROSS, long=False)
            else:
                button = IconButtonWdg(title="Explore: %s" % sobject_dir, icon=IconWdg.LOAD)
                if sobject_dir == sobject_lib_dir:
                    # Client repo dir equals the server lib dir: browsing the
                    # web server's own filesystem is not allowed.
                    button.add_behavior({'type':'click_up', 'cbjs_action':"spt.alert('You are not allowed to browse directories on a web server.');"})
                else:
                    button.add_behavior({'type':'click_up', 'cbjs_action':'''var applet = spt.Applet.get(); applet.makedirs('%s'); applet.open_explorer('%s');''' % (sobject_dir, sobject_dir)} )
        widget.add(button)
        return widget

    def is_editable(cls):
        '''to avoid all those CellEditWdg'''
        return False
    is_editable = classmethod(is_editable)
class ExplorerTableElementWdg(ExplorerElementWdg):
    '''Same behavior as ExplorerElementWdg; presumably kept so configs
    referencing the older class name keep working — TODO confirm.'''
    pass
|
"""
Course Goals Python API
"""
import models
from six import text_type
from opaque_keys.edx.keys import CourseKey
from django.conf import settings
from rest_framework.reverse import reverse
from course_modes.models import CourseMode
from openedx.features.course_experience import ENABLE_COURSE_GOALS
def add_course_goal(user, course_id, goal_key):
    """
    Create or update the course goal for the provided user and course.

    Arguments:
        user: The user that is setting the goal
        course_id (string): The id for the course the goal refers to
        goal_key (string): The goal key for the new goal.
    """
    course_key = CourseKey.from_string(text_type(course_id))
    existing_goal = get_course_goal(user, course_key)

    if existing_goal:
        # Update the existing row rather than creating a duplicate.
        existing_goal.goal_key = goal_key
        existing_goal.save(update_fields=['goal_key'])
        return

    # No goal yet: create and persist a new one.
    models.CourseGoal(user=user, course_key=course_key, goal_key=goal_key).save()
def get_course_goal(user, course_key):
    """
    Return the user's course goal for the given course_key.

    Returns None for anonymous users and when no goal has been set.
    """
    if user.is_anonymous:
        return None

    matches = models.CourseGoal.objects.filter(user=user, course_key=course_key)
    if matches:
        return matches[0]
    return None
def remove_course_goal(user, course_id):
    """
    Delete the user's goal for course_id, if one exists.
    """
    goal = get_course_goal(user, CourseKey.from_string(course_id))
    if goal:
        goal.delete()
def get_goal_api_url(request):
    """
    Return the endpoint for accessing the course-goals REST API.
    """
    endpoint = reverse('course_goals_api:v0:course_goal-list', request=request)
    return endpoint
def has_course_goal_permission(request, course_id, user_access):
    """
    Returns whether the user can access the course goal functionality.

    Only authenticated users that are enrolled in a verifiable course
    can use this feature.
    """
    course_key = CourseKey.from_string(course_id)
    # six.text_type (not the Python-2-only `unicode` builtin) keeps this
    # working on Python 3 and matches add_course_goal's usage.
    has_verified_mode = CourseMode.has_verified_mode(
        CourseMode.modes_for_course_dict(text_type(course_id)))
    return (user_access['is_enrolled'] and has_verified_mode
            and ENABLE_COURSE_GOALS.is_enabled(course_key)
            and settings.FEATURES.get('ENABLE_COURSE_GOALS'))
def get_course_goal_options():
    """
    Return a dict mapping each valid goal key to its translated display
    string, as defined by the CourseGoal model.
    """
    return dict(models.GOAL_KEY_CHOICES)
def valid_course_goals_ordered():
    """
    Return the valid goal options as (goal_key, goal_string) tuples,
    ordered by level of commitment (most committed first).

    The 'unsure' option is omitted since it has no relevant commitment
    level.
    """
    goal_options = get_course_goal_options()
    ordered_keys = (models.GOAL_KEY_CHOICES.certify,
                    models.GOAL_KEY_CHOICES.complete,
                    models.GOAL_KEY_CHOICES.explore)
    return [(key, goal_options[key]) for key in ordered_keys]
|
import sys
import os
# Default the RADICAL-Pilot database URL to a local MongoDB instance, and
# write the resolved value back so imports below and any child processes
# see the same setting.
path_to_db = os.environ.get(
    'RADICAL_PILOT_DBURL', "mongodb://localhost:27017/rp")
os.environ['RADICAL_PILOT_DBURL'] = path_to_db
import time
from adaptivemd import Project, ExecutionPlan
from adaptivemd import AllegroCluster
from adaptivemd import ExecutionPlan
from adaptivemd import OpenMMEngine4CUDA
from adaptivemd import PyEMMAAnalysis
from adaptivemd import File
# Adaptive-sampling test case: run OpenMM trajectories on 4 GPU schedulers
# while a CPU scheduler periodically rebuilds a PyEMMA MSM model.
# NOTE(review): this is Python 2 code (print statements below).
if __name__ == '__main__':

    project = Project('testcase-5')

    # --------------------------------------------------------------------------
    # CREATE THE RESOURCE
    #   the instance to know about the place where we run simulations
    # --------------------------------------------------------------------------

    resource_id = 'fub.allegro'  # NOTE(review): unused; AllegroCluster() is hard-coded below
    project.initialize(AllegroCluster())

    # --------------------------------------------------------------------------
    # CREATE THE ENGINE
    #   the instance to create trajectories
    # --------------------------------------------------------------------------
    pdb_file = File('file://../files/alanine/alanine.pdb').named('initial_pdb')

    engine = OpenMMEngine4CUDA(
        pdb_file=pdb_file,
        system_file=File('file://../files/alanine/system.xml'),
        integrator_file=File('file://../files/alanine/integrator.xml'),
        args='-r --report-interval 10 --store-interval 1 -p CPU'
    ).named('openmm')

    # --------------------------------------------------------------------------
    # CREATE AN ANALYZER
    #   the instance that knows how to compute a msm from the trajectories
    # --------------------------------------------------------------------------

    modeller = PyEMMAAnalysis(
        pdb_file=pdb_file
    ).named('pyemma')

    project.generators.add(engine)
    project.generators.add(modeller)

    # --------------------------------------------------------------------------
    # CREATE THE CLUSTER
    #   the instance that runs the simulations on the resource
    # --------------------------------------------------------------------------

    # four GPU schedulers plus one CPU scheduler, each with a 4-day runtime
    gpu_scheduler = [project.get_scheduler('gpu', runtime=4*24*60) for _ in range(4)]
    pyemma_scheduler = project.get_scheduler('cpu', cores=1, runtime=4*24*60)

    # create 4 trajectories
    trajectories = project.new_trajectory(pdb_file, 100, 4)
    gpu_scheduler[0](trajectories)
    # block until the seed trajectories finish before entering the loop
    gpu_scheduler[0].wait()

    # now start adaptive loop
    def strategy_trajectory(scheduler, loops, num):
        # Generator strategy: each iteration submits `num` batches of
        # model-guided trajectories and yields a done-condition.
        for loop in range(loops):
            trajectories = [project.new_ml_trajectory(length=20, number=4) for _ in range(num)]
            tasks = map(engine.run, trajectories)
            tasklist = scheduler(tasks)
            yield tasklist.is_done()

    events = [
        ExecutionPlan(strategy_trajectory(scheduler, 100, 10))
        for scheduler in gpu_scheduler]

    # NOTE(review): Python 2 map() runs eagerly; under Python 3 this would be
    # a no-op lazy iterator and the events would never be registered.
    map(project.add_event, events)

    def strategy_model(scheduler, steps):
        # Rebuild the MSM whenever `steps` new trajectories have appeared,
        # until all trajectory events have finished.
        while any(events):
            num = len(project.trajectories)
            task = scheduler(modeller.execute(list(project.trajectories)))
            yield task.is_done
            cond = project.on_ntraj(num + steps)
            yield lambda: cond() or not any(events)

    # NOTE(review): `steps` is undefined at module scope here -- this line
    # raises NameError at runtime. A concrete value (e.g. 10) must be passed.
    ev3 = ExecutionPlan(strategy_model(pyemma_scheduler, steps))
    project.add_event(ev3)

    print

    # ANSI escape codes for the (currently commented-out) progress display
    CURSOR_UP_ONE = '\x1b[1A'
    ERASE_LINE = '\x1b[2K'

    # try:
    #     while project._events:
    #         sys.stdout.write('# of trajectories : %8d / # of models : %8d \n' % (
    #             len(project.trajectories),
    #             len(project.models)
    #         ))
    #         sys.stdout.flush()
    #         time.sleep(1.0)
    #         sys.stdout.write(CURSOR_UP_ONE + ERASE_LINE)
    # except KeyboardInterrupt:
    #     pass

    # Poll until every registered event has completed; Ctrl-C aborts the wait.
    try:
        while project._events:
            time.sleep(2.0)
    except KeyboardInterrupt:
        pass

    # shut down all schedulers before closing the project
    _ = [scheduler.exit() for scheduler in gpu_scheduler]
    pyemma_scheduler.exit()

    print 'DONE !!!'

    sys.stdout.write('# of trajectories : %8d / # of models : %8d \n' % (
        len(project.trajectories),
        len(project.models)
    ))

    project.close()
|
import sys

# BUG FIX: the parent directory must be prepended to sys.path *before*
# importing pilasengine, otherwise a source checkout one level up is never
# found ahead of any installed copy (the original inserted the path after
# the import, which defeated its purpose).
sys.path.insert(0, "..")

import pilasengine

# Demo: a monkey actor that follows mouse clicks and resizes with the wheel.
pilas = pilasengine.iniciar()

mono = pilas.actores.Mono()
mono.aprender(pilas.habilidades.SeguirClicks)
mono.aprender(pilas.habilidades.AumentarConRueda)

pilas.avisar(u"El mono sigue los clicks, y cambia de tamaño si mueves la\nrueda del mouse.")
pilas.ejecutar()
|
from oslotest import mockpatch
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import snapshots_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services.compute import base
class TestSnapshotsClient(base.BaseComputeServiceTest):
    """Unit tests for the compute SnapshotsClient service client."""

    FAKE_SNAPSHOT = {
        "createdAt": "2015-10-02T16:27:54.724209",
        "displayDescription": u"Another \u1234.",
        "displayName": u"v\u1234-001",
        "id": "100",
        "size": 100,
        "status": "available",
        "volumeId": "12"
    }

    FAKE_SNAPSHOTS = {"snapshots": [FAKE_SNAPSHOT]}

    def setUp(self):
        super(TestSnapshotsClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = snapshots_client.SnapshotsClient(
            fake_auth, 'compute', 'regionOne')

    def _test_create_snapshot(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_snapshot,
            'tempest.lib.common.rest_client.RestClient.post',
            {"snapshot": self.FAKE_SNAPSHOT},
            to_utf=bytes_body, status=200,
            volume_id=self.FAKE_SNAPSHOT["volumeId"])

    def test_create_snapshot_with_str_body(self):
        self._test_create_snapshot()

    def test_create_snapshot_with_bytes_body(self):
        # Renamed from test_create_shapshot_with_bytes_body (typo) so the
        # name matches its str-body sibling; discovery still picks it up.
        self._test_create_snapshot(bytes_body=True)

    def _test_show_snapshot(self, bytes_body=False):
        self.check_service_client_function(
            self.client.show_snapshot,
            'tempest.lib.common.rest_client.RestClient.get',
            {"snapshot": self.FAKE_SNAPSHOT},
            to_utf=bytes_body, snapshot_id=self.FAKE_SNAPSHOT["id"])

    def test_show_snapshot_with_str_body(self):
        self._test_show_snapshot()

    def test_show_snapshot_with_bytes_body(self):
        self._test_show_snapshot(bytes_body=True)

    def _test_list_snapshots(self, bytes_body=False, **params):
        self.check_service_client_function(
            self.client.list_snapshots,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_SNAPSHOTS, to_utf=bytes_body, **params)

    def test_list_snapshots_with_str_body(self):
        self._test_list_snapshots()

    def test_list_snapshots_with_byte_body(self):
        self._test_list_snapshots(bytes_body=True)

    def test_list_snapshots_with_params(self):
        # BUG FIX: 'fake' was previously passed positionally, which bound it
        # to bytes_body (making to_utf truthy) instead of reaching the
        # request as a query parameter via **params.
        self._test_list_snapshots(params='fake')

    def test_delete_snapshot(self):
        self.check_service_client_function(
            self.client.delete_snapshot,
            'tempest.lib.common.rest_client.RestClient.delete',
            {}, status=202, snapshot_id=self.FAKE_SNAPSHOT['id'])

    def test_is_resource_deleted_true(self):
        # show_snapshot raising NotFound means the snapshot is gone.
        module = ('tempest.lib.services.compute.snapshots_client.'
                  'SnapshotsClient.show_snapshot')
        self.useFixture(mockpatch.Patch(
            module, side_effect=lib_exc.NotFound))
        self.assertTrue(self.client.is_resource_deleted('fake-id'))

    def test_is_resource_deleted_false(self):
        # A successful show_snapshot means the snapshot still exists.
        module = ('tempest.lib.services.compute.snapshots_client.'
                  'SnapshotsClient.show_snapshot')
        self.useFixture(mockpatch.Patch(
            module, return_value={}))
        self.assertFalse(self.client.is_resource_deleted('fake-id'))
|
import re
import sys
import subprocess
import os
def generate(test):
    """Generate test.c from a test-spec stream.

    Reads ``tests/template.fmt`` and fills its ``{tests}`` placeholder with C
    code derived from *test* (a readable file-like object): every
    ``<expr> => <expected>;`` statement expands into a ``res = <expr>;`` /
    ``test_assert("<name>", res, <expected>);`` pair, while all other lines
    are copied through unchanged.
    """
    with open("tests/template.fmt") as f:
        template = f.read()

    lines = []
    # Split on newlines that follow ';', '{' or '}' so multi-line
    # statements stay together.  Raw strings avoid relying on Python
    # passing unknown escapes through (a DeprecationWarning since 3.6).
    for line in re.split(r'(?<=[;{}])\n', test.read()):
        match = re.match(r'(?: *\n)*( *)(.*)=>(.*);', line, re.DOTALL | re.MULTILINE)
        if match:
            # Use a fresh name for the matched expression; the original
            # clobbered the `test` parameter here.
            tab, expr, expect = match.groups()
            lines.append(tab + 'res = {test};'.format(test=expr.strip()))
            lines.append(tab + 'test_assert("{name}", res, {expect});'.format(
                name=re.match(r'\w*', expr.strip()).group(),
                expect=expect.strip()))
        else:
            lines.append(line)

    with open('test.c', 'w') as f:
        f.write(template.format(tests='\n'.join(lines)))
def compile():
    """Build the project with make, promoting all C warnings to errors."""
    env = os.environ
    env['CFLAGS'] = '%s -Werror' % env.get('CFLAGS', '')
    subprocess.check_call(['make', '--no-print-directory', '-s'], env=env)
def execute():
    """Run the freshly built test binary; raises if it exits non-zero."""
    binary = "./lfs"
    subprocess.check_call([binary])
def main(test=None):
    """Drive the generate -> compile -> execute pipeline.

    *test* may be a path to a spec file; with no argument (or an option
    flag) the spec is read from stdin.  The ``-s`` flag stops the run with
    exit code 1 right after compiling, skipping execution.
    """
    use_stdin = not test or test.startswith('-')
    if use_stdin:
        generate(sys.stdin)
    else:
        with open(test) as spec:
            generate(spec)
    compile()
    if test == '-s':
        sys.exit(1)
    execute()
# Entry point: forward any CLI argument (a spec path or '-s') to main().
if __name__ == "__main__":
    main(*sys.argv[1:])
|
# Scan the id file once and report the length of its longest line
# (newline included).  The original leaked the file handle and used a
# Python-2-only print statement; `with` closes the file deterministically
# and `print(m)` behaves identically on Python 2 and 3 for a single value.
m = 0
with open('/data/fuzzyjoin/pub/csx-id.txt') as fh:
    for line in fh:
        m = max(m, len(line))
print(m)
|
from oslo.config import cfg
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.volume.drivers.san.san import SanISCSIDriver
LOG = logging.getLogger(__name__)

# Driver-specific configuration: where in the ZFS namespace zvols are created.
solaris_opts = [
    cfg.StrOpt('san_zfs_volume_base',
               default='rpool/',
               help='The ZFS path under which to create zvols for volumes.'), ]

CONF = cfg.CONF
CONF.register_opts(solaris_opts)
class SolarisISCSIDriver(SanISCSIDriver):
    """Executes commands relating to Solaris-hosted ISCSI volumes.

    Basic setup for a Solaris iSCSI server:

    pkg install storage-server SUNWiscsit
    svcadm enable stmf
    svcadm enable -r svc:/network/iscsi/target:default
    pfexec itadm create-tpg e1000g0 ${MYIP}
    pfexec itadm create-target -t e1000g0

    Then grant the user that will be logging on lots of permissions.
    I'm not sure exactly which though:

    zfs allow justinsb create,mount,destroy rpool
    usermod -P'File System Management' justinsb
    usermod -P'Primary Administrator' justinsb

    Also make sure you can login using san_login & san_password/san_private_key
    """
    def __init__(self, *cmd, **kwargs):
        # Route every command through solaris_execute so it runs under pfexec.
        super(SolarisISCSIDriver, self).__init__(execute=self.solaris_execute,
                                                 *cmd, **kwargs)
        self.configuration.append_config_values(solaris_opts)

    def solaris_execute(self, *cmd, **kwargs):
        """Run a command on the SAN host, elevated via pfexec."""
        new_cmd = ['pfexec']
        new_cmd.extend(cmd)
        return super(SolarisISCSIDriver, self).san_execute(*new_cmd, **kwargs)

    def _view_exists(self, luid):
        """Return True if a view entry exists for the given LU id.

        Raises VolumeBackendAPIException when the stmfadm output cannot be
        interpreted either way.
        """
        (out, _err) = self._execute('/usr/sbin/stmfadm',
                                    'list-view', '-l', luid,
                                    check_exit_code=False)
        if "no views found" in out:
            return False
        if "View Entry:" in out:
            return True
        msg = _("Cannot parse list-view output: %s") % out
        raise exception.VolumeBackendAPIException(data=msg)

    def _get_target_groups(self):
        """Gets list of target groups from host."""
        (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg')
        matches = self._get_prefixed_values(out, 'Target group: ')
        LOG.debug("target_groups=%s", matches)
        return matches

    def _target_group_exists(self, target_group_name):
        """Return True if the named target group already exists on the host.

        BUG FIX: this previously returned ``name not in groups``; the
        inverted test reported existing groups as missing (causing duplicate
        create-tg attempts in _do_export) and prevented remove_export from
        ever deleting a real group.
        """
        return target_group_name in self._get_target_groups()

    def _get_target_group_members(self, target_group_name):
        """Return the member target names of a target group."""
        (out, _err) = self._execute('/usr/sbin/stmfadm',
                                    'list-tg', '-v', target_group_name)
        matches = self._get_prefixed_values(out, 'Member: ')
        LOG.debug("members of %s=%s", target_group_name, matches)
        return matches

    def _is_target_group_member(self, target_group_name, iscsi_target_name):
        """Return True if the iSCSI target belongs to the target group."""
        return iscsi_target_name in (
            self._get_target_group_members(target_group_name))

    def _get_iscsi_targets(self):
        """Return the names of all iSCSI targets defined on the host."""
        (out, _err) = self._execute('/usr/sbin/itadm', 'list-target')
        matches = self._collect_lines(out)

        # Skip header
        if len(matches) != 0:
            assert 'TARGET NAME' in matches[0]
            matches = matches[1:]

        targets = []
        for line in matches:
            items = line.split()
            assert len(items) == 3
            targets.append(items[0])

        LOG.debug("_get_iscsi_targets=%s", targets)
        return targets

    def _iscsi_target_exists(self, iscsi_target_name):
        """Return True if an iSCSI target with this name exists."""
        return iscsi_target_name in self._get_iscsi_targets()

    def _build_zfs_poolname(self, volume):
        """Return the ZFS dataset name for a volume (base path + name)."""
        zfs_poolname = '%s%s' % (self.configuration.san_zfs_volume_base,
                                 volume['name'])
        return zfs_poolname

    def create_volume(self, volume):
        """Creates a volume."""
        # A 0-size request still needs a real zvol; use a 100M floor.
        if int(volume['size']) == 0:
            sizestr = '100M'
        else:
            sizestr = '%sG' % volume['size']

        zfs_poolname = self._build_zfs_poolname(volume)

        # Create a zfs volume (-s makes it sparse / thin-provisioned)
        cmd = ['/usr/sbin/zfs', 'create']
        if self.configuration.san_thin_provision:
            cmd.append('-s')
        cmd.extend(['-V', sizestr])
        cmd.append(zfs_poolname)
        self._execute(*cmd)

    def _get_luid(self, volume):
        """Return the LU id (GUID) backing a volume.

        Raises VolumeBackendAPIException when no LU is found for the zvol.
        """
        zfs_poolname = self._build_zfs_poolname(volume)
        zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname

        (out, _err) = self._execute('/usr/sbin/sbdadm', 'list-lu')

        lines = self._collect_lines(out)

        # Strip headers
        if len(lines) >= 1:
            if lines[0] == '':
                lines = lines[1:]

        if len(lines) >= 4:
            assert 'Found' in lines[0]
            assert '' == lines[1]
            assert 'GUID' in lines[2]
            assert '------------------' in lines[3]
            lines = lines[4:]

        for line in lines:
            items = line.split()
            assert len(items) == 3
            if items[2] == zvol_name:
                luid = items[0].strip()
                return luid

        msg = _('LUID not found for %(zfs_poolname)s. '
                'Output=%(out)s') % {'zfs_poolname': zfs_poolname, 'out': out}
        raise exception.VolumeBackendAPIException(data=msg)

    def _is_lu_created(self, volume):
        # NOTE(review): _get_luid raises rather than returning a falsy value
        # when no LU exists, so the "not created" path surfaces an exception.
        luid = self._get_luid(volume)
        return luid

    def delete_volume(self, volume):
        """Deletes a volume."""
        zfs_poolname = self._build_zfs_poolname(volume)
        self._execute('/usr/sbin/zfs', 'destroy', zfs_poolname)

    def local_path(self, volume):
        # TODO(justinsb): Is this needed here?
        escaped_group = self.configuration.volume_group.replace('-', '--')
        escaped_name = volume['name'].replace('-', '--')
        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        #TODO(justinsb): On bootup, this is called for every volume.
        # It then runs ~5 SSH commands for each volume,
        # most of which fetch the same info each time
        # This makes initial start stupid-slow
        return self._do_export(volume, force_create=False)

    def create_export(self, context, volume):
        """Create a new export for a logical volume."""
        return self._do_export(volume, force_create=True)

    def _do_export(self, volume, force_create):
        """Create LU, target group, target and view for a volume.

        When force_create is False, each object is only created if it does
        not exist yet.  Returns a db update dict with provider_location.
        """
        # Create a Logical Unit (LU) backed by the zfs volume
        zfs_poolname = self._build_zfs_poolname(volume)

        if force_create or not self._is_lu_created(volume):
            zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname
            self._execute('/usr/sbin/sbdadm', 'create-lu', zvol_name)

        luid = self._get_luid(volume)
        iscsi_name = self._build_iscsi_target_name(volume)
        target_group_name = 'tg-%s' % volume['name']

        # Create a iSCSI target, mapped to just this volume
        if force_create or not self._target_group_exists(target_group_name):
            self._execute('/usr/sbin/stmfadm', 'create-tg', target_group_name)

        # Yes, we add the initiatior before we create it!
        # Otherwise, it complains that the target is already active
        if force_create or not self._is_target_group_member(target_group_name,
                                                            iscsi_name):
            self._execute('/usr/sbin/stmfadm',
                          'add-tg-member', '-g', target_group_name, iscsi_name)

        if force_create or not self._iscsi_target_exists(iscsi_name):
            self._execute('/usr/sbin/itadm', 'create-target', '-n', iscsi_name)

        if force_create or not self._view_exists(luid):
            self._execute('/usr/sbin/stmfadm',
                          'add-view', '-t', target_group_name, luid)

        #TODO(justinsb): Is this always 1? Does it matter?
        iscsi_portal_interface = '1'
        iscsi_portal = \
            self.configuration.san_ip + ":3260," + iscsi_portal_interface

        db_update = {}
        db_update['provider_location'] = ("%s %s" %
                                          (iscsi_portal,
                                           iscsi_name))

        return db_update

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        # This is the reverse of _do_export
        luid = self._get_luid(volume)
        iscsi_name = self._build_iscsi_target_name(volume)
        target_group_name = 'tg-%s' % volume['name']

        if self._view_exists(luid):
            self._execute('/usr/sbin/stmfadm', 'remove-view', '-l', luid, '-a')

        if self._iscsi_target_exists(iscsi_name):
            self._execute('/usr/sbin/stmfadm', 'offline-target', iscsi_name)
            self._execute('/usr/sbin/itadm', 'delete-target', iscsi_name)

        # We don't delete the tg-member; we delete the whole tg!
        if self._target_group_exists(target_group_name):
            self._execute('/usr/sbin/stmfadm', 'delete-tg', target_group_name)

        if self._is_lu_created(volume):
            self._execute('/usr/sbin/sbdadm', 'delete-lu', luid)

    def _collect_lines(self, data):
        """Split lines from data into an array, trimming them."""
        matches = []
        for line in data.splitlines():
            match = line.strip()
            matches.append(match)
        return matches

    def _get_prefixed_values(self, data, prefix):
        """Collect lines which start with prefix; with trimming."""
        matches = []
        for line in data.splitlines():
            line = line.strip()
            if line.startswith(prefix):
                match = line[len(prefix):]
                match = match.strip()
                matches.append(match)
        return matches
|
'''main method for integration test topology'''
import argparse
import logging
import sys
from .basic_one_task import basic_one_task_builder
from .all_grouping import all_grouping_buidler
from .none_grouping import none_grouping_builder
from .one_bolt_multi_tasks import one_bolt_multi_tasks_builder
from .one_spout_bolt_multi_tasks import one_spout_bolt_multi_tasks_builder
from .shuffle_grouping import shuffle_grouping_builder
from .one_spout_two_bolts import one_spout_two_bolts_builder
from .one_spout_multi_tasks import one_spout_multi_tasks_builder
from .multi_spouts_multi_tasks import multi_spouts_multi_tasks_builder
from .fields_grouping import fields_grouping_builder
from .bolt_double_emit_tuples import bolt_double_emit_tuples_builder
from .global_grouping import global_grouping_builder
# Maps a bare topology name (timestamp and uuid stripped) to the builder
# function that constructs it.
# NOTE(review): 'all_grouping_buidler' matches the (also misspelled) name
# exported by the all_grouping module -- do not "fix" the spelling here alone.
TOPOLOGY_BUILDERS = {
    'PyHeron_IntegrationTest_BasicOneTask': basic_one_task_builder,
    'PyHeron_IntegrationTest_AllGrouping': all_grouping_buidler,
    'PyHeron_IntegrationTest_NoneGrouping': none_grouping_builder,
    'PyHeron_IntegrationTest_OneBoltMultiTasks': one_bolt_multi_tasks_builder,
    'PyHeron_IntegrationTest_OneSpoutBoltMultiTasks': one_spout_bolt_multi_tasks_builder,
    'PyHeron_IntegrationTest_ShuffleGrouping': shuffle_grouping_builder,
    'PyHeron_IntegrationTest_OneSpoutTwoBolts': one_spout_two_bolts_builder,
    'PyHeron_IntegrationTest_OneSpoutMultiTasks': one_spout_multi_tasks_builder,
    'PyHeron_IntegrationTest_MultiSpoutsMultiTasks': multi_spouts_multi_tasks_builder,
    'PyHeron_IntegrationTest_FieldsGrouping': fields_grouping_builder,
    'PyHeron_IntegrationTest_BoltDoubleEmitTuples': bolt_double_emit_tuples_builder,
    'PyHeron_IntegrationTest_GlobalGrouping': global_grouping_builder,
}
def main():
    """Parse CLI arguments, look up the requested topology builder and
    write the topology definition."""
    parser = argparse.ArgumentParser(description='Python topology submitter')
    parser.add_argument('-r', '--results-server-url', dest='results_url', required=True)
    parser.add_argument('-t', '--topology-name', dest='topology_name', required=True)
    args, unknown_args = parser.parse_known_args()
    if unknown_args:
        logging.error('Unknown argument passed to %s: %s', sys.argv[0], unknown_args[0])
        sys.exit(1)

    http_server_url = args.results_url
    # Submitted names look like <timestamp>_<TopologyName>_<uuid>; drop the
    # first and last underscore-separated pieces to recover the bare name.
    # e.g. 1470884422_PyHeron_IntegrationTest_BasicOneTask_dca9bb1c-...
    #      --> PyHeron_IntegrationTest_BasicOneTask
    topology_name_with_uuid = args.topology_name
    topology_name = '_'.join(topology_name_with_uuid.split('_')[1:-1])

    builder = TOPOLOGY_BUILDERS.get(topology_name)
    if builder is None:
        logging.error("%s not found in the list", topology_name)
        sys.exit(2)

    topo_class = builder(topology_name_with_uuid, http_server_url)
    topo_class.write()
# Script entry point: submit the topology named on the command line.
if __name__ == '__main__':
    main()
|
"""Self-test suite for Crypto.Random.new()"""
import sys
import unittest
from Crypto.Util.py3compat import b
class SimpleTest(unittest.TestCase):
    """Exercises Crypto.Random.new() and the Crypto.Random.random API."""

    def runTest(self):
        """Crypto.Random.new()"""
        # Import the Random module and try to use it
        from Crypto import Random
        randobj = Random.new()
        x = randobj.read(16)
        y = randobj.read(16)
        self.assertNotEqual(x, y)
        z = Random.get_random_bytes(16)
        self.assertNotEqual(x, z)
        self.assertNotEqual(y, z)
        # Test the Random.random module, which
        # implements a subset of Python's random API
        # Not implemented:
        # seed(), getstate(), setstate(), jumpahead()
        # random(), uniform(), triangular(), betavariate()
        # expovariate(), gammavariate(), gauss(),
        # longnormvariate(), normalvariate(),
        # vonmisesvariate(), paretovariate()
        # weibullvariate()
        # WichmannHill(), whseed(), SystemRandom()
        from Crypto.Random import random
        x = random.getrandbits(16*8)
        y = random.getrandbits(16*8)
        self.assertNotEqual(x, y)
        # Test randrange
        if x > y:
            start = y
            stop = x
        else:
            start = x
            stop = y
        for step in range(1, 10):
            x = random.randrange(start, stop, step)
            y = random.randrange(start, stop, step)
            self.assertNotEqual(x, y)
            self.assertEqual(start <= x < stop, True)
            self.assertEqual(start <= y < stop, True)
            self.assertEqual((x - start) % step, 0)
            self.assertEqual((y - start) % step, 0)
        for i in range(10):
            self.assertEqual(random.randrange(1, 2), 1)
        self.assertRaises(ValueError, random.randrange, start, start)
        self.assertRaises(ValueError, random.randrange, stop, start, step)
        self.assertRaises(TypeError, random.randrange, start, stop, step, step)
        self.assertRaises(TypeError, random.randrange, start, stop, "1")
        self.assertRaises(TypeError, random.randrange, "1", stop, step)
        self.assertRaises(TypeError, random.randrange, 1, "2", step)
        self.assertRaises(ValueError, random.randrange, start, stop, 0)
        # Test randint
        x = random.randint(start, stop)
        y = random.randint(start, stop)
        self.assertNotEqual(x, y)
        self.assertEqual(start <= x <= stop, True)
        self.assertEqual(start <= y <= stop, True)
        for i in range(10):
            self.assertEqual(random.randint(1, 1), 1)
        self.assertRaises(ValueError, random.randint, stop, start)
        self.assertRaises(TypeError, random.randint, start, stop, step)
        self.assertRaises(TypeError, random.randint, "1", stop)
        self.assertRaises(TypeError, random.randint, 1, "2")
        # Test choice
        seq = list(range(10000))
        x = random.choice(seq)
        y = random.choice(seq)
        self.assertNotEqual(x, y)
        self.assertEqual(x in seq, True)
        self.assertEqual(y in seq, True)
        for i in range(10):
            self.assertEqual(random.choice((1, 2, 3)) in (1, 2, 3), True)
        self.assertEqual(random.choice([1, 2, 3]) in [1, 2, 3], True)
        # BUG FIX: was `sys.version_info[0] is 3` -- identity comparison of
        # ints is a CPython caching artifact, not a guarantee; use == like
        # the equivalent checks further down already do.
        if sys.version_info[0] == 3:
            self.assertEqual(random.choice(bytearray(b('123'))) in bytearray(b('123')), True)
        self.assertEqual(1, random.choice([1]))
        self.assertRaises(IndexError, random.choice, [])
        self.assertRaises(TypeError, random.choice, 1)
        # Test shuffle. Lacks random parameter to specify function.
        # Make copies of seq
        seq = list(range(500))
        x = list(seq)
        y = list(seq)
        random.shuffle(x)
        random.shuffle(y)
        self.assertNotEqual(x, y)
        self.assertEqual(len(seq), len(x))
        self.assertEqual(len(seq), len(y))
        for i in range(len(seq)):
            self.assertEqual(x[i] in seq, True)
            self.assertEqual(y[i] in seq, True)
            self.assertEqual(seq[i] in x, True)
            self.assertEqual(seq[i] in y, True)
        z = [1]
        random.shuffle(z)
        self.assertEqual(z, [1])
        if sys.version_info[0] == 3:
            z = bytearray(b('12'))
            random.shuffle(z)
            self.assertEqual(b('1') in z, True)
            self.assertRaises(TypeError, random.shuffle, b('12'))
        self.assertRaises(TypeError, random.shuffle, 1)
        self.assertRaises(TypeError, random.shuffle, "11")
        self.assertRaises(TypeError, random.shuffle, (1, 2))
        # 2to3 wraps a list() around it, alas - but I want to shoot
        # myself in the foot here! :D
        # if sys.version_info[0] == 3:
        #     self.assertRaises(TypeError, random.shuffle, range(3))
        # Test sample
        x = random.sample(seq, 20)
        y = random.sample(seq, 20)
        self.assertNotEqual(x, y)
        for i in range(20):
            self.assertEqual(x[i] in seq, True)
            self.assertEqual(y[i] in seq, True)
        z = random.sample([1], 1)
        self.assertEqual(z, [1])
        z = random.sample((1, 2, 3), 1)
        self.assertEqual(z[0] in (1, 2, 3), True)
        z = random.sample("123", 1)
        self.assertEqual(z[0] in "123", True)
        z = random.sample(list(range(3)), 1)
        self.assertEqual(z[0] in range(3), True)
        if sys.version_info[0] == 3:
            z = random.sample(b("123"), 1)
            self.assertEqual(z[0] in b("123"), True)
            z = random.sample(bytearray(b("123")), 1)
            self.assertEqual(z[0] in bytearray(b("123")), True)
        self.assertRaises(TypeError, random.sample, 1)
def get_tests(config=None):
    """Return the test cases defined in this module.

    *config* is accepted for API compatibility with the other self-test
    modules and is unused; the default was changed from the mutable ``{}``
    (shared across calls) to ``None`` with no behavioral difference.
    """
    return [SimpleTest()]
# Run this module's tests directly: build a suite from get_tests().
if __name__ == '__main__':
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
|
import logging
import sys

# Route all log records at INFO level and above to stderr for this process.
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
|
"""
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Element import Element
import Utils
import Colors
from .. base import odict
from Constants import BORDER_PROXIMITY_SENSITIVITY
from Constants import \
BLOCK_LABEL_PADDING, \
PORT_SEPARATION, LABEL_SEPARATION, \
PORT_BORDER_SEPARATION, POSSIBLE_ROTATIONS
import pygtk
pygtk.require('2.0')
import gtk
import pango
# Cheetah template producing the Pango markup for a block's title label;
# $encode escapes the block name so it is safe to embed in markup.
BLOCK_MARKUP_TMPL="""\
<span foreground="$foreground" font_desc="Sans 8"><b>$encode($block.get_name())</b></span>"""
class Block(Element):
    """The graphical signal block.

    ``image_path`` originates from ../python/Block.py.
    """

    def __init__(self, image_path):
        """
        Block constructor.
        Add graphics related params (coordinate and rotation) to the block.
        """
        #add the position param
        self.get_params().append(self.get_parent().get_parent().Param(
            block=self,
            n=odict({
                'name': 'GUI Coordinate',
                'key': '_coordinate',
                'type': 'raw',
                'value': '(0, 0)',
                'hide': 'all',
            })
        ))
        #add the rotation param
        self.get_params().append(self.get_parent().get_parent().Param(
            block=self,
            n=odict({
                'name': 'GUI Rotation',
                'key': '_rotation',
                'type': 'raw',
                'value': '0',
                'hide': 'all',
            })
        ))
        self.image_path = image_path

    def get_coordinate(self):
        """
        Get the coordinate from the position param.
        @return the coordinate tuple (x, y) or (0, 0) if failure
        """
        try: #should evaluate to tuple
            # NOTE: eval of a stored param value -- matches how raw params
            # are handled elsewhere in GRC; do not load untrusted files.
            coor = eval(self.get_param('_coordinate').get_value())
            x, y = map(int, coor)
            fgW, fgH = self.get_parent().get_size()
            # clamp the position into the visible flow graph area
            if x <= 0:
                x = 0
            elif x >= fgW - BORDER_PROXIMITY_SENSITIVITY:
                x = fgW - BORDER_PROXIMITY_SENSITIVITY
            if y <= 0:
                y = 0
            elif y >= fgH - BORDER_PROXIMITY_SENSITIVITY:
                y = fgH - BORDER_PROXIMITY_SENSITIVITY
            return (x, y)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; fall back to the origin.
            self.set_coordinate((0, 0))
            return (0, 0)

    def set_coordinate(self, coor):
        """
        Set the coordinate into the position param.
        @param coor the coordinate tuple (x, y)
        """
        self.get_param('_coordinate').set_value(str(coor))

    def get_rotation(self):
        """
        Get the rotation from the position param.
        @return the rotation in degrees or 0 if failure
        """
        try: #should evaluate to an int
            rotation = eval(self.get_param('_rotation').get_value())
            return int(rotation)
        except Exception:
            # BUG FIX: was a bare `except:`; fall back to the default rotation.
            self.set_rotation(POSSIBLE_ROTATIONS[0])
            return POSSIBLE_ROTATIONS[0]

    def set_rotation(self, rot):
        """
        Set the rotation into the position param.
        @param rot the rotation in degrees
        """
        self.get_param('_rotation').set_value(str(rot))

    def create_shapes(self):
        """Update the block, parameters, and ports when a change occurs."""
        Element.create_shapes(self)
        if self.is_horizontal(): self.add_area((0, 0), (self.W, self.H))
        elif self.is_vertical(): self.add_area((0, 0), (self.H, self.W))

    def get_key_parent(self, parent_string):
        """Extract and return the text inside the first (...) of the string."""
        import re
        return str(re.search(r"\((.*?)\)", parent_string).group(1))

    def create_labels(self):
        """Create the labels for the signal block."""
        Element.create_labels(self)
        self._bg_color = self.get_enabled() and Colors.BLOCK_ENABLED_COLOR or Colors.BLOCK_DISABLED_COLOR
        layouts = list()
        #create the main layout
        layout = gtk.DrawingArea().create_pango_layout('')
        layouts.append(layout)
        layout.set_markup(Utils.parse_template(BLOCK_MARKUP_TMPL, block=self))
        self.label_width, self.label_height = layout.get_pixel_size()
        #display the params
        markups = [param.get_markup() for param in self.get_params() if param.get_hide() not in ('all', 'part')]
        if markups:
            layout = gtk.DrawingArea().create_pango_layout('')
            layout.set_spacing(LABEL_SEPARATION*pango.SCALE)
            layout.set_markup('\n'.join(markups))
            layouts.append(layout)
            w, h = layout.get_pixel_size()
            self.label_width = max(w, self.label_width)
            self.label_height += h + LABEL_SEPARATION
        width = self.label_width
        height = self.label_height
        #setup the pixmap
        pixmap = self.get_parent().new_pixmap(width, height)
        gc = pixmap.new_gc()
        gc.set_foreground(self._bg_color)
        pixmap.draw_rectangle(gc, True, 0, 0, width, height)
        #draw the layouts (title centered, params left-aligned below)
        h_off = 0
        for i, layout in enumerate(layouts):
            w, h = layout.get_pixel_size()
            if i == 0: w_off = (width-w)/2
            else: w_off = 0
            pixmap.draw_layout(gc, w_off, h_off, layout)
            h_off = h + h_off + LABEL_SEPARATION
        #create vertical and horizontal pixmaps
        self.horizontal_label = pixmap
        if self.is_vertical():
            self.vertical_label = self.get_parent().new_pixmap(height, width)
            Utils.rotate_pixmap(gc, self.horizontal_label, self.vertical_label)
        #calculate width and height needed
        self.W = self.label_width + 2*BLOCK_LABEL_PADDING
        self.H = max(*(
            [self.label_height+2*BLOCK_LABEL_PADDING] + [2*PORT_BORDER_SEPARATION + \
            sum([port.H + PORT_SEPARATION for port in ports]) - PORT_SEPARATION
            for ports in (self.get_sources(), self.get_sinks())]
        ))

    def draw(self, gc, window):
        """
        Draw the signal block with label and inputs/outputs.
        @param gc the graphics context
        @param window the gtk window to draw on
        """
        x, y = self.get_coordinate()
        #draw main block: use the image variant when an image path is set
        if self.image_path != '':
            self.new_pixbuf = gtk.gdk.pixbuf_new_from_file(self.image_path)
            Element.draw_image(
                self, gc, window, bg_color=self._bg_color,
                border_color=self.is_highlighted() and Colors.HIGHLIGHT_COLOR or Colors.BORDER_COLOR, pixbuf=self.new_pixbuf
            )
        else:
            Element.draw(
                self, gc, window, bg_color=self._bg_color,
                border_color=self.is_highlighted() and Colors.HIGHLIGHT_COLOR or Colors.BORDER_COLOR,
            )
        #draw label image
        if self.is_horizontal():
            window.draw_drawable(gc, self.horizontal_label, 0, 0, x+BLOCK_LABEL_PADDING, y+(self.H-self.label_height)/2, -1, -1)
        elif self.is_vertical():
            window.draw_drawable(gc, self.vertical_label, 0, 0, x+(self.H-self.label_height)/2, y+BLOCK_LABEL_PADDING, -1, -1)
        #draw ports
        for port in self.get_ports(): port.draw(gc, window)

    def what_is_selected(self, coor, coor_m=None):
        """
        Get the element that is selected.
        @param coor the (x,y) tuple
        @param coor_m the (x_m, y_m) tuple
        @return this block, a port, or None
        """
        for port in self.get_ports():
            port_selected = port.what_is_selected(coor, coor_m)
            if port_selected: return port_selected
        return Element.what_is_selected(self, coor, coor_m)
|
"""Collector for MySQL."""
import errno
import os
import re
import socket
import sys
import time
try:
import MySQLdb
except ImportError:
MySQLdb = None # This is handled gracefully in main()
from collectors.etc import mysqlconf
from collectors.lib import utils
# How often to sample each monitored MySQL instance.
COLLECTION_INTERVAL = 15  # seconds
# Timeout when establishing a connection to mysqld.
CONNECT_TIMEOUT = 2  # seconds
# Presumably how often the collector re-scans for DBs; the loop that uses
# this is outside the visible chunk -- confirm against the main loop.
DB_REFRESH_INTERVAL = 60  # seconds
# Socket files that map to the "default" (unnamed) database instance.
DEFAULT_SOCKFILES = set([
    "/tmp/mysql.sock",                  # MySQL's own default.
    "/var/lib/mysql/mysql.sock",        # RH-type / RPM systems.
    "/var/run/mysqld/mysqld.sock",      # Debian-type systems.
])
# Directories scanned (one subdirectory deep) for per-database socket files.
SEARCH_DIRS = [
    "/var/lib/mysql",
]
class DB(object):
    """Represents a MySQL server (as we can monitor more than 1 MySQL)."""

    def __init__(self, sockfile, dbname, db, cursor, version):
        """Constructor.

        Args:
            sockfile: Path to the socket file.
            dbname: Name of the database for that socket file.
            db: A MySQLdb connection opened to that socket file.
            cursor: A cursor acquired from that connection.
            version: What version is this MySQL running (from `SELECT VERSION()').
        """
        self.sockfile = sockfile
        self.dbname = dbname
        self.db = db
        self.cursor = cursor
        self.version = version
        self.master = None
        self.slave_bytes_executed = None
        self.relay_bytes_relayed = None

        version = version.split(".")
        try:
            self.major = int(version[0])
            self.medium = int(version[1])
        except (ValueError, IndexError):
            # Unparseable version string: treat as "very old" (0.0).
            self.major = self.medium = 0

    def __str__(self):
        return "DB(%r, %r, version=%r)" % (self.sockfile, self.dbname,
                                           self.version)

    def __repr__(self):
        return self.__str__()

    def isShowGlobalStatusSafe(self):
        """Returns whether or not SHOW GLOBAL STATUS is safe to run."""
        # We can't run SHOW GLOBAL STATUS on versions prior to 5.1 because it
        # locks the entire database for too long and severely impacts traffic.
        return self.major > 5 or (self.major == 5 and self.medium >= 1)

    def query(self, sql):
        """Executes the given SQL statement and returns a sequence of rows."""
        assert self.cursor, "%s already closed?" % (self,)
        try:
            self.cursor.execute(sql)
        except MySQLdb.OperationalError as exc:
            # `except X, (errcode, msg)` was Python-2-only syntax; `as`
            # works on 2.6+ and 3.x alike.
            if exc.args[0] != 2006:  # "MySQL server has gone away"
                raise
            self._reconnect()
            # BUG FIX: previously the statement was never re-executed after
            # reconnecting, so fetchall() returned results from a cursor
            # that had run nothing.  Retry once on the fresh connection.
            self.cursor.execute(sql)
        return self.cursor.fetchall()

    def close(self):
        """Closes the connection to this MySQL server."""
        if self.cursor:
            self.cursor.close()
            self.cursor = None
        if self.db:
            self.db.close()
            self.db = None

    def _reconnect(self):
        """Reconnects to this MySQL server."""
        self.close()
        self.db = mysql_connect(self.sockfile)
        self.cursor = self.db.cursor()
def mysql_connect(sockfile):
    """Connects to the MySQL server using the specified socket file."""
    credentials = mysqlconf.get_user_password(sockfile)
    user, passwd = credentials
    return MySQLdb.connect(unix_socket=sockfile,
                           connect_timeout=CONNECT_TIMEOUT,
                           user=user, passwd=passwd)
def todict(db, row):
    """Transforms a row (returned by DB.query) into a dict keyed by column names.

    Args:
      db: The DB instance from which this row was obtained.
      row: A row as returned by DB.query
    """
    # Column names come from the cursor's result-set description;
    # lower-case them to normalize field names.
    return dict((field[0].lower(), row[i])
                for i, field in enumerate(db.cursor.description))
def get_dbname(sockfile):
    """Returns the name of the DB based on the path to the socket file.

    Sockets listed in DEFAULT_SOCKFILES map to the name "default"; otherwise
    the name is extracted from the `mysql-<name>` directory component of the
    path.  Returns None (after logging an error) when no name can be derived.
    """
    if sockfile in DEFAULT_SOCKFILES:
        return "default"
    # Raw string so `\.` stays a regex escape instead of a (deprecated)
    # Python string escape sequence.
    m = re.search(r"/mysql-(.+)/[^.]+\.sock$", sockfile)
    if not m:
        utils.err("error: couldn't guess the name of the DB for " + sockfile)
        return None
    return m.group(1)
def find_sockfiles():
    """Returns a list of paths to socket files to monitor."""
    found = []
    # Scan each known parent directory for per-DB subdirectories; each
    # subdirectory is expected to contain at most one socket file.
    # (Renamed the old `dir` local, which shadowed the builtin.)
    for parent in SEARCH_DIRS:
        if not os.path.isdir(parent):
            continue
        for entry in os.listdir(parent):
            dbdir = os.path.join(parent, entry)
            if not os.path.isdir(dbdir):
                continue
            for candidate in os.listdir(dbdir):
                path = os.path.join(dbdir, candidate)
                if utils.is_sockfile(path):
                    found.append(path)
                    break  # We only expect 1 socket file per DB, so get out.
    # Try the default locations.
    for sockfile in DEFAULT_SOCKFILES:
        if utils.is_sockfile(sockfile):
            found.append(sockfile)
    return found
def find_databases(dbs=None):
    """Returns a map of dbname (string) to DB instances to monitor.

    Args:
      dbs: A map of dbname (string) to DB instances already monitored.
        This map will be modified in place if it's not None.
    """
    sockfiles = find_sockfiles()
    if dbs is None:
        dbs = {}
    for sockfile in sockfiles:
        dbname = get_dbname(sockfile)
        # Skip sockets we already monitor or could not derive a name for.
        if dbname in dbs:
            continue
        if not dbname:
            continue
        try:
            db = mysql_connect(sockfile)
            cursor = db.cursor()
            cursor.execute("SELECT VERSION()")
        except (EnvironmentError, EOFError, RuntimeError, socket.error,
                MySQLdb.MySQLError) as e:
            # Was `except (...), e` — Python-2-only syntax; `as` works on
            # Python 2.6+ and Python 3.
            utils.err("Couldn't connect to %s: %s" % (sockfile, e))
            continue
        version = cursor.fetchone()[0]
        dbs[dbname] = DB(sockfile, dbname, db, cursor, version)
    return dbs
def now():
    """Returns the current time as an integer UNIX timestamp (seconds)."""
    current = time.time()
    return int(current)
def isyes(s):
    """Maps a case-insensitive "yes"/"no"-style string to 1 or 0."""
    return 1 if s.lower() == "yes" else 0
def collectInnodbStatus(db):
    """Collects and prints InnoDB stats about the given DB instance.

    Parses the free-form text blob returned by `SHOW ENGINE INNODB STATUS`
    and emits one tcollector-format line per recognized metric on stdout.
    """
    ts = now()
    def printmetric(metric, value, tags=""):
        # tcollector line format: "metric timestamp value tag=value ...".
        print "mysql.%s %d %s schema=%s%s" % (metric, ts, value, db.dbname, tags)

    # The status text is the third column of the single result row.
    innodb_status = db.query("SHOW ENGINE INNODB STATUS")[0][2]
    m = re.search("^(\d{6}\s+\d{1,2}:\d\d:\d\d) INNODB MONITOR OUTPUT$",
                  innodb_status, re.M)
    if m:  # If we have it, try to use InnoDB's own timestamp.
        ts = int(time.mktime(time.strptime(m.group(1), "%y%m%d %H:%M:%S")))

    line = None
    def match(regexp):
        # Anchored match against the current line of the status dump
        # (`line` is rebound by the loop below).
        return re.match(regexp, line)

    for line in innodb_status.split("\n"):
        # SEMAPHORES
        m = match("OS WAIT ARRAY INFO: reservation count (\d+), signal count (\d+)")
        if m:
            printmetric("innodb.oswait_array.reservation_count", m.group(1))
            printmetric("innodb.oswait_array.signal_count", m.group(2))
            continue
        m = match("Mutex spin waits (\d+), rounds (\d+), OS waits (\d+)")
        if m:
            printmetric("innodb.locks.spin_waits", m.group(1), " type=mutex")
            printmetric("innodb.locks.rounds", m.group(2), " type=mutex")
            printmetric("innodb.locks.os_waits", m.group(3), " type=mutex")
            continue
        m = match("RW-shared spins (\d+), OS waits (\d+);"
                  " RW-excl spins (\d+), OS waits (\d+)")
        if m:
            printmetric("innodb.locks.spin_waits", m.group(1), " type=rw-shared")
            printmetric("innodb.locks.os_waits", m.group(2), " type=rw-shared")
            printmetric("innodb.locks.spin_waits", m.group(3), " type=rw-exclusive")
            printmetric("innodb.locks.os_waits", m.group(4), " type=rw-exclusive")
            continue
        # INSERT BUFFER AND ADAPTIVE HASH INDEX
        # TODO(tsuna): According to the code in ibuf0ibuf.c, this line and
        # the following one can appear multiple times.  I've never seen this.
        # If that happens, we need to aggregate the values here instead of
        # printing them directly.
        m = match("Ibuf: size (\d+), free list len (\d+), seg size (\d+),")
        if m:
            printmetric("innodb.ibuf.size", m.group(1))
            printmetric("innodb.ibuf.free_list_len", m.group(2))
            printmetric("innodb.ibuf.seg_size", m.group(3))
            continue
        m = match("(\d+) inserts, (\d+) merged recs, (\d+) merges")
        if m:
            printmetric("innodb.ibuf.inserts", m.group(1))
            printmetric("innodb.ibuf.merged_recs", m.group(2))
            printmetric("innodb.ibuf.merges", m.group(3))
            continue
        # ROW OPERATIONS
        m = match("\d+ queries inside InnoDB, (\d+) queries in queue")
        if m:
            printmetric("innodb.queries_queued", m.group(1))
            continue
        m = match("(\d+) read views open inside InnoDB")
        if m:
            printmetric("innodb.opened_read_views", m.group(1))
            continue
        # TRANSACTION
        m = match("History list length (\d+)")
        if m:
            printmetric("innodb.history_list_length", m.group(1))
            continue
def collect(db):
    """Collects and prints stats about the given DB instance.

    Emits global status counters, slave replication metrics and connection
    state counts as tcollector-format lines on stdout.
    """
    ts = now()
    def printmetric(metric, value, tags=""):
        # tcollector line format: "metric timestamp value tag=value ...".
        print "mysql.%s %d %s schema=%s%s" % (metric, ts, value, db.dbname, tags)

    has_innodb = False
    if db.isShowGlobalStatusSafe():
        for metric, value in db.query("SHOW GLOBAL STATUS"):
            try:
                # Values are returned as strings; only numeric ones are
                # emitted, the rest are skipped.
                if "." in value:
                    value = float(value)
                else:
                    value = int(value)
            except ValueError:
                continue
            metric = metric.lower()
            has_innodb = has_innodb or metric.startswith("innodb")
            printmetric(metric, value)

    if has_innodb:
        collectInnodbStatus(db)

    if has_innodb and False:  # Disabled because it's too expensive for InnoDB.
        waits = {}  # maps a mutex name to the number of waits
        ts = now()
        for engine, mutex, status in db.query("SHOW ENGINE INNODB MUTEX"):
            if not status.startswith("os_waits"):
                continue
            m = re.search("&(\w+)(?:->(\w+))?$", mutex)
            if not m:
                continue
            mutex, kind = m.groups()
            if kind:
                mutex += "." + kind
            wait_count = int(status.split("=", 1)[1])
            waits[mutex] = waits.get(mutex, 0) + wait_count
        for mutex, wait_count in waits.iteritems():
            printmetric("innodb.locks", wait_count, " mutex=" + mutex)

    ts = now()

    mysql_slave_status = db.query("SHOW SLAVE STATUS")
    if mysql_slave_status:
        slave_status = todict(db, mysql_slave_status[0])
        master_host = slave_status["master_host"]
    else:
        master_host = None

    if master_host and master_host != "None":
        # seconds_behind_master is NULL (None) when replication is broken,
        # so only emit it when it is an actual number.
        sbm = slave_status.get("seconds_behind_master")
        if isinstance(sbm, (int, long)):
            printmetric("slave.seconds_behind_master", sbm)
        printmetric("slave.bytes_executed", slave_status["exec_master_log_pos"])
        printmetric("slave.bytes_relayed", slave_status["read_master_log_pos"])
        printmetric("slave.thread_io_running",
                    isyes(slave_status["slave_io_running"]))
        printmetric("slave.thread_sql_running",
                    isyes(slave_status["slave_sql_running"]))

    states = {}  # maps a connection state to number of connections in that state
    for row in db.query("SHOW PROCESSLIST"):
        # NOTE(review): the counter is keyed on `cmd` (the Command column),
        # not on `state`, even though the metric tag is named "state" —
        # preserved as-is; confirm which column was intended.
        id, user, host, db_, cmd, time, state = row[:7]
        states[cmd] = states.get(cmd, 0) + 1
    for state, count in states.iteritems():
        state = state.lower().replace(" ", "_")
        printmetric("connection_states", count, " state=%s" % state)
def main(args):
    """Collects and dumps stats from a MySQL server.

    Returns 13 (tcollector's "don't respawn" code) when there is nothing to
    monitor, 1 when MySQLdb is unavailable, 2 on a broken stdout pipe.
    Otherwise loops forever, collecting every COLLECTION_INTERVAL seconds
    and refreshing the DB list every DB_REFRESH_INTERVAL seconds.
    """
    if not find_sockfiles():  # Nothing to monitor.
        return 13               # Ask tcollector to not respawn us.
    if MySQLdb is None:
        utils.err("error: Python module `MySQLdb' is missing")
        return 1

    last_db_refresh = now()
    dbs = find_databases()
    while True:
        ts = now()
        # Periodically re-scan for new databases (find_databases skips
        # the names already present in `dbs`).
        if ts - last_db_refresh >= DB_REFRESH_INTERVAL:
            find_databases(dbs)
            last_db_refresh = ts

        errs = []
        for dbname, db in dbs.iteritems():
            try:
                collect(db)
            except (EnvironmentError, EOFError, RuntimeError, socket.error,
                    MySQLdb.MySQLError), e:
                if isinstance(e, IOError) and e[0] == errno.EPIPE:
                    # Exit on a broken pipe.  There's no point in continuing
                    # because no one will read our stdout anyway.
                    return 2
                utils.err("error: failed to collect data from %s: %s" % (db, e))
                errs.append(dbname)

        # Drop connections that errored; the next refresh cycle will
        # re-discover and re-open them.
        for dbname in errs:
            del dbs[dbname]

        sys.stdout.flush()
        time.sleep(COLLECTION_INTERVAL)
if __name__ == "__main__":
    # This collector only writes to stdout; close stdin so nothing can
    # accidentally block reading from it.
    sys.stdin.close()
    sys.exit(main(sys.argv))
|
from odoo import models, fields
class ResCompany(models.Model):
    _inherit = "res.company"

    def _localization_use_documents(self):
        """ Chilean localization use documents """
        self.ensure_one()
        # Companies whose fiscal country is Chile always use documents;
        # anyone else defers to the standard behaviour.
        if self.account_fiscal_country_id.code == "CL":
            return True
        return super()._localization_use_documents()
|
from openerp import models, fields, api, _
from openerp.fields import DATE_LENGTH
from openerp.exceptions import Warning
class ProjectProject(models.Model):
    _inherit = 'project.project'

    # How task dates are propagated back to the project:
    #   'date_begin' -> the project end date is recalculated from task end dates
    #   'date_end'   -> the project start date is recalculated from task start dates
    # When unset, project_recalculate() refuses to run.
    calculation_type = fields.Selection(
        [('date_begin', 'Date begin'),
         ('date_end', 'Date end')],
        string='Calculation type', default=False,
        help='How to calculate tasks, with date start or date end references. '
             'If not set, "Recalculate project" button is disabled.')

    def _start_end_dates_prepare(self):
        """
        Prepare project start or end date, looking into tasks list
        and depending on project calculation_type
        - if calculation_type == 'date_begin':
            project end date = latest date from tasks end dates
        - if calculation_type == 'date_end':
            project start date = earliest date from tasks start dates
        NOTE: Do not perform any write operations to DB

        Returns:
            dict: values suitable for project.write() — may contain 'date'
            and/or 'date_start', or be empty when there is nothing to set.
        """
        vals = {}
        self.ensure_one()
        if not self.tasks:
            return vals
        from_string = fields.Datetime.from_string
        # Here we consider all project task, the ones in a stage with
        # include_in_recalculate = False and the ones with
        # include_in_recalculate = True
        # NOTE(review): a task missing both dates would pass False to
        # from_string here — presumably every task carries at least one
        # date after task_recalculate(); confirm.
        start_task = min(self.tasks,
                         key=lambda t: from_string(t.date_start or t.date_end))
        end_task = max(self.tasks,
                       key=lambda t: from_string(t.date_end or t.date_start))
        # Assign min/max dates if available.  Dates are stored as strings,
        # so slicing to DATE_LENGTH keeps only the date portion.
        if self.calculation_type == 'date_begin' and end_task.date_end:
            vals['date'] = end_task.date_end[:DATE_LENGTH]
        if self.calculation_type == 'date_end' and start_task.date_start:
            vals['date_start'] = start_task.date_start[:DATE_LENGTH]
        return vals

    @api.multi
    def project_recalculate(self):
        """
        Recalculate project tasks start and end dates.
        After that, recalculate new project start or end date

        Raises:
            Warning: if calculation_type is unset, or the reference date
                it requires (date_start / date) is missing.
        """
        for project in self:
            if not project.calculation_type:
                raise Warning(_("Cannot recalculate project because your "
                                "project don't have calculation type."))
            if (project.calculation_type == 'date_begin' and not
                    project.date_start):
                raise Warning(_("Cannot recalculate project because your "
                                "project don't have date start."))
            if (project.calculation_type == 'date_end' and not
                    project.date):
                raise Warning(_("Cannot recalculate project because your "
                                "project don't have date end."))
            # Recalculate each task first, then derive the project-level
            # dates from the updated tasks.
            for task in project.tasks:
                task.task_recalculate()
            vals = project._start_end_dates_prepare()
            if vals:
                project.write(vals)
        return True
|
"""
Views related to the Custom Courses feature.
"""
import csv
import datetime
import functools
import json
import logging
import pytz
from copy import deepcopy
from cStringIO import StringIO
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
HttpResponseForbidden,
)
from django.contrib import messages
from django.db import transaction
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth.models import User
from courseware.access import has_access
from courseware.courses import get_course_by_id
from courseware.field_overrides import disable_overrides
from courseware.grades import iterate_grades_for
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from ccx_keys.locator import CCXLocator
from student.roles import CourseCcxCoachRole
from student.models import CourseEnrollment
from instructor.views.api import _split_input_list
from instructor.views.gradebook_api import get_grade_book_page
from instructor.enrollment import (
enroll_email,
get_email_params,
)
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import (
get_override_for_ccx,
override_field_for_ccx,
clear_ccx_field_info_from_ccx_map,
bulk_delete_ccx_override_fields,
)
from lms.djangoapps.ccx.utils import (
add_master_course_staff_to_ccx,
assign_coach_role_to_ccx,
ccx_course,
ccx_students_enrolling_center,
get_ccx_for_coach,
get_ccx_by_ccx_id,
get_ccx_creation_dict,
get_date,
parse_date,
prep_course_for_grading,
)
log = logging.getLogger(__name__)
TODAY = datetime.datetime.today # for patching in tests
def coach_dashboard(view):
    """
    View decorator which enforces that the user have the CCX coach role on the
    given course and goes ahead and translates the course_id from the Django
    route into a course object.

    The wrapped view is invoked as `view(request, course, ccx)`, where `ccx`
    is None when the URL referenced the master course rather than a CCX.
    Staff and instructors bypass the coach-role check entirely.
    """
    @functools.wraps(view)
    def wrapper(request, course_id):
        """
        Wraps the view function, performing access check, loading the course,
        and modifying the view's call signature.
        """
        course_key = CourseKey.from_string(course_id)
        ccx = None
        if isinstance(course_key, CCXLocator):
            # The URL pointed at a CCX: resolve the CCX record and switch
            # to the underlying master-course key for the access checks.
            ccx_id = course_key.ccx
            ccx = CustomCourseForEdX.objects.get(pk=ccx_id)
            course_key = ccx.course_id
        course = get_course_by_id(course_key, depth=None)

        is_staff = has_access(request.user, 'staff', course)
        is_instructor = has_access(request.user, 'instructor', course)
        if is_staff or is_instructor:
            # if user is staff or instructor then he can view ccx coach dashboard.
            return view(request, course, ccx)
        else:
            role = CourseCcxCoachRole(course_key)
            if not role.has_user(request.user):
                return HttpResponseForbidden(_('You must be a CCX Coach to access this view.'))
            # if there is a ccx, we must validate that it is the ccx for this coach
            if ccx is not None:
                coach_ccx = get_ccx_by_ccx_id(course, request.user, ccx.id)
                if coach_ccx is None:
                    return HttpResponseForbidden(
                        _('You must be the coach for this ccx to access this view')
                    )
            return view(request, course, ccx)
    return wrapper
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def dashboard(request, course, ccx=None):
    """
    Display the CCX Coach Dashboard.

    With an existing CCX: renders schedule, member list, grading policy and
    the related save/gradebook/CSV URLs.  Without one: renders the CCX
    creation form.
    """
    # right now, we can only have one ccx per user and course
    # so, if no ccx is passed in, we can safely redirect to that
    if ccx is None:
        ccx = get_ccx_for_coach(course, request.user)
        if ccx:
            url = reverse(
                'ccx_coach_dashboard',
                kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
            )
            return redirect(url)

    context = {
        'course': course,
        'ccx': ccx,
    }
    context.update(get_ccx_creation_dict(course))

    if ccx:
        ccx_locator = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
        # At this point we are done with verification that current user is ccx coach.
        assign_coach_role_to_ccx(ccx_locator, request.user, course.id)
        schedule = get_ccx_schedule(course, ccx)
        grading_policy = get_override_for_ccx(
            ccx, course, 'grading_policy', course.grading_policy)
        context['schedule'] = json.dumps(schedule, indent=4)
        context['save_url'] = reverse(
            'save_ccx', kwargs={'course_id': ccx_locator})
        context['ccx_members'] = CourseEnrollment.objects.filter(course_id=ccx_locator, is_active=True)
        context['gradebook_url'] = reverse(
            'ccx_gradebook', kwargs={'course_id': ccx_locator})
        context['grades_csv_url'] = reverse(
            'ccx_grades_csv', kwargs={'course_id': ccx_locator})
        context['grading_policy'] = json.dumps(grading_policy, indent=4)
        context['grading_policy_url'] = reverse(
            'ccx_set_grading_policy', kwargs={'course_id': ccx_locator})
        # Rebind `course` to the CCX-scoped course so the template sees
        # the overridden field values instead of the master course's.
        with ccx_course(ccx_locator) as course:
            context['course'] = course
    else:
        context['create_ccx_url'] = reverse(
            'create_ccx', kwargs={'course_id': course.id})
    return render_to_response('ccx/coach_dashboard.html', context)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def create_ccx(request, course, ccx=None):
    """
    Create a new CCX

    Creates the CCX record, seeds its start/due/enrollment-limit overrides,
    hides all course content, enrolls the coach, and redirects to the new
    CCX's coach dashboard.  Refuses courses that use a CCX connector or a
    deprecated course id.
    """
    name = request.POST.get('name')

    if hasattr(course, 'ccx_connector') and course.ccx_connector:
        # if ccx connector url is set in course settings then inform user that he can
        # only create ccx by using ccx connector url.
        context = get_ccx_creation_dict(course)
        messages.error(request, context['use_ccx_con_error_message'])
        return render_to_response('ccx/coach_dashboard.html', context)

    # prevent CCX objects from being created for deprecated course ids.
    if course.id.deprecated:
        messages.error(request, _(
            "You cannot create a CCX from a course using a deprecated id. "
            "Please create a rerun of this course in the studio to allow "
            "this action."))
        url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
        return redirect(url)

    ccx = CustomCourseForEdX(
        course_id=course.id,
        coach=request.user,
        display_name=name)
    ccx.save()

    # Make sure start/due are overridden for entire course
    start = TODAY().replace(tzinfo=pytz.UTC)
    override_field_for_ccx(ccx, course, 'start', start)
    override_field_for_ccx(ccx, course, 'due', None)

    # Enforce a static limit for the maximum amount of students that can be enrolled
    override_field_for_ccx(ccx, course, 'max_student_enrollments_allowed', settings.CCX_MAX_STUDENTS_ALLOWED)

    # Hide anything that can show up in the schedule: chapters, sequentials
    # and verticals all start out staff-only until the coach reveals them.
    hidden = 'visible_to_staff_only'
    for chapter in course.get_children():
        override_field_for_ccx(ccx, chapter, hidden, True)
        for sequential in chapter.get_children():
            override_field_for_ccx(ccx, sequential, hidden, True)
            for vertical in sequential.get_children():
                override_field_for_ccx(ccx, vertical, hidden, True)

    ccx_id = CCXLocator.from_course_locator(course.id, ccx.id)
    url = reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_id})

    # Enroll the coach in the course
    email_params = get_email_params(course, auto_enroll=True, course_key=ccx_id, display_name=ccx.display_name)
    enroll_email(
        course_id=ccx_id,
        student_email=request.user.email,
        auto_enroll=True,
        email_students=True,
        email_params=email_params,
    )
    assign_coach_role_to_ccx(ccx_id, request.user, course.id)
    add_master_course_staff_to_ccx(course, ccx_id, ccx.display_name)

    return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def save_ccx(request, course, ccx=None):
    """
    Save changes to CCX.

    Expects the request body to be a JSON-encoded schedule (the shape
    produced by `get_ccx_schedule`).  Applies it as field overrides, deletes
    overrides for cleared dates, moves the course start date back to the
    earliest unit start, and lowers any grading-policy `min_count` that now
    exceeds the number of visible graded units.
    """
    if not ccx:
        raise Http404

    def override_fields(parent, data, graded, earliest=None, ccx_ids_to_delete=None):
        """
        Recursively apply CCX schedule data to CCX by overriding the
        `visible_to_staff_only`, `start` and `due` fields for units in the
        course.

        Returns (earliest start date seen, list of override ids to delete).
        `ccx_ids_to_delete` is shared (mutated in place) across recursive
        calls; `graded` counts visible graded units per assignment format.
        """
        if ccx_ids_to_delete is None:
            ccx_ids_to_delete = []
        blocks = {
            str(child.location): child
            for child in parent.get_children()}
        for unit in data:
            block = blocks[unit['location']]
            override_field_for_ccx(
                ccx, block, 'visible_to_staff_only', unit['hidden'])
            # Reset per unit: previously `due` leaked from the prior loop
            # iteration (or was unbound) for units without a 'due' key,
            # which could crash or copy a stale date into a vertical.
            due = None
            start = parse_date(unit['start'])
            if start:
                if not earliest or start < earliest:
                    earliest = start
                override_field_for_ccx(ccx, block, 'start', start)
            else:
                # Start was cleared: drop the stored override entirely.
                ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'start_id'))
                clear_ccx_field_info_from_ccx_map(ccx, block, 'start')

            # Only subsection (aka sequential) and unit (aka vertical) have due dates.
            if 'due' in unit:  # checking that the key (due) exist in dict (unit).
                due = parse_date(unit['due'])
                if due:
                    override_field_for_ccx(ccx, block, 'due', due)
                else:
                    ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
                    clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
            else:
                # In case of section aka chapter we do not have due date.
                ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
                clear_ccx_field_info_from_ccx_map(ccx, block, 'due')

            if not unit['hidden'] and block.graded:
                graded[block.format] = graded.get(block.format, 0) + 1

            children = unit.get('children', None)
            # For a vertical, override start and due dates of all its problems.
            if unit.get('category', None) == u'vertical':
                for component in block.get_children():
                    # override start and due date of problem (Copy dates of vertical into problems)
                    if start:
                        override_field_for_ccx(ccx, component, 'start', start)
                    if due:
                        override_field_for_ccx(ccx, component, 'due', due)

            if children:
                # Capture the recursion's earliest date: it was previously
                # discarded, so a child starting before every parent never
                # moved the course start date back.
                earliest, _ = override_fields(block, children, graded,
                                              earliest, ccx_ids_to_delete)
        return earliest, ccx_ids_to_delete

    graded = {}
    # The empty list used to be passed positionally and landed in `earliest`;
    # pass it by keyword so it seeds `ccx_ids_to_delete` as intended.
    earliest, ccx_ids_to_delete = override_fields(
        course, json.loads(request.body), graded, ccx_ids_to_delete=[])
    bulk_delete_ccx_override_fields(ccx, ccx_ids_to_delete)
    if earliest:
        override_field_for_ccx(ccx, course, 'start', earliest)

    # Attempt to automatically adjust grading policy
    changed = False
    policy = get_override_for_ccx(
        ccx, course, 'grading_policy', course.grading_policy
    )
    policy = deepcopy(policy)
    grader = policy['GRADER']
    for section in grader:
        # Never require more graded units than are actually visible.
        count = graded.get(section.get('type'), 0)
        if count < section.get('min_count', 0):
            changed = True
            section['min_count'] = count
    if changed:
        override_field_for_ccx(ccx, course, 'grading_policy', policy)

    return HttpResponse(
        json.dumps({
            'schedule': get_ccx_schedule(course, ccx),
            'grading_policy': json.dumps(policy, indent=4)}),
        content_type='application/json',
    )
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def set_grading_policy(request, course, ccx=None):
    """
    Set grading policy for the CCX.

    Stores the JSON policy posted by the coach as a course-level override,
    then returns to the coach dashboard.
    """
    if not ccx:
        raise Http404
    policy = json.loads(request.POST['policy'])
    override_field_for_ccx(ccx, course, 'grading_policy', policy)
    ccx_locator = CCXLocator.from_course_locator(course.id, ccx.id)
    return redirect(
        reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_locator}))
def get_ccx_schedule(course, ccx):
    """
    Generate a JSON serializable CCX schedule.

    Returns a tuple of nested dicts (chapter -> sequential -> vertical),
    each carrying location, display name, category, dates and hidden flag.
    """
    def visit(node, depth=1):
        """
        Recursive generator function which yields CCX schedule nodes.
        We convert dates to string to get them ready for use by the js date
        widgets, which use text inputs.
        Visits students visible nodes only; nodes children of hidden ones
        are skipped as well.
        Dates:
        Only start date is applicable to a section. If ccx coach did not override start date then
        getting it from the master course.
        Both start and due dates are applicable to a subsection (aka sequential). If ccx coach did not override
        these dates then getting these dates from corresponding subsection in master course.
        Unit inherits start date and due date from its subsection. If ccx coach did not override these dates
        then getting them from corresponding subsection in master course.
        """
        for child in node.get_children():
            # in case the children are visible to staff only, skip them
            if child.visible_to_staff_only:
                continue
            hidden = get_override_for_ccx(
                ccx, child, 'visible_to_staff_only',
                child.visible_to_staff_only)
            start = get_date(ccx, child, 'start')
            if depth > 1:
                # Subsection has both start and due dates and unit inherit dates from their subsections
                # (`due` is assigned on both the depth==2 and depth==3 paths,
                # and depth never exceeds 3 because recursion stops there).
                if depth == 2:
                    due = get_date(ccx, child, 'due')
                elif depth == 3:
                    # Get start and due date of subsection in case unit has not override dates.
                    due = get_date(ccx, child, 'due', node)
                    start = get_date(ccx, child, 'start', node)

                visited = {
                    'location': str(child.location),
                    'display_name': child.display_name,
                    'category': child.category,
                    'start': start,
                    'due': due,
                    'hidden': hidden,
                }
            else:
                # Sections (chapters) carry a start date but no due date.
                visited = {
                    'location': str(child.location),
                    'display_name': child.display_name,
                    'category': child.category,
                    'start': start,
                    'hidden': hidden,
                }
            if depth < 3:
                children = tuple(visit(child, depth + 1))
                if children:
                    visited['children'] = children
                    yield visited
            else:
                yield visited

    # Disable field overrides so master-course values are read where the
    # CCX has no override of its own.
    with disable_overrides():
        return tuple(visit(course))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_schedule(request, course, ccx=None):  # pylint: disable=unused-argument
    """
    get json representation of ccx schedule
    """
    if not ccx:
        raise Http404
    payload = json.dumps(get_ccx_schedule(course, ccx), indent=4)
    return HttpResponse(payload, content_type='application/json')
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_invite(request, course, ccx=None):
    """
    Invite users to new ccx

    Reads the action ("Enroll"/"Unenroll") and a free-form list of student
    identifiers from the POST data and hands them to the enrollment center.
    """
    if not ccx:
        raise Http404
    action = request.POST.get('enrollment-button')
    identifiers = _split_input_list(request.POST.get('student-ids'))
    email_students = 'email-students' in request.POST
    course_key = CCXLocator.from_course_locator(course.id, ccx.id)
    email_params = get_email_params(course, auto_enroll=True,
                                    course_key=course_key,
                                    display_name=ccx.display_name)
    ccx_students_enrolling_center(action, identifiers, email_students,
                                  course_key, email_params, ccx.coach)
    return redirect(
        reverse('ccx_coach_dashboard', kwargs={'course_id': course_key}))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_student_management(request, course, ccx=None):
    """
    Manage the enrollment of individual students in a CCX

    Same flow as `ccx_invite` but for a single identifier; enrollment
    errors are surfaced to the coach via Django messages.
    """
    if not ccx:
        raise Http404
    action = request.POST.get('student-action', None)
    identifiers = [request.POST.get('student-id', '')]
    email_students = 'email-students' in request.POST
    course_key = CCXLocator.from_course_locator(course.id, ccx.id)
    email_params = get_email_params(course, auto_enroll=True,
                                    course_key=course_key,
                                    display_name=ccx.display_name)
    errors = ccx_students_enrolling_center(action, identifiers, email_students,
                                           course_key, email_params, ccx.coach)
    for error_message in errors:
        messages.error(request, error_message)
    return redirect(
        reverse('ccx_coach_dashboard', kwargs={'course_id': course_key}))
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_gradebook(request, course, ccx=None):
    """
    Show the gradebook for this CCX.

    Runs outside a single transaction (`non_atomic_requests`), presumably
    because grading a whole roster can be long-running — confirm.
    """
    if not ccx:
        raise Http404

    ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
    # Grade against the CCX-scoped course so field overrides apply.
    with ccx_course(ccx_key) as course:
        prep_course_for_grading(course, request)
        student_info, page = get_grade_book_page(request, course, course_key=ccx_key)

        return render_to_response('courseware/gradebook.html', {
            'page': page,
            'page_url': reverse('ccx_gradebook', kwargs={'course_id': ccx_key}),
            'students': student_info,
            'course': course,
            'course_id': course.id,
            'staff_access': request.user.is_staff,
            # Grade cutoffs sorted highest-first for display.
            'ordered_grades': sorted(
                course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
        })
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_grades_csv(request, course, ccx=None):
    """
    Download grades as CSV.

    Grades every active enrollee of the CCX and streams the result as a
    CSV attachment.  The header row is derived from the first successfully
    graded student's section breakdown.
    """
    if not ccx:
        raise Http404

    ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
    # Grade against the CCX-scoped course so field overrides apply.
    with ccx_course(ccx_key) as course:
        prep_course_for_grading(course, request)

        enrolled_students = User.objects.filter(
            courseenrollment__course_id=ccx_key,
            courseenrollment__is_active=1
        ).order_by('username').select_related("profile")
        grades = iterate_grades_for(course, enrolled_students)

        header = None
        rows = []
        for student, gradeset, __ in grades:
            if gradeset:
                # We were able to successfully grade this student for this
                # course.
                if not header:
                    # Encode the header row in utf-8 encoding in case there are
                    # unicode characters
                    header = [section['label'].encode('utf-8')
                              for section in gradeset[u'section_breakdown']]
                    rows.append(["id", "email", "username", "grade"] + header)

                percents = {
                    section['label']: section.get('percent', 0.0)
                    for section in gradeset[u'section_breakdown']
                    if 'label' in section
                }

                # Missing sections default to 0.0 so every row has the same
                # number of columns as the header.
                row_percents = [percents.get(label, 0.0) for label in header]
                rows.append([student.id, student.email, student.username,
                             gradeset['percent']] + row_percents)

        buf = StringIO()
        writer = csv.writer(buf)
        for row in rows:
            writer.writerow(row)

        response = HttpResponse(buf.getvalue(), content_type='text/csv')
        response['Content-Disposition'] = 'attachment'

        return response
|
import re, os, sys
from Tester import Tester
from RunParallel import RunParallel # For TIMEOUT value
class AnalyzeJacobian(Tester):
    """Tester that runs analyzejacobian.py against an input file and checks
    the script's output and exit code."""

    @staticmethod
    def validParams():
        params = Tester.validParams()
        params.addRequiredParam('input', "The input file to use for this test.")
        params.addParam('test_name', "The name of the test - populated automatically")
        params.addParam('expect_out', "A regular expression that must occur in the input in order for the test to be considered passing.")
        params.addParam('resize_mesh', False, "Resize the input mesh")
        params.addParam('off_diagonal', True, "Also test the off-diagonal Jacobian entries")
        params.addParam('mesh_size', 1, "Resize the input mesh")
        return params

    def __init__(self, name, params):
        Tester.__init__(self, name, params)

    def getCommand(self, options):
        """Build the shell command line that runs the Jacobian analysis script."""
        specs = self.specs
        # Create the command line string to run
        command = specs['moose_dir'] + '/python/jacobiandebug/analyzejacobian.py'
        # Check for built application
        # NOTE(review): the existence check is on the analysis script path,
        # but the message prints specs['executable'] — confirm which was meant.
        if not options.dry_run and not os.path.exists(command):
            # Parenthesized so this is valid in both Python 2 and Python 3
            # (the bare `print` statement was Python-2-only).
            print('Application not found: ' + str(specs['executable']))
            sys.exit(1)

        mesh_options = ' -m %s' % options.method
        if specs['resize_mesh']:
            mesh_options += ' -r -s %d' % specs['mesh_size']
        if not specs['off_diagonal']:
            mesh_options += ' -D'

        command += mesh_options + ' ' + specs['input'] + ' -e ' + specs['executable'] + ' ' + ' '.join(specs['cli_args'])
        return command

    def processResults(self, moose_dir, retcode, options, output):
        """Classify the run: returns (reason, output), where an empty reason
        means the test passed."""
        reason = ''
        specs = self.specs
        if specs.isValid('expect_out'):
            out_ok = self.checkOutputForPattern(output, specs['expect_out'])
            if out_ok and retcode != 0:
                # Expected output appeared but the process still failed.
                reason = 'OUT FOUND BUT CRASH'
            elif not out_ok:
                reason = 'NO EXPECTED OUT'
        if reason == '':
            if retcode == RunParallel.TIMEOUT:
                reason = 'TIMEOUT'
            elif retcode != 0:
                reason = 'CRASH'

        return (reason, output)

    def checkOutputForPattern(self, output, re_pattern):
        """Return True if re_pattern is found in output (MULTILINE | DOTALL)."""
        # Identity comparison via `is not None` replaces the old `== None`
        # check and collapses the if/else into a single expression.
        return re.search(re_pattern, output, re.MULTILINE | re.DOTALL) is not None
|
'''OpenGL extension SGIX.interlace
This module customises the behaviour of the
OpenGL.raw.GL.SGIX.interlace to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a way to interlace rows of pixels when
rasterizing pixel rectangles, and loading texture images. In this
context, interlacing means skipping over rows of pixels or texels
in the destination. This is useful for dealing with video data
since a single frame of video is typically composed from two images
or fields: one image specifying the data for even rows of the frame
and the other image specifying the data for odd rows of the frame.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SGIX/interlace.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIX.interlace import *
from OpenGL.raw.GL.SGIX.interlace import _EXTENSION_NAME
def glInitInterlaceSGIX():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    available = extensions.hasGLExtension( _EXTENSION_NAME )
    return available
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines the `choices` for
    # ProviderAssetFile.name.  Since `choices` is validated at the
    # application layer, this presumably changes no database schema
    # beyond what AlterField re-declares — confirm before editing.

    dependencies = [
        ('osf', '0114_merge_20180621_1322'),
    ]

    operations = [
        migrations.AlterField(
            model_name='providerassetfile',
            name='name',
            field=models.CharField(choices=[('favicon', 'favicon'), ('powered_by_share', 'powered_by_share'), ('sharing', 'sharing'), ('square_color_no_transparent', 'square_color_no_transparent'), ('square_color_transparent', 'square_color_transparent'), ('style', 'style'), ('wide_black', 'wide_black'), ('wide_color', 'wide_color'), ('wide_white', 'wide_white')], max_length=63),
        ),
    ]
|
import unittest, struct
import os
import sys
from test import support
import math
from math import isinf, isnan, copysign, ldexp
import operator
import random, fractions
INF = float("inf")
NAN = float("nan")
have_getformat = hasattr(float, "__getformat__")
requires_getformat = unittest.skipUnless(have_getformat,
"requires __getformat__")
requires_setformat = unittest.skipUnless(hasattr(float, "__setformat__"),
"requires __setformat__")
test_dir = os.path.dirname(__file__) or os.curdir
format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt')
class GeneralFloatCases(unittest.TestCase):
def test_float(self):
self.assertEqual(float(3.14), 3.14)
self.assertEqual(float(314), 314.0)
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float(b" 3.14 "), 3.14)
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertRaises(ValueError, float, "++3.14")
self.assertRaises(ValueError, float, "+-3.14")
self.assertRaises(ValueError, float, "-+3.14")
self.assertRaises(ValueError, float, "--3.14")
self.assertRaises(ValueError, float, ".nan")
self.assertRaises(ValueError, float, "+.inf")
self.assertRaises(ValueError, float, ".")
self.assertRaises(ValueError, float, "-.")
self.assertRaises(ValueError, float, b"-")
self.assertRaises(TypeError, float, {})
# Lone surrogate
self.assertRaises(UnicodeEncodeError, float, '\uD8F0')
# check that we don't accept alternate exponent markers
self.assertRaises(ValueError, float, "-1.7d29")
self.assertRaises(ValueError, float, "3D-14")
self.assertEqual(float(" \u0663.\u0661\u0664 "), 3.14)
# XXX self.assertEqual(float("\N{EM SPACE}3.14\N{EN SPACE}"), 3.14)
# extra long strings should not be a problem
float(b'.' + b'1'*1000)
float('.' + '1'*1000)
def test_error_message(self):
testlist = ('\xbd', '123\xbd', ' 123 456 ')
for s in testlist:
try:
float(s)
except ValueError as e:
self.assertIn(s.strip(), e.args[0])
else:
self.fail("Expected int(%r) to raise a ValueError", s)
@support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE')
def test_float_with_comma(self):
# set locale to something that doesn't use '.' for the decimal point
# float must not accept the locale specific decimal point but
# it still has to accept the normal python syntax
import locale
if not locale.localeconv()['decimal_point'] == ',':
return
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float("+3.14 "), 3.14)
self.assertEqual(float("-3.14 "), -3.14)
self.assertEqual(float(".14 "), .14)
self.assertEqual(float("3. "), 3.0)
self.assertEqual(float("3.e3 "), 3000.0)
self.assertEqual(float("3.2e3 "), 3200.0)
self.assertEqual(float("2.5e-1 "), 0.25)
self.assertEqual(float("5e-1"), 0.5)
self.assertRaises(ValueError, float, " 3,14 ")
self.assertRaises(ValueError, float, " +3,14 ")
self.assertRaises(ValueError, float, " -3,14 ")
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertEqual(float(" 25.e-1 "), 2.5)
self.assertAlmostEqual(float(" .25e-1 "), .025)
def test_floatconversion(self):
# Make sure that calls to __float__() work properly
class Foo0:
def __float__(self):
return 42.
class Foo1(object):
def __float__(self):
return 42.
class Foo2(float):
def __float__(self):
return 42.
class Foo3(float):
def __new__(cls, value=0.):
return float.__new__(cls, 2*value)
def __float__(self):
return self
class Foo4(float):
def __float__(self):
return 42
# Issue 5759: __float__ not called on str subclasses (though it is on
# unicode subclasses).
class FooStr(str):
def __float__(self):
return float(str(self)) + 1
self.assertAlmostEqual(float(Foo0()), 42.)
self.assertAlmostEqual(float(Foo1()), 42.)
self.assertAlmostEqual(float(Foo2()), 42.)
self.assertAlmostEqual(float(Foo3(21)), 42.)
self.assertRaises(TypeError, float, Foo4(42))
self.assertAlmostEqual(float(FooStr('8')), 9.)
def test_is_integer(self):
self.assertFalse((1.1).is_integer())
self.assertTrue((1.).is_integer())
self.assertFalse(float("nan").is_integer())
self.assertFalse(float("inf").is_integer())
def test_floatasratio(self):
for f, ratio in [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
]:
self.assertEqual(f.as_integer_ratio(), ratio)
for i in range(10000):
f = random.random()
f *= 10 ** random.randint(-100, 100)
n, d = f.as_integer_ratio()
self.assertEqual(float(n).__truediv__(d), f)
R = fractions.Fraction
self.assertEqual(R(0, 1),
R(*float(0.0).as_integer_ratio()))
self.assertEqual(R(5, 2),
R(*float(2.5).as_integer_ratio()))
self.assertEqual(R(1, 2),
R(*float(0.5).as_integer_ratio()))
self.assertEqual(R(4728779608739021, 2251799813685248),
R(*float(2.1).as_integer_ratio()))
self.assertEqual(R(-4728779608739021, 2251799813685248),
R(*float(-2.1).as_integer_ratio()))
self.assertEqual(R(-2100, 1),
R(*float(-2100.0).as_integer_ratio()))
self.assertRaises(OverflowError, float('inf').as_integer_ratio)
self.assertRaises(OverflowError, float('-inf').as_integer_ratio)
self.assertRaises(ValueError, float('nan').as_integer_ratio)
def test_float_containment(self):
floats = (INF, -INF, 0.0, 1.0, NAN)
for f in floats:
self.assertIn(f, [f])
self.assertIn(f, (f,))
self.assertIn(f, {f})
self.assertIn(f, {f: None})
self.assertEqual([f].count(f), 1, "[].count('%r') != 1" % f)
self.assertIn(f, floats)
for f in floats:
# nonidentical containers, same type, same contents
self.assertTrue([f] == [f], "[%r] != [%r]" % (f, f))
self.assertTrue((f,) == (f,), "(%r,) != (%r,)" % (f, f))
self.assertTrue({f} == {f}, "{%r} != {%r}" % (f, f))
self.assertTrue({f : None} == {f: None}, "{%r : None} != "
"{%r : None}" % (f, f))
# identical containers
l, t, s, d = [f], (f,), {f}, {f: None}
self.assertTrue(l == l, "[%r] not equal to itself" % f)
self.assertTrue(t == t, "(%r,) not equal to itself" % f)
self.assertTrue(s == s, "{%r} not equal to itself" % f)
self.assertTrue(d == d, "{%r : None} not equal to itself" % f)
def assertEqualAndEqualSign(self, a, b):
# fail unless a == b and a and b have the same sign bit;
# the only difference from assertEqual is that this test
# distinguishes -0.0 and 0.0.
self.assertEqual((a, copysign(1.0, a)), (b, copysign(1.0, b)))
@support.requires_IEEE_754
def test_float_mod(self):
# Check behaviour of % operator for IEEE 754 special cases.
# In particular, check signs of zeros.
mod = operator.mod
self.assertEqualAndEqualSign(mod(-1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1e-100, 1.0), 1.0)
self.assertEqualAndEqualSign(mod(-0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(1e-100, 1.0), 1e-100)
self.assertEqualAndEqualSign(mod(1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(-1e-100, -1.0), -1e-100)
self.assertEqualAndEqualSign(mod(-0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(1e-100, -1.0), -1.0)
self.assertEqualAndEqualSign(mod(1.0, -1.0), -0.0)
@support.requires_IEEE_754
def test_float_pow(self):
# test builtin pow and ** operator for IEEE 754 special cases.
# Special cases taken from section F.9.4.4 of the C99 specification
for pow_op in pow, operator.pow:
# x**NAN is NAN for any x except 1
self.assertTrue(isnan(pow_op(-INF, NAN)))
self.assertTrue(isnan(pow_op(-2.0, NAN)))
self.assertTrue(isnan(pow_op(-1.0, NAN)))
self.assertTrue(isnan(pow_op(-0.5, NAN)))
self.assertTrue(isnan(pow_op(-0.0, NAN)))
self.assertTrue(isnan(pow_op(0.0, NAN)))
self.assertTrue(isnan(pow_op(0.5, NAN)))
self.assertTrue(isnan(pow_op(2.0, NAN)))
self.assertTrue(isnan(pow_op(INF, NAN)))
self.assertTrue(isnan(pow_op(NAN, NAN)))
# NAN**y is NAN for any y except +-0
self.assertTrue(isnan(pow_op(NAN, -INF)))
self.assertTrue(isnan(pow_op(NAN, -2.0)))
self.assertTrue(isnan(pow_op(NAN, -1.0)))
self.assertTrue(isnan(pow_op(NAN, -0.5)))
self.assertTrue(isnan(pow_op(NAN, 0.5)))
self.assertTrue(isnan(pow_op(NAN, 1.0)))
self.assertTrue(isnan(pow_op(NAN, 2.0)))
self.assertTrue(isnan(pow_op(NAN, INF)))
# (+-0)**y raises ZeroDivisionError for y a negative odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -1.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -1.0)
# (+-0)**y raises ZeroDivisionError for y finite and negative
# but not an odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -0.5)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -0.5)
# (+-0)**y is +-0 for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 1.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 1.0), 0.0)
# (+-0)**y is 0 for y finite and positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 2.0), 0.0)
# (-1)**+-inf is 1
self.assertEqualAndEqualSign(pow_op(-1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, INF), 1.0)
# 1**y is 1 for any y, even if y is an infinity or nan
self.assertEqualAndEqualSign(pow_op(1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, NAN), 1.0)
# x**+-0 is 1 for any x, even if x is a zero, infinity, or nan
self.assertEqualAndEqualSign(pow_op(-INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, -0.0), 1.0)
# x**y defers to complex pow for finite negative x and
# non-integral y.
self.assertEqual(type(pow_op(-2.0, -0.5)), complex)
self.assertEqual(type(pow_op(-2.0, 0.5)), complex)
self.assertEqual(type(pow_op(-1.0, -0.5)), complex)
self.assertEqual(type(pow_op(-1.0, 0.5)), complex)
self.assertEqual(type(pow_op(-0.5, -0.5)), complex)
self.assertEqual(type(pow_op(-0.5, 0.5)), complex)
# x**-INF is INF for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, -INF), INF)
self.assertEqualAndEqualSign(pow_op(-0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.5, -INF), INF)
# x**-INF is 0 for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -INF), 0.0)
# x**INF is 0 for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, INF), 0.0)
# x**INF is INF for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, INF), INF)
self.assertEqualAndEqualSign(pow_op(-2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(INF, INF), INF)
# (-INF)**y is -0.0 for y a negative odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -1.0), -0.0)
# (-INF)**y is 0.0 for y negative but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-INF, -2.0), 0.0)
# (-INF)**y is -INF for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 1.0), -INF)
# (-INF)**y is INF for y positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(-INF, 2.0), INF)
# INF**y is INF for y positive
self.assertEqualAndEqualSign(pow_op(INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(INF, 1.0), INF)
self.assertEqualAndEqualSign(pow_op(INF, 2.0), INF)
# INF**y is 0.0 for y negative
self.assertEqualAndEqualSign(pow_op(INF, -2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -1.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.5), 0.0)
# basic checks not covered by the special cases above
self.assertEqualAndEqualSign(pow_op(-2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(-2.0, -1.0), -0.5)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 1.0), -2.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 2.0), 4.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(2.0, -1.0), 0.5)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 1.0), 2.0)
self.assertEqualAndEqualSign(pow_op(2.0, 2.0), 4.0)
# 1 ** large and -1 ** large; some libms apparently
# have problems with these
self.assertEqualAndEqualSign(pow_op(1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1e100), 1.0)
# check sign for results that underflow to 0
self.assertEqualAndEqualSign(pow_op(-2.0, -2000.0), 0.0)
self.assertEqual(type(pow_op(-2.0, -2000.5)), complex)
self.assertEqualAndEqualSign(pow_op(-2.0, -2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2001.0), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 2000.0), 0.0)
self.assertEqual(type(pow_op(-0.5, 2000.5)), complex)
self.assertEqualAndEqualSign(pow_op(-0.5, 2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2001.0), 0.0)
# check we don't raise an exception for subnormal results,
# and validate signs. Tests currently disabled, since
# they fail on systems where a subnormal result from pow
# is flushed to zero (e.g. Debian/ia64.)
#self.assertTrue(0.0 < pow_op(0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(0.5, 1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-0.5, 1047) > -1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-2.0, -1047) > -1e-315)
@requires_setformat
class FormatFunctionsTestCase(unittest.TestCase):
    """Exercise the CPython-internal float.__getformat__/__setformat__
    hooks; original formats are saved and restored around each test."""
    def setUp(self):
        # Remember the current formats so tearDown can restore them.
        self.save_formats = {'double':float.__getformat__('double'),
                             'float':float.__getformat__('float')}
    def tearDown(self):
        float.__setformat__('double', self.save_formats['double'])
        float.__setformat__('float', self.save_formats['float'])
    def test_getformat(self):
        """__getformat__ reports a known format string and rejects bad args."""
        self.assertIn(float.__getformat__('double'),
                      ['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
        self.assertIn(float.__getformat__('float'),
                      ['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
        self.assertRaises(ValueError, float.__getformat__, 'chicken')
        self.assertRaises(TypeError, float.__getformat__, 1)
    def test_setformat(self):
        """Only a switch back to the detected native format is permitted;
        setting the opposite endianness or nonsense values must fail."""
        for t in 'double', 'float':
            float.__setformat__(t, 'unknown')
            if self.save_formats[t] == 'IEEE, big-endian':
                self.assertRaises(ValueError, float.__setformat__,
                                  t, 'IEEE, little-endian')
            elif self.save_formats[t] == 'IEEE, little-endian':
                self.assertRaises(ValueError, float.__setformat__,
                                  t, 'IEEE, big-endian')
            else:
                # Native format was 'unknown': neither IEEE variant is legal.
                self.assertRaises(ValueError, float.__setformat__,
                                  t, 'IEEE, big-endian')
                self.assertRaises(ValueError, float.__setformat__,
                                  t, 'IEEE, little-endian')
            self.assertRaises(ValueError, float.__setformat__,
                              t, 'chicken')
        self.assertRaises(ValueError, float.__setformat__,
                          'chicken', 'unknown')
# Big- and little-endian IEEE 754 bit patterns for +inf and a quiet NaN,
# used below to probe struct packing/unpacking of special values.
BE_DOUBLE_INF = b'\x7f\xf0\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_INF = BE_DOUBLE_INF[::-1]
BE_DOUBLE_NAN = b'\x7f\xf8\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_NAN = BE_DOUBLE_NAN[::-1]
BE_FLOAT_INF = b'\x7f\x80\x00\x00'
LE_FLOAT_INF = BE_FLOAT_INF[::-1]
BE_FLOAT_NAN = b'\x7f\xc0\x00\x00'
LE_FLOAT_NAN = BE_FLOAT_NAN[::-1]
@requires_setformat
class UnknownFormatTestCase(unittest.TestCase):
    """With float format forced to 'unknown', unpacking IEEE special-value
    bit patterns must refuse to produce inf/nan."""
    def setUp(self):
        # Save real formats, then force 'unknown' for the duration.
        self.save_formats = {'double':float.__getformat__('double'),
                             'float':float.__getformat__('float')}
        float.__setformat__('double', 'unknown')
        float.__setformat__('float', 'unknown')
    def tearDown(self):
        float.__setformat__('double', self.save_formats['double'])
        float.__setformat__('float', self.save_formats['float'])
    def test_double_specials_dont_unpack(self):
        """struct.unpack of double inf/nan raises ValueError under 'unknown'."""
        for fmt, data in [('>d', BE_DOUBLE_INF),
                          ('>d', BE_DOUBLE_NAN),
                          ('<d', LE_DOUBLE_INF),
                          ('<d', LE_DOUBLE_NAN)]:
            self.assertRaises(ValueError, struct.unpack, fmt, data)
    def test_float_specials_dont_unpack(self):
        """struct.unpack of single inf/nan raises ValueError under 'unknown'."""
        for fmt, data in [('>f', BE_FLOAT_INF),
                          ('>f', BE_FLOAT_NAN),
                          ('<f', LE_FLOAT_INF),
                          ('<f', LE_FLOAT_NAN)]:
            self.assertRaises(ValueError, struct.unpack, fmt, data)
@unittest.skip("brython, skip for now")
class IEEEFormatTestCase(unittest.TestCase):
    """On IEEE 754 platforms the special-value bit patterns must unpack
    cleanly (the success counterpart of UnknownFormatTestCase)."""
    @support.requires_IEEE_754
    def test_double_specials_do_unpack(self):
        """Unpacking double inf/nan patterns must not raise."""
        for fmt, data in [('>d', BE_DOUBLE_INF),
                          ('>d', BE_DOUBLE_NAN),
                          ('<d', LE_DOUBLE_INF),
                          ('<d', LE_DOUBLE_NAN)]:
            struct.unpack(fmt, data)
    @support.requires_IEEE_754
    def test_float_specials_do_unpack(self):
        """Unpacking single-precision inf/nan patterns must not raise."""
        for fmt, data in [('>f', BE_FLOAT_INF),
                          ('>f', BE_FLOAT_NAN),
                          ('<f', LE_FLOAT_INF),
                          ('<f', LE_FLOAT_NAN)]:
            struct.unpack(fmt, data)
class FormatTestCase(unittest.TestCase):
    """Tests for format()/__format__ on floats, plus %-formatting against
    the reference cases in formatfloat_testcases.txt."""
    def test_format(self):
        # these should be rewritten to use both format(x, spec) and
        # x.__format__(spec)
        self.assertEqual(format(0.0, 'f'), '0.000000')
        # the default is 'g', except for empty format spec
        self.assertEqual(format(0.0, ''), '0.0')
        self.assertEqual(format(0.01, ''), '0.01')
        self.assertEqual(format(0.01, 'g'), '0.01')
        # empty presentation type should format in the same way as str
        # (issue 5920)
        x = 100/7.
        self.assertEqual(format(x, ''), str(x))
        self.assertEqual(format(x, '-'), str(x))
        self.assertEqual(format(x, '>'), str(x))
        self.assertEqual(format(x, '2'), str(x))
        self.assertEqual(format(1.0, 'f'), '1.000000')
        self.assertEqual(format(-1.0, 'f'), '-1.000000')
        self.assertEqual(format( 1.0, ' f'), ' 1.000000')
        self.assertEqual(format(-1.0, ' f'), '-1.000000')
        self.assertEqual(format( 1.0, '+f'), '+1.000000')
        self.assertEqual(format(-1.0, '+f'), '-1.000000')
        # % formatting
        self.assertEqual(format(-1.0, '%'), '-100.000000%')
        # conversion to string should fail
        self.assertRaises(ValueError, format, 3.0, "s")
        # other format specifiers shouldn't work on floats,
        # in particular int specifiers
        for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
                            [chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if not format_spec in 'eEfFgGn%':
                self.assertRaises(ValueError, format, 0.0, format_spec)
                self.assertRaises(ValueError, format, 1.0, format_spec)
                self.assertRaises(ValueError, format, -1.0, format_spec)
                self.assertRaises(ValueError, format, 1e100, format_spec)
                self.assertRaises(ValueError, format, -1e100, format_spec)
                self.assertRaises(ValueError, format, 1e-100, format_spec)
                self.assertRaises(ValueError, format, -1e-100, format_spec)
        # issue 3382
        self.assertEqual(format(NAN, 'f'), 'nan')
        self.assertEqual(format(NAN, 'F'), 'NAN')
        self.assertEqual(format(INF, 'f'), 'inf')
        self.assertEqual(format(INF, 'F'), 'INF')
    @unittest.skip("brython, skip for now")
    @support.requires_IEEE_754
    def test_format_testfile(self):
        """Check %-formatting against every 'fmt value -> result' line in
        the reference file (lines starting with '--' are comments)."""
        with open(format_testfile) as testfile:
            for line in testfile:
                if line.startswith('--'):
                    continue
                line = line.strip()
                if not line:
                    continue
                lhs, rhs = map(str.strip, line.split('->'))
                fmt, arg = lhs.split()
                self.assertEqual(fmt % float(arg), rhs)
                # the negated value must format to the same digits with '-'
                self.assertEqual(fmt % -float(arg), '-' + rhs)
    def test_issue5864(self):
        """'.4' precision picks significant digits, switching to exponent
        notation when the value no longer fits (issue 5864)."""
        self.assertEqual(format(123.456, '.4'), '123.5')
        self.assertEqual(format(1234.56, '.4'), '1.235e+03')
        self.assertEqual(format(12345.6, '.4'), '1.235e+04')
class ReprTestCase(unittest.TestCase):
@unittest.skip("brython, skip for now")
def test_repr(self):
floats_file = open(os.path.join(os.path.split(__file__)[0],
'floating_points.txt'))
for line in floats_file:
line = line.strip()
if not line or line.startswith('#'):
continue
v = eval(line)
self.assertEqual(v, eval(repr(v)))
floats_file.close()
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_short_repr(self):
# test short float repr introduced in Python 3.1. One aspect
# of this repr is that we get some degree of str -> float ->
# str roundtripping. In particular, for any numeric string
# containing 15 or fewer significant digits, those exact same
# digits (modulo trailing zeros) should appear in the output.
# No more repr(0.03) -> "0.029999999999999999"!
test_strings = [
# output always includes *either* a decimal point and at
# least one digit after that point, or an exponent.
'0.0',
'1.0',
'0.01',
'0.02',
'0.03',
'0.04',
'0.05',
'1.23456789',
'10.0',
'100.0',
# values >= 1e16 get an exponent...
'1000000000000000.0',
'9999999999999990.0',
'1e+16',
'1e+17',
# ... and so do values < 1e-4
'0.001',
'0.001001',
'0.00010000000000001',
'0.0001',
'9.999999999999e-05',
'1e-05',
# values designed to provoke failure if the FPU rounding
# precision isn't set correctly
'8.72293771110361e+25',
'7.47005307342313e+26',
'2.86438000439698e+28',
'8.89142905246179e+28',
'3.08578087079232e+35',
]
for s in test_strings:
negs = '-'+s
self.assertEqual(s, repr(float(s)))
self.assertEqual(negs, repr(float(negs)))
# Since Python 3.2, repr and str are identical
self.assertEqual(repr(float(s)), str(float(s)))
self.assertEqual(repr(float(negs)), str(float(negs)))
@support.requires_IEEE_754
class RoundTestCase(unittest.TestCase):
    """Behaviour of round() on floats: special values, extreme ndigits,
    round-half-even ties, and agreement with '.Nf' string formatting."""
    def test_inf_nan(self):
        """round() of inf/nan and non-integer ndigits raises appropriately."""
        self.assertRaises(OverflowError, round, INF)
        self.assertRaises(OverflowError, round, -INF)
        self.assertRaises(ValueError, round, NAN)
        self.assertRaises(TypeError, round, INF, 0.0)
        self.assertRaises(TypeError, round, -INF, 1.0)
        self.assertRaises(TypeError, round, NAN, "ceci n'est pas un integer")
        self.assertRaises(TypeError, round, -0.0, 1j)
    @unittest.skip("brython, skip for now, rounding issues")
    def test_large_n(self):
        """Huge positive ndigits leave the value unchanged."""
        for n in [324, 325, 400, 2**31-1, 2**31, 2**32, 2**100]:
            self.assertEqual(round(123.456, n), 123.456)
            self.assertEqual(round(-123.456, n), -123.456)
            self.assertEqual(round(1e300, n), 1e300)
            self.assertEqual(round(1e-320, n), 1e-320)
        self.assertEqual(round(1e150, 300), 1e150)
        self.assertEqual(round(1e300, 307), 1e300)
        self.assertEqual(round(-3.1415, 308), -3.1415)
        self.assertEqual(round(1e150, 309), 1e150)
        self.assertEqual(round(1.4e-315, 315), 1e-315)
    @unittest.skip("brython, skip for now, rounding issues")
    def test_small_n(self):
        """Huge negative ndigits round everything to (signed) zero."""
        for n in [-308, -309, -400, 1-2**31, -2**31, -2**31-1, -2**100]:
            self.assertEqual(round(123.456, n), 0.0)
            self.assertEqual(round(-123.456, n), -0.0)
            self.assertEqual(round(1e300, n), 0.0)
            self.assertEqual(round(1e-320, n), 0.0)
    def test_overflow(self):
        """Rounding near float max with negative ndigits can overflow."""
        self.assertRaises(OverflowError, round, 1.6e308, -308)
        self.assertRaises(OverflowError, round, -1.7e308, -308)
    @unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
                         "applies only when using short float repr style")
    def test_previous_round_bugs(self):
        # particular cases that have occurred in bug reports
        self.assertEqual(round(562949953421312.5, 1),
                          562949953421312.5)
        self.assertEqual(round(56294995342131.5, 3),
                         56294995342131.5)
        # round-half-even
        self.assertEqual(round(25.0, -1), 20.0)
        self.assertEqual(round(35.0, -1), 40.0)
        self.assertEqual(round(45.0, -1), 40.0)
        self.assertEqual(round(55.0, -1), 60.0)
        self.assertEqual(round(65.0, -1), 60.0)
        self.assertEqual(round(75.0, -1), 80.0)
        self.assertEqual(round(85.0, -1), 80.0)
        self.assertEqual(round(95.0, -1), 100.0)
    @unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
                         "applies only when using short float repr style")
    def test_matches_float_format(self):
        # round should give the same results as float formatting
        for i in range(500):
            x = i/1000.
            self.assertEqual(float(format(x, '.0f')), round(x, 0))
            self.assertEqual(float(format(x, '.1f')), round(x, 1))
            self.assertEqual(float(format(x, '.2f')), round(x, 2))
            self.assertEqual(float(format(x, '.3f')), round(x, 3))
        for i in range(5, 5000, 10):
            x = i/1000.
            self.assertEqual(float(format(x, '.0f')), round(x, 0))
            self.assertEqual(float(format(x, '.1f')), round(x, 1))
            self.assertEqual(float(format(x, '.2f')), round(x, 2))
            self.assertEqual(float(format(x, '.3f')), round(x, 3))
        for i in range(500):
            x = random.random()
            self.assertEqual(float(format(x, '.0f')), round(x, 0))
            self.assertEqual(float(format(x, '.1f')), round(x, 1))
            self.assertEqual(float(format(x, '.2f')), round(x, 2))
            self.assertEqual(float(format(x, '.3f')), round(x, 3))
    def test_format_specials(self):
        # Test formatting of nans and infs.
        def test(fmt, value, expected):
            # Test with both % and format().
            self.assertEqual(fmt % value, expected, fmt)
            fmt = fmt[1:] # strip off the %
            self.assertEqual(format(value, fmt), expected, fmt)
        for fmt in ['%e', '%f', '%g', '%.0e', '%.6f', '%.20g',
                    '%#e', '%#f', '%#g', '%#.20e', '%#.15f', '%#.3g']:
            pfmt = '%+' + fmt[1:]
            sfmt = '% ' + fmt[1:]
            test(fmt, INF, 'inf')
            test(fmt, -INF, '-inf')
            test(fmt, NAN, 'nan')
            test(fmt, -NAN, 'nan')
            # When asking for a sign, it's always provided. nans are
            #  always positive.
            test(pfmt, INF, '+inf')
            test(pfmt, -INF, '-inf')
            test(pfmt, NAN, '+nan')
            test(pfmt, -NAN, '+nan')
            # When using ' ' for a sign code, only infs can be negative.
            #  Others have a space.
            test(sfmt, INF, ' inf')
            test(sfmt, -INF, '-inf')
            test(sfmt, NAN, ' nan')
            test(sfmt, -NAN, ' nan')
class InfNanTest(unittest.TestCase):
    """Parsing and printing of infinities and NaNs via float()/repr()/str()."""
    def test_inf_from_str(self):
        """All case variations of 'inf'/'infinity' parse; prefixes/typos fail."""
        self.assertTrue(isinf(float("inf")))
        self.assertTrue(isinf(float("+inf")))
        self.assertTrue(isinf(float("-inf")))
        self.assertTrue(isinf(float("infinity")))
        self.assertTrue(isinf(float("+infinity")))
        self.assertTrue(isinf(float("-infinity")))
        self.assertEqual(repr(float("inf")), "inf")
        self.assertEqual(repr(float("+inf")), "inf")
        self.assertEqual(repr(float("-inf")), "-inf")
        self.assertEqual(repr(float("infinity")), "inf")
        self.assertEqual(repr(float("+infinity")), "inf")
        self.assertEqual(repr(float("-infinity")), "-inf")
        self.assertEqual(repr(float("INF")), "inf")
        self.assertEqual(repr(float("+Inf")), "inf")
        self.assertEqual(repr(float("-iNF")), "-inf")
        self.assertEqual(repr(float("Infinity")), "inf")
        self.assertEqual(repr(float("+iNfInItY")), "inf")
        self.assertEqual(repr(float("-INFINITY")), "-inf")
        self.assertEqual(str(float("inf")), "inf")
        self.assertEqual(str(float("+inf")), "inf")
        self.assertEqual(str(float("-inf")), "-inf")
        self.assertEqual(str(float("infinity")), "inf")
        self.assertEqual(str(float("+infinity")), "inf")
        self.assertEqual(str(float("-infinity")), "-inf")
        # truncated or extended spellings must be rejected
        self.assertRaises(ValueError, float, "info")
        self.assertRaises(ValueError, float, "+info")
        self.assertRaises(ValueError, float, "-info")
        self.assertRaises(ValueError, float, "in")
        self.assertRaises(ValueError, float, "+in")
        self.assertRaises(ValueError, float, "-in")
        self.assertRaises(ValueError, float, "infinit")
        self.assertRaises(ValueError, float, "+Infin")
        self.assertRaises(ValueError, float, "-INFI")
        self.assertRaises(ValueError, float, "infinitys")
        # double signs are never valid
        self.assertRaises(ValueError, float, "++Inf")
        self.assertRaises(ValueError, float, "-+inf")
        self.assertRaises(ValueError, float, "+-infinity")
        self.assertRaises(ValueError, float, "--Infinity")
    def test_inf_as_str(self):
        """Overflowing arithmetic prints as inf/-inf."""
        self.assertEqual(repr(1e300 * 1e300), "inf")
        self.assertEqual(repr(-1e300 * 1e300), "-inf")
        self.assertEqual(str(1e300 * 1e300), "inf")
        self.assertEqual(str(-1e300 * 1e300), "-inf")
    def test_nan_from_str(self):
        """All case/sign variations of 'nan' parse; near-misses fail."""
        self.assertTrue(isnan(float("nan")))
        self.assertTrue(isnan(float("+nan")))
        self.assertTrue(isnan(float("-nan")))
        self.assertEqual(repr(float("nan")), "nan")
        self.assertEqual(repr(float("+nan")), "nan")
        self.assertEqual(repr(float("-nan")), "nan")
        self.assertEqual(repr(float("NAN")), "nan")
        self.assertEqual(repr(float("+NAn")), "nan")
        self.assertEqual(repr(float("-NaN")), "nan")
        self.assertEqual(str(float("nan")), "nan")
        self.assertEqual(str(float("+nan")), "nan")
        self.assertEqual(str(float("-nan")), "nan")
        self.assertRaises(ValueError, float, "nana")
        self.assertRaises(ValueError, float, "+nana")
        self.assertRaises(ValueError, float, "-nana")
        self.assertRaises(ValueError, float, "na")
        self.assertRaises(ValueError, float, "+na")
        self.assertRaises(ValueError, float, "-na")
        self.assertRaises(ValueError, float, "++nan")
        self.assertRaises(ValueError, float, "-+NAN")
        self.assertRaises(ValueError, float, "+-NaN")
        self.assertRaises(ValueError, float, "--nAn")
    def test_nan_as_str(self):
        """inf * 0 yields a NaN that prints as 'nan' regardless of sign."""
        self.assertEqual(repr(1e300 * 1e300 * 0), "nan")
        self.assertEqual(repr(-1e300 * 1e300 * 0), "nan")
        self.assertEqual(str(1e300 * 1e300 * 0), "nan")
        self.assertEqual(str(-1e300 * 1e300 * 0), "nan")
    def test_inf_signs(self):
        """copysign sees the sign bit of +/-inf."""
        self.assertEqual(copysign(1.0, float('inf')), 1.0)
        self.assertEqual(copysign(1.0, float('-inf')), -1.0)
    @unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
                         "applies only when using short float repr style")
    def test_nan_signs(self):
        # When using the dtoa.c code, the sign of float('nan') should
        # be predictable.
        self.assertEqual(copysign(1.0, float('nan')), 1.0)
        self.assertEqual(copysign(1.0, float('-nan')), -1.0)
# Short aliases for float's hexadecimal conversion methods, used heavily
# by HexFloatTestCase below.
fromHex = float.fromhex
toHex = float.hex
class HexFloatTestCase(unittest.TestCase):
MAX = fromHex('0x.fffffffffffff8p+1024') # max normal
MIN = fromHex('0x1p-1022') # min normal
TINY = fromHex('0x0.0000000000001p-1022') # min subnormal
EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up
def identical(self, x, y):
# check that floats x and y are identical, or that both
# are NaNs
if isnan(x) or isnan(y):
if isnan(x) == isnan(y):
return
elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)):
return
self.fail('%r not identical to %r' % (x, y))
    def test_ends(self):
        """Check the class range constants against ldexp-derived references."""
        self.identical(self.MIN, ldexp(1.0, -1022))
        self.identical(self.TINY, ldexp(1.0, -1074))
        self.identical(self.EPS, ldexp(1.0, -52))
        self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970)))
    def test_invalid_inputs(self):
        """float.fromhex must raise ValueError for every malformed string."""
        invalid_inputs = [
            'infi',   # misspelt infinities and nans
            '-Infinit',
            '++inf',
            '-+Inf',
            '--nan',
            '+-NaN',
            'snan',
            'NaNs',
            'nna',
            'an',
            'nf',
            'nfinity',
            'inity',
            'iinity',
            '0xnan',
            '',
            ' ',
            'x1.0p0',
            '0xX1.0p0',
            '+ 0x1.0p0',  # internal whitespace
            '- 0x1.0p0',
            '0 x1.0p0',
            '0x 1.0p0',
            '0x1 2.0p0',
            '+0x1 .0p0',
            '0x1. 0p0',
            '-0x1.0 1p0',
            '-0x1.0 p0',
            '+0x1.0p +0',
            '0x1.0p -0',
            '0x1.0p 0',
            '+0x1.0p+ 0',
            '-0x1.0p- 0',
            '++0x1.0p-0', # double signs
            '--0x1.0p0',
            '+-0x1.0p+0',
            '-+0x1.0p0',
            '0x1.0p++0',
            '+0x1.0p+-0',
            '-0x1.0p-+0',
            '0x1.0p--0',
            '0x1.0.p0',
            '0x.p0', # no hex digits before or after point
            '0x1,p0', # wrong decimal point character
            '0x1pa',
            '0x1p\uff10',  # fullwidth Unicode digits
            '\uff10x1p0',
            '0x\uff11p0',
            '0x1.\uff10p0',
            '0x1p0 \n 0x2p0',
            '0x1p0\0 0x1p0',  # embedded null byte is not end of string
            ]
        for x in invalid_inputs:
            try:
                result = fromHex(x)
            except ValueError:
                pass
            else:
                self.fail('Expected float.fromhex(%r) to raise ValueError; '
                          'got %r instead' % (x, result))
def test_whitespace(self):
value_pairs = [
('inf', INF),
('-Infinity', -INF),
('nan', NAN),
('1.0', 1.0),
('-0x.2', -0.125),
('-0.0', -0.0)
]
whitespace = [
'',
' ',
'\t',
'\n',
'\n \t',
'\f',
'\v',
'\r'
]
for inp, expected in value_pairs:
for lead in whitespace:
for trail in whitespace:
got = fromHex(lead + inp + trail)
self.identical(got, expected)
def test_from_hex(self):
MIN = self.MIN;
MAX = self.MAX;
TINY = self.TINY;
EPS = self.EPS;
# two spellings of infinity, with optional signs; case-insensitive
self.identical(fromHex('inf'), INF)
self.identical(fromHex('+Inf'), INF)
self.identical(fromHex('-INF'), -INF)
self.identical(fromHex('iNf'), INF)
self.identical(fromHex('Infinity'), INF)
self.identical(fromHex('+INFINITY'), INF)
self.identical(fromHex('-infinity'), -INF)
self.identical(fromHex('-iNFiNitY'), -INF)
# nans with optional sign; case insensitive
self.identical(fromHex('nan'), NAN)
self.identical(fromHex('+NaN'), NAN)
self.identical(fromHex('-NaN'), NAN)
self.identical(fromHex('-nAN'), NAN)
# variations in input format
self.identical(fromHex('1'), 1.0)
self.identical(fromHex('+1'), 1.0)
self.identical(fromHex('1.'), 1.0)
self.identical(fromHex('1.0'), 1.0)
self.identical(fromHex('1.0p0'), 1.0)
self.identical(fromHex('01'), 1.0)
self.identical(fromHex('01.'), 1.0)
self.identical(fromHex('0x1'), 1.0)
self.identical(fromHex('0x1.'), 1.0)
self.identical(fromHex('0x1.0'), 1.0)
self.identical(fromHex('+0x1.0'), 1.0)
self.identical(fromHex('0x1p0'), 1.0)
self.identical(fromHex('0X1p0'), 1.0)
self.identical(fromHex('0X1P0'), 1.0)
self.identical(fromHex('0x1P0'), 1.0)
self.identical(fromHex('0x1.p0'), 1.0)
self.identical(fromHex('0x1.0p0'), 1.0)
self.identical(fromHex('0x.1p4'), 1.0)
self.identical(fromHex('0x.1p04'), 1.0)
self.identical(fromHex('0x.1p004'), 1.0)
self.identical(fromHex('0x1p+0'), 1.0)
self.identical(fromHex('0x1P-0'), 1.0)
self.identical(fromHex('+0x1p0'), 1.0)
self.identical(fromHex('0x01p0'), 1.0)
self.identical(fromHex('0x1p00'), 1.0)
self.identical(fromHex(' 0x1p0 '), 1.0)
self.identical(fromHex('\n 0x1p0'), 1.0)
self.identical(fromHex('0x1p0 \t'), 1.0)
self.identical(fromHex('0xap0'), 10.0)
self.identical(fromHex('0xAp0'), 10.0)
self.identical(fromHex('0xaP0'), 10.0)
self.identical(fromHex('0xAP0'), 10.0)
self.identical(fromHex('0xbep0'), 190.0)
self.identical(fromHex('0xBep0'), 190.0)
self.identical(fromHex('0xbEp0'), 190.0)
self.identical(fromHex('0XBE0P-4'), 190.0)
self.identical(fromHex('0xBEp0'), 190.0)
self.identical(fromHex('0xB.Ep4'), 190.0)
self.identical(fromHex('0x.BEp8'), 190.0)
self.identical(fromHex('0x.0BEp12'), 190.0)
# moving the point around
pi = fromHex('0x1.921fb54442d18p1')
self.identical(fromHex('0x.006487ed5110b46p11'), pi)
self.identical(fromHex('0x.00c90fdaa22168cp10'), pi)
self.identical(fromHex('0x.01921fb54442d18p9'), pi)
self.identical(fromHex('0x.03243f6a8885a3p8'), pi)
self.identical(fromHex('0x.06487ed5110b46p7'), pi)
self.identical(fromHex('0x.0c90fdaa22168cp6'), pi)
self.identical(fromHex('0x.1921fb54442d18p5'), pi)
self.identical(fromHex('0x.3243f6a8885a3p4'), pi)
self.identical(fromHex('0x.6487ed5110b46p3'), pi)
self.identical(fromHex('0x.c90fdaa22168cp2'), pi)
self.identical(fromHex('0x1.921fb54442d18p1'), pi)
self.identical(fromHex('0x3.243f6a8885a3p0'), pi)
self.identical(fromHex('0x6.487ed5110b46p-1'), pi)
self.identical(fromHex('0xc.90fdaa22168cp-2'), pi)
self.identical(fromHex('0x19.21fb54442d18p-3'), pi)
self.identical(fromHex('0x32.43f6a8885a3p-4'), pi)
self.identical(fromHex('0x64.87ed5110b46p-5'), pi)
self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi)
self.identical(fromHex('0x192.1fb54442d18p-7'), pi)
self.identical(fromHex('0x324.3f6a8885a3p-8'), pi)
self.identical(fromHex('0x648.7ed5110b46p-9'), pi)
self.identical(fromHex('0xc90.fdaa22168cp-10'), pi)
self.identical(fromHex('0x1921.fb54442d18p-11'), pi)
# ...
self.identical(fromHex('0x1921fb54442d1.8p-47'), pi)
self.identical(fromHex('0x3243f6a8885a3p-48'), pi)
self.identical(fromHex('0x6487ed5110b46p-49'), pi)
self.identical(fromHex('0xc90fdaa22168cp-50'), pi)
self.identical(fromHex('0x1921fb54442d18p-51'), pi)
self.identical(fromHex('0x3243f6a8885a30p-52'), pi)
self.identical(fromHex('0x6487ed5110b460p-53'), pi)
self.identical(fromHex('0xc90fdaa22168c0p-54'), pi)
self.identical(fromHex('0x1921fb54442d180p-55'), pi)
# results that should overflow...
self.assertRaises(OverflowError, fromHex, '-0x1p1024')
self.assertRaises(OverflowError, fromHex, '0x1p+1025')
self.assertRaises(OverflowError, fromHex, '+0X1p1030')
self.assertRaises(OverflowError, fromHex, '-0x1p+1100')
self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789')
self.assertRaises(OverflowError, fromHex, '+0X.8p+1025')
self.assertRaises(OverflowError, fromHex, '+0x0.8p1025')
self.assertRaises(OverflowError, fromHex, '-0x0.4p1026')
self.assertRaises(OverflowError, fromHex, '0X2p+1023')
self.assertRaises(OverflowError, fromHex, '0x2.p1023')
self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023')
self.assertRaises(OverflowError, fromHex, '+0X4p+1022')
self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023')
self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023')
self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023')
self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022')
self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970')
self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960')
self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960')
# ...and those that round to +-max float
self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX)
self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX)
self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX)
# zeros
self.identical(fromHex('0x0p0'), 0.0)
self.identical(fromHex('0x0p1000'), 0.0)
self.identical(fromHex('-0x0p1023'), -0.0)
self.identical(fromHex('0X0p1024'), 0.0)
self.identical(fromHex('-0x0p1025'), -0.0)
self.identical(fromHex('0X0p2000'), 0.0)
self.identical(fromHex('0x0p123456789123456789'), 0.0)
self.identical(fromHex('-0X0p-0'), -0.0)
self.identical(fromHex('-0X0p-1000'), -0.0)
self.identical(fromHex('0x0p-1023'), 0.0)
self.identical(fromHex('-0X0p-1024'), -0.0)
self.identical(fromHex('-0x0p-1025'), -0.0)
self.identical(fromHex('-0x0p-1072'), -0.0)
self.identical(fromHex('0X0p-1073'), 0.0)
self.identical(fromHex('-0x0p-1074'), -0.0)
self.identical(fromHex('0x0p-1075'), 0.0)
self.identical(fromHex('0X0p-1076'), 0.0)
self.identical(fromHex('-0X0p-2000'), -0.0)
self.identical(fromHex('-0x0p-123456789123456789'), -0.0)
# values that should underflow to 0
self.identical(fromHex('0X1p-1075'), 0.0)
self.identical(fromHex('-0X1p-1075'), -0.0)
self.identical(fromHex('-0x1p-123456789123456789'), -0.0)
self.identical(fromHex('0x1.00000000000000001p-1075'), TINY)
self.identical(fromHex('-0x1.1p-1075'), -TINY)
self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY)
# check round-half-even is working correctly near 0 ...
self.identical(fromHex('0x1p-1076'), 0.0)
self.identical(fromHex('0X2p-1076'), 0.0)
self.identical(fromHex('0X3p-1076'), TINY)
self.identical(fromHex('0x4p-1076'), TINY)
self.identical(fromHex('0X5p-1076'), TINY)
self.identical(fromHex('0X6p-1076'), 2*TINY)
self.identical(fromHex('0x7p-1076'), 2*TINY)
self.identical(fromHex('0X8p-1076'), 2*TINY)
self.identical(fromHex('0X9p-1076'), 2*TINY)
self.identical(fromHex('0xap-1076'), 2*TINY)
self.identical(fromHex('0Xbp-1076'), 3*TINY)
self.identical(fromHex('0xcp-1076'), 3*TINY)
self.identical(fromHex('0Xdp-1076'), 3*TINY)
self.identical(fromHex('0Xep-1076'), 4*TINY)
self.identical(fromHex('0xfp-1076'), 4*TINY)
self.identical(fromHex('0x10p-1076'), 4*TINY)
self.identical(fromHex('-0x1p-1076'), -0.0)
self.identical(fromHex('-0X2p-1076'), -0.0)
self.identical(fromHex('-0x3p-1076'), -TINY)
self.identical(fromHex('-0X4p-1076'), -TINY)
self.identical(fromHex('-0x5p-1076'), -TINY)
self.identical(fromHex('-0x6p-1076'), -2*TINY)
self.identical(fromHex('-0X7p-1076'), -2*TINY)
self.identical(fromHex('-0X8p-1076'), -2*TINY)
self.identical(fromHex('-0X9p-1076'), -2*TINY)
self.identical(fromHex('-0Xap-1076'), -2*TINY)
self.identical(fromHex('-0xbp-1076'), -3*TINY)
self.identical(fromHex('-0xcp-1076'), -3*TINY)
self.identical(fromHex('-0Xdp-1076'), -3*TINY)
self.identical(fromHex('-0xep-1076'), -4*TINY)
self.identical(fromHex('-0Xfp-1076'), -4*TINY)
self.identical(fromHex('-0X10p-1076'), -4*TINY)
# ... and near MIN ...
self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY)
self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN)
self.identical(fromHex('0x1.00000000000000p-1022'), MIN)
self.identical(fromHex('0x1.00000000000002p-1022'), MIN)
self.identical(fromHex('0x1.00000000000004p-1022'), MIN)
self.identical(fromHex('0x1.00000000000006p-1022'), MIN)
self.identical(fromHex('0x1.00000000000008p-1022'), MIN)
self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY)
# ... and near 1.0.
self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0)
self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0)
self.identical(fromHex('0X0.fffffffffffffep0'), 1.0)
self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0)
self.identical(fromHex('0X1.00000000000000p0'), 1.0)
self.identical(fromHex('0X1.00000000000001p0'), 1.0)
self.identical(fromHex('0x1.00000000000002p0'), 1.0)
self.identical(fromHex('0X1.00000000000003p0'), 1.0)
self.identical(fromHex('0x1.00000000000004p0'), 1.0)
self.identical(fromHex('0X1.00000000000005p0'), 1.0)
self.identical(fromHex('0X1.00000000000006p0'), 1.0)
self.identical(fromHex('0X1.00000000000007p0'), 1.0)
self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'),
1.0)
self.identical(fromHex('0x1.00000000000008p0'), 1.0)
self.identical(fromHex('0x1.00000000000008000000000000000001p0'),
1+EPS)
self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'),
1.0+EPS)
self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.00000000000018000000000000000001p0'),
1.0+2*EPS)
self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS)
def test_roundtrip(self):
def roundtrip(x):
return fromHex(toHex(x))
for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY, self.TINY, 0.0]:
self.identical(x, roundtrip(x))
self.identical(-x, roundtrip(-x))
# fromHex(toHex(x)) should exactly recover x, for any non-NaN float x.
import random
for i in range(10000):
e = random.randrange(-1200, 1200)
m = random.random()
s = random.choice([1.0, -1.0])
try:
x = s*ldexp(m, e)
except OverflowError:
pass
else:
self.identical(x, fromHex(toHex(x)))
def test_main():
    """Run every float test case defined in this module."""
    test_classes = (
        GeneralFloatCases,
        FormatFunctionsTestCase,
        UnknownFormatTestCase,
        IEEEFormatTestCase,
        FormatTestCase,
        ReprTestCase,
        RoundTestCase,
        InfNanTest,
        HexFloatTestCase,
    )
    support.run_unittest(*test_classes)

if __name__ == '__main__':
    test_main()
|
"""
Test that 'stty -a' displays the same output before and after running the lldb command.
"""
from __future__ import print_function
import lldb
import six
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestSTTYBeforeAndAfter(TestBase):
    """Verify that running lldb under 'expect' leaves the terminal settings
    ('stty -a' output) unchanged."""

    mydir = TestBase.compute_mydir(__file__)

    @classmethod
    def classCleanup(cls):
        """Cleanup the test byproducts."""
        cls.RemoveTempFile("child_send1.txt")
        cls.RemoveTempFile("child_read1.txt")
        cls.RemoveTempFile("child_send2.txt")
        cls.RemoveTempFile("child_read2.txt")

    @expectedFailureAll(
        hostoslist=["windows"],
        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
    @no_debug_info_test
    def test_stty_dash_a_before_and_afetr_invoking_lldb_command(self):
        # NOTE: the 'afetr' typo in the method name is kept intentionally;
        # renaming would change the externally visible test id.
        """Test that 'stty -a' displays the same output before and after running the lldb command."""
        import pexpect
        if not which('expect'):
            self.skipTest(
                "The 'expect' program cannot be located, skip the test")

        # The expect prompt.
        expect_prompt = "expect[0-9.]+> "
        # The default lldb prompt.
        lldb_prompt = "(lldb) "

        # So that the child gets torn down after the test.
        import sys
        if sys.version_info.major == 3:
            self.child = pexpect.spawnu('expect')
        else:
            self.child = pexpect.spawn('expect')
        child = self.child

        child.expect(expect_prompt)
        child.setecho(True)
        if self.TraceOn():
            child.logfile = sys.stdout

        if self.platformIsDarwin():
            child.sendline('set env(TERM) xterm')
        else:
            child.sendline('set env(TERM) vt100')
        child.expect(expect_prompt)
        child.sendline('puts $env(TERM)')
        child.expect(expect_prompt)

        # Turn on loggings for input/output to/from the child.
        child.logfile_send = child_send1 = six.StringIO()
        child.logfile_read = child_read1 = six.StringIO()
        child.sendline('stty -a')
        child.expect(expect_prompt)

        # Now that the stage1 logging is done, restore logfile to None to
        # stop further logging.
        child.logfile_send = None
        child.logfile_read = None

        # Invoke the lldb command.
        child.sendline(lldbtest_config.lldbExec)
        child.expect_exact(lldb_prompt)

        # Immediately quit.
        child.sendline('quit')
        child.expect(expect_prompt)

        child.logfile_send = child_send2 = six.StringIO()
        child.logfile_read = child_read2 = six.StringIO()
        child.sendline('stty -a')
        child.expect(expect_prompt)

        child.sendline('exit')

        # Now that the stage2 logging is done, restore logfile to None to
        # stop further logging.
        child.logfile_send = None
        child.logfile_read = None

        if self.TraceOn():
            print("\n\nContents of child_send1:")
            print(child_send1.getvalue())
            print("\n\nContents of child_read1:")
            print(child_read1.getvalue())
            print("\n\nContents of child_send2:")
            print(child_send2.getvalue())
            print("\n\nContents of child_read2:")
            print(child_read2.getvalue())

        stty_output1_lines = child_read1.getvalue().splitlines()
        stty_output2_lines = child_read2.getvalue().splitlines()
        # Iterate the zip lazily (no need to materialize a list), and use
        # 'pair' rather than shadowing the builtin 'tuple'.  assertEqual
        # gives a useful diff on failure where assertTrue(a == b) does not.
        for pair in zip(stty_output1_lines, stty_output2_lines):
            if self.TraceOn():
                print("tuple->%s" % str(pair))
            # Every line should compare equal until the first blank line.
            if not pair[0]:
                break
            self.assertEqual(pair[0], pair[1])
|
"""Classes that replace tkinter gui objects used by an object being tested.
A gui object is anything with a master or parent paramenter, which is
typically required in spite of what the doc strings say.
"""
class Event:
    '''Minimal mock with attributes for testing event handlers.

    This is not a gui object, but is used as an argument for callbacks
    that access attributes of the event passed.  If a callback ignores
    the event, other than the fact that it happened, pass 'event'.

    Keyboard, mouse, window, and other sources generate Event instances.
    Event instances have the following attributes: serial (number of
    event), time (of event), type (of event as number), widget (in which
    event occurred), and x,y (position of mouse). There are other
    attributes for specific events, such as keycode for key events.
    tkinter.Event.__doc__ has more but is still not complete.
    '''
    def __init__(self, **kwds):
        "Create event with attributes needed for test"
        for name, value in kwds.items():
            setattr(self, name, value)
class Var:
    "Mock for tkinter String/Int/BooleanVar classes: incomplete."
    def __init__(self, master=None, value=None, name=None):
        # Store constructor arguments verbatim, as tkinter variables do.
        self.master, self.value, self.name = master, value, name

    def set(self, value):
        "Replace the stored value."
        self.value = value

    def get(self):
        "Return the stored value."
        return self.value
class Mbox_func:
    """Generic mock for messagebox functions, which all have the same signature.

    Instead of displaying a message box, the mock's call method saves the
    arguments as instance attributes, which test functions can then examine.
    The test can set the result returned to ask function.
    """
    def __init__(self, result=None):
        # Value handed back by __call__; None for all 'show*' functions,
        # set by the tester for 'ask*' functions.
        self.result = result

    def __call__(self, title, message, *args, **kwds):
        """Record all call arguments for later examination; return preset result."""
        self.title, self.message = title, message
        self.args, self.kwds = args, kwds
        return self.result
class Mbox:
    """Mock for tkinter.messagebox with an Mbox_func for each function.

    This module was 'tkMessageBox' in 2.x; hence the 'import as' in 3.x.
    Example usage in test_module.py for testing functions in module.py:
    ---
    from idlelib.idle_test.mock_tk import Mbox
    import module

    orig_mbox = module.tkMessageBox
    showerror = Mbox.showerror  # example, for attribute access in test methods

    class Test(unittest.TestCase):

        @classmethod
        def setUpClass(cls):
            module.tkMessageBox = Mbox

        @classmethod
        def tearDownClass(cls):
            module.tkMessageBox = orig_mbox
    ---
    For 'ask' functions, set func.result return value before calling the method
    that uses the message function. When tkMessageBox functions are the
    only gui calls in a method, this replacement makes the method gui-free.
    """
    askokcancel = Mbox_func()     # True or False
    askquestion = Mbox_func()     # 'yes' or 'no'
    askretrycancel = Mbox_func()  # True or False
    askyesno = Mbox_func()        # True or False
    askyesnocancel = Mbox_func()  # True, False, or None
    showerror = Mbox_func()       # None
    showinfo = Mbox_func()        # None
    showwarning = Mbox_func()     # None
from _tkinter import TclError
class Text:
    """A semi-functional non-gui replacement for tkinter.Text text editors.

    The mock's data model is that a text is a list of \n-terminated lines.
    The mock adds an empty string at the beginning of the list so that the
    index of actual lines start at 1, as with Tk. The methods never see this.
    Tk initializes files with a terminal \n that cannot be deleted. It is
    invisible in the sense that one cannot move the cursor beyond it.

    This class is only tested (and valid) with strings of ascii chars.
    For testing, we are not concerned with Tk Text's treatment of,
    for instance, 0-width characters or character + accent.
    """
    def __init__(self, master=None, cnf={}, **kw):
        '''Initialize mock, non-gui, text-only Text widget.

        At present, all args are ignored. Almost all affect visual behavior.
        There are just a few Text-only options that affect text behavior.
        (The mutable default for cnf mirrors tkinter's own signature; it is
        never mutated here.)
        '''
        self.data = ['', '\n']

    def index(self, index):
        "Return string version of index decoded according to current text."
        return "%s.%s" % self._decode(index, endflag=1)

    def _decode(self, index, endflag=0):
        """Return a (line, char) tuple of int indexes into self.data.

        This implements .index without converting the result back to a string.
        The result is constrained by the number of lines and linelengths of
        self.data. For many indexes, the result is initially (1, 0).

        The input index may have any of several possible forms:
        * line.char float: converted to 'line.char' string;
        * 'line.char' string, where line and char are decimal integers;
        * 'line.char lineend', where lineend='lineend' (and char is ignored);
        * 'line.end', where end='end' (same as above);
        * 'insert', the positions before terminal \n;
        * 'end', whose meaning depends on the endflag passed to ._endex.
        * 'sel.first' or 'sel.last', where sel is a tag -- not implemented.
        """
        if isinstance(index, (float, bytes)):
            index = str(index)
        try:
            index = index.lower()
        except AttributeError:
            raise TclError('bad text index "%s"' % index) from None

        lastline = len(self.data) - 1  # same as number of text lines
        if index == 'insert':
            return lastline, len(self.data[lastline]) - 1
        elif index == 'end':
            return self._endex(endflag)

        line, char = index.split('.')
        line = int(line)

        # Out of bounds line becomes first or last ('end') index
        if line < 1:
            return 1, 0
        elif line > lastline:
            return self._endex(endflag)

        linelength = len(self.data[line]) - 1  # position before/at \n
        if char.endswith(' lineend') or char == 'end':
            return line, linelength
            # Tk requires that ignored chars before ' lineend' be valid int

        # Out of bounds char becomes first or last index of line
        char = int(char)
        if char < 0:
            char = 0
        elif char > linelength:
            char = linelength
        return line, char

    def _endex(self, endflag):
        '''Return position for 'end' or line overflow corresponding to endflag.

        -1: position before terminal \n; for .insert(), .delete
        0: position after terminal \n; for .get, .delete index 1
        1: same viewed as beginning of non-existent next line (for .index)
        '''
        n = len(self.data)
        if endflag == 1:
            return n, 0
        else:
            n -= 1
            return n, len(self.data[n]) + endflag

    def insert(self, index, chars):
        "Insert chars before the character at index."
        if not chars:  # ''.splitlines() is [], not ['']
            return
        chars = chars.splitlines(True)
        if chars[-1][-1] == '\n':
            chars.append('')
        line, char = self._decode(index, -1)
        before = self.data[line][:char]
        after = self.data[line][char:]
        self.data[line] = before + chars[0]
        self.data[line+1:line+1] = chars[1:]
        self.data[line+len(chars)-1] += after

    def get(self, index1, index2=None):
        "Return slice from index1 to index2 (default is 'index1+1')."
        startline, startchar = self._decode(index1)
        if index2 is None:
            endline, endchar = startline, startchar+1
        else:
            endline, endchar = self._decode(index2)

        if startline == endline:
            return self.data[startline][startchar:endchar]
        else:
            lines = [self.data[startline][startchar:]]
            for i in range(startline+1, endline):
                lines.append(self.data[i])
            lines.append(self.data[endline][:endchar])
            return ''.join(lines)

    def delete(self, index1, index2=None):
        '''Delete slice from index1 to index2 (default is 'index1+1').

        Adjust default index2 ('index+1) for line ends.
        Do not delete the terminal \n at the very end of self.data ([-1][-1]).
        '''
        startline, startchar = self._decode(index1, -1)
        if index2 is None:
            if startchar < len(self.data[startline])-1:
                # not deleting \n
                endline, endchar = startline, startchar+1
            elif startline < len(self.data) - 1:
                # deleting non-terminal \n, convert 'index1+1 to start of next line
                endline, endchar = startline+1, 0
            else:
                # do not delete terminal \n if index1 == 'insert'
                return
        else:
            endline, endchar = self._decode(index2, -1)
            # restricting end position to insert position excludes terminal \n
        if startline == endline and startchar < endchar:
            self.data[startline] = self.data[startline][:startchar] + \
                                   self.data[startline][endchar:]
        elif startline < endline:
            self.data[startline] = self.data[startline][:startchar] + \
                                   self.data[endline][endchar:]
            startline += 1
            for i in range(startline, endline+1):
                del self.data[startline]

    def compare(self, index1, op, index2):
        "Return the result of comparing two indexes with a Tcl-style operator."
        line1, char1 = self._decode(index1)
        line2, char2 = self._decode(index2)
        if op == '<':
            return line1 < line2 or line1 == line2 and char1 < char2
        elif op == '<=':
            return line1 < line2 or line1 == line2 and char1 <= char2
        elif op == '>':
            return line1 > line2 or line1 == line2 and char1 > char2
        elif op == '>=':
            return line1 > line2 or line1 == line2 and char1 >= char2
        elif op == '==':
            return line1 == line2 and char1 == char2
        elif op == '!=':
            return line1 != line2 or char1 != char2
        else:
            raise TclError('''bad comparison operator "%s":'''
                           '''must be <, <=, ==, >=, >, or !=''' % op)

    # The following Text methods normally do something and return None.
    # Whether doing nothing is sufficient for a test will depend on the test.

    def mark_set(self, name, index):
        "Set mark *name* before the character at index."
        pass

    def mark_unset(self, *markNames):
        "Delete all marks in markNames."

    def tag_remove(self, tagName, index1, index2=None):
        "Remove tag tagName from all characters between index1 and index2."
        pass

    # The following Text methods affect the graphics screen and return None.
    # Doing nothing should always be sufficient for tests.

    def scan_dragto(self, x, y):
        "Adjust the view of the text according to scan_mark"

    def scan_mark(self, x, y):
        "Remember the current X, Y coordinates."

    def see(self, index):
        "Scroll screen to make the character at INDEX is visible."
        pass

    # The following is a Misc method inherited by Text.
    # It should properly go in a Misc mock, but is included here for now.

    def bind(self, sequence=None, func=None, add=None):
        # BUG FIX: 'self' was missing from the signature, so any call of the
        # form text.bind(seq, func, add) raised TypeError (4 args for 3).
        "Bind to this widget at event sequence a call to function func."
        pass
|
import sys

import matplotlib.pyplot as plt
import numpy as np

sys.path.append('../../')
from util import readCSVToMatrix

# Usage: python <script> <csv-file>
file_path = sys.argv[1]

# Close the handle deterministically instead of leaking it until interpreter
# exit; also avoid shadowing the builtin 'file'.
with open(file_path) as csv_file:
    print(csv_file)
    data = readCSVToMatrix(csv_file, delimiter=',')
print(data)

fig, ax = plt.subplots(1)
# Shaded band of +/- one standard deviation around the mean.  Raw strings
# keep '\p'/'\s'/'\m' from being treated as (invalid) escape sequences.
ax.fill_between(data[:, 0], data[:, 1] + data[:, 2], data[:, 1] - data[:, 2],
                facecolor='blue', alpha=0.5, label=r'$\pm \sigma$')
ax.plot(data[:, 0], data[:, 1], alpha=0.7, label=r'$\mu$')
ax.set_title('Scenario Space convergence for PPR')
ax.legend(loc='upper right')
ax.set_xlabel('number of scenario samples')
ax.set_ylabel(r'standard deviation $\mu$ $\pm \sigma$')
plt.show()
|
import logging
import os
import tempfile
import getpass
import werkzeug.urls
import werkzeug.exceptions
from openid import oidutil
from openid.store import filestore
from openid.consumer import consumer
from openid.cryptutil import randomString
from openid.extensions import ax, sreg
import openerp
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.addons.web.controllers.main import login_and_redirect, set_cookie_and_redirect
import openerp.http as http
from openerp.http import request
from .. import utils
# Module-level logger; python-openid's own logging is routed to it at debug level.
_logger = logging.getLogger(__name__)
oidutil.log = _logger.debug
def get_system_user():
    """Return system user info string, such as USERNAME-EUID."""
    try:
        username = getpass.getuser()
    except ImportError:
        if os.name != 'nt':
            raise
        # When there is no 'USERNAME' in the environment, getpass.getuser()
        # fails trying to import the unix-only 'pwd' module; fall back to
        # the real win32 API in that case.
        import win32api
        username = win32api.GetUserName()
    geteuid = getattr(os, 'geteuid', None)  # not available on some platforms
    if geteuid is None:
        return username
    return '%s-%d' % (username, geteuid())
# Per-system-user temp directory backing the file-based OpenID association store.
_storedir = os.path.join(tempfile.gettempdir(),
                         'openerp-auth_openid-%s-store' % get_system_user())
class GoogleAppsAwareConsumer(consumer.GenericConsumer):
    """OpenID consumer aware of Google Apps (hosted-domain) endpoints.

    For responses coming from a 'https://www.google.com/a/...' endpoint,
    rewrite claimed_id/identity to the public user-xrds proxy URL and
    re-sign the message with the stored association, so that standard
    verification can succeed.
    """
    def complete(self, message, endpoint, return_to):
        if message.getOpenIDNamespace() == consumer.OPENID2_NS:
            server_url = message.getArg(consumer.OPENID2_NS, 'op_endpoint', '')
            if server_url.startswith('https://www.google.com/a/'):
                assoc_handle = message.getArg(consumer.OPENID_NS, 'assoc_handle')
                assoc = self.store.getAssociation(server_url, assoc_handle)
                if assoc:
                    # update fields
                    for attr in ['claimed_id', 'identity']:
                        value = message.getArg(consumer.OPENID2_NS, attr, '')
                        # FIX: url_quote_plus lives in werkzeug.urls (already
                        # imported by this module); the bare
                        # werkzeug.url_quote_plus relied on Werkzeug's lazy
                        # top-level aliases, removed in modern Werkzeug.
                        value = 'https://www.google.com/accounts/o8/user-xrds?uri=%s' % werkzeug.urls.url_quote_plus(value)
                        message.setArg(consumer.OPENID2_NS, attr, value)

                    # now, resign the message
                    message.delArg(consumer.OPENID2_NS, 'sig')
                    message.delArg(consumer.OPENID2_NS, 'signed')
                    message = assoc.signMessage(message)

        return super(GoogleAppsAwareConsumer, self).complete(message, endpoint, return_to)
class OpenIDController(http.Controller):
_store = filestore.FileOpenIDStore(_storedir)
_REQUIRED_ATTRIBUTES = ['email']
_OPTIONAL_ATTRIBUTES = 'nickname fullname postcode country language timezone'.split()
def _add_extensions(self, oidrequest):
    """Add SReg and AX attribute-exchange extensions to the oidrequest."""
    oidrequest.addExtension(
        sreg.SRegRequest(required=self._REQUIRED_ATTRIBUTES,
                         optional=self._OPTIONAL_ATTRIBUTES))

    ax_request = ax.FetchRequest()
    # Register required attributes first, then optional ones, mapping each
    # SReg alias to its AX type URI.
    for is_required, aliases in ((True, self._REQUIRED_ATTRIBUTES),
                                 (False, self._OPTIONAL_ATTRIBUTES)):
        for alias in aliases:
            ax_request.add(ax.AttrInfo(utils.SREG2AX[alias],
                                       required=is_required, alias=alias))
    oidrequest.addExtension(ax_request)
def _get_attributes_from_success_response(self, success_response):
    """Collect user attributes from the SReg and AX responses.

    AX values overwrite SReg values for the same attribute, matching the
    original lookup order.
    """
    all_attrs = self._REQUIRED_ATTRIBUTES + self._OPTIONAL_ATTRIBUTES
    attrs = {}

    sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_response)
    if sreg_resp:
        for attr in all_attrs:
            value = sreg_resp.get(attr)
            if value is not None:
                attrs[attr] = value

    ax_resp = ax.FetchResponse.fromSuccessResponse(success_response)
    if ax_resp:
        for attr in all_attrs:
            value = ax_resp.getSingle(utils.SREG2AX[attr])
            if value is not None:
                attrs[attr] = value

    return attrs
def _get_realm(self):
    # OpenID realm: the root URL of the current HTTP host.
    # NOTE(review): presumably this is the trust root shown to the user by
    # the provider -- confirm it matches the deployment's public URL.
    return request.httprequest.host_url
@http.route('/auth_openid/login/verify_direct', type='http', auth='none')
def verify_direct(self, db, url):
    """HTTP endpoint starting OpenID verification for *url* on database *db*.

    Redirects to the provider, returns an auto-submitting form, or answers
    with 400 Bad Request on discovery failure.
    """
    outcome = self._verify(db, url)
    if 'error' in outcome:
        return werkzeug.exceptions.BadRequest(outcome['error'])
    value = outcome['value']
    return werkzeug.utils.redirect(value) if outcome['action'] == 'redirect' else value
@http.route('/auth_openid/login/verify', type='json', auth='none')
def verify(self, db, url):
    # JSON-RPC counterpart of verify_direct: return the raw result dict.
    return self._verify(db, url)
def _verify(self, db, url):
redirect_to = werkzeug.urls.Href(request.httprequest.host_url + 'auth_openid/login/process')(session_id=request.session_id)
realm = self._get_realm()
session = dict(dbname=db, openid_url=url) # TODO add origin page ?
oidconsumer = consumer.Consumer(session, self._store)
try:
oidrequest = oidconsumer.begin(url)
except consumer.DiscoveryFailure, exc:
fetch_error_string = 'Error in discovery: %s' % (str(exc[0]),)
return {'error': fetch_error_string, 'title': 'OpenID Error'}
if oidrequest is None:
return {'error': 'No OpenID services found', 'title': 'OpenID Error'}
request.session.openid_session = session
self._add_extensions(oidrequest)
if oidrequest.shouldSendRedirect():
redirect_url = oidrequest.redirectURL(realm, redirect_to)
return {'action': 'redirect', 'value': redirect_url, 'session_id': request.session_id}
else:
form_html = oidrequest.htmlMarkup(realm, redirect_to)
return {'action': 'post', 'value': form_html, 'session_id': request.session_id}
@http.route('/auth_openid/login/process', type='http', auth='none')
def process(self, **kw):
session = getattr(request.session, 'openid_session', None)
if not session:
return set_cookie_and_redirect('/')
oidconsumer = consumer.Consumer(session, self._store, consumer_class=GoogleAppsAwareConsumer)
query = request.httprequest.args
info = oidconsumer.complete(query, request.httprequest.base_url)
display_identifier = info.getDisplayIdentifier()
session['status'] = info.status
if info.status == consumer.SUCCESS:
dbname = session['dbname']
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
Modules = registry.get('ir.module.module')
installed = Modules.search_count(cr, SUPERUSER_ID, ['&', ('name', '=', 'auth_openid'), ('state', '=', 'installed')]) == 1
if installed:
Users = registry.get('res.users')
#openid_url = info.endpoint.canonicalID or display_identifier
openid_url = session['openid_url']
attrs = self._get_attributes_from_success_response(info)
attrs['openid_url'] = openid_url
session['attributes'] = attrs
openid_email = attrs.get('email', False)
domain = []
if openid_email:
domain += ['|', ('openid_email', '=', False)]
domain += [('openid_email', '=', openid_email)]
domain += [('openid_url', '=', openid_url), ('active', '=', True)]
ids = Users.search(cr, SUPERUSER_ID, domain)
assert len(ids) < 2
if ids:
user_id = ids[0]
login = Users.browse(cr, SUPERUSER_ID, user_id).login
key = randomString(utils.KEY_LENGTH, '0123456789abcdef')
Users.write(cr, SUPERUSER_ID, [user_id], {'openid_key': key})
# TODO fill empty fields with the ones from sreg/ax
cr.commit()
return login_and_redirect(dbname, login, key)
session['message'] = 'This OpenID identifier is not associated to any active users'
elif info.status == consumer.SETUP_NEEDED:
session['message'] = info.setup_url
elif info.status == consumer.FAILURE and display_identifier:
fmt = "Verification of %s failed: %s"
session['message'] = fmt % (display_identifier, info.message)
else: # FAILURE
# Either we don't understand the code or there is no
# openid_url included with the error. Give a generic
# failure message. The library should supply debug
# information in a log.
session['message'] = 'Verification failed.'
return set_cookie_and_redirect('/#action=login&loginerror=1')
@http.route('/auth_openid/login/status', type='json', auth='none')
def status(self):
session = getattr(request.session, 'openid_session', {})
return {'status': session.get('status'), 'message': session.get('message')}
|
"""Support for Greenwave Reality (TCP Connected) lights."""
from datetime import timedelta
import logging
import os
import greenwavereality as greenwave
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)

# Config key selecting the gateway firmware generation (v3 needs a token).
CONF_VERSION = "version"

# Brightness is the only light feature this platform exposes.
SUPPORTED_FEATURES = SUPPORT_BRIGHTNESS

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_HOST): cv.string, vol.Required(CONF_VERSION): cv.positive_int}
)

# Minimum interval between gateway polls (enforced by @Throttle).
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Greenwave Reality Platform.

    For version-3 gateways an access token is required; it is cached in
    the `.greenwave` file under the config dir and only re-requested
    (which requires the gateway to be in sync mode) when missing.
    """
    host = config.get(CONF_HOST)
    tokenfile = hass.config.path(".greenwave")
    if config.get(CONF_VERSION) == 3:
        if os.path.exists(tokenfile):
            # Use a distinct name for the handle so `tokenfile` keeps
            # meaning "the path" throughout.
            with open(tokenfile, encoding="utf8") as handle:
                token = handle.read()
        else:
            try:
                token = greenwave.grab_token(host, "hass", "homeassistant")
            except PermissionError:
                _LOGGER.error("The Gateway Is Not In Sync Mode")
                raise
            with open(tokenfile, "w+", encoding="utf8") as handle:
                handle.write(token)
    else:
        token = None
    bulbs = greenwave.grab_bulbs(host, token)
    # Share ONE throttled GatewayData across all lights. Creating a new
    # instance per bulb (as before) gave each entity its own throttle,
    # so N lights polled the gateway N times per interval.
    gatewaydata = GatewayData(host, token)
    add_entities(
        GreenwaveLight(device, host, token, gatewaydata)
        for device in bulbs.values()
    )
class GreenwaveLight(LightEntity):
    """A single dimmable bulb behind a Greenwave Reality gateway."""

    def __init__(self, light, host, token, gatewaydata):
        """Initialize a Greenwave Reality Light."""
        self._host = host
        self._token = token
        self._gatewaydata = gatewaydata
        self._did = int(light["did"])
        self._name = light["name"]
        self._state = int(light["state"])
        self._brightness = greenwave.hass_brightness(light)
        self._online = greenwave.check_online(light)

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORTED_FEATURES

    @property
    def available(self):
        """Return True if entity is available."""
        return self._online

    @property
    def name(self):
        """Return the display name of this light."""
        return self._name

    @property
    def brightness(self):
        """Return the brightness of the light."""
        return self._brightness

    @property
    def is_on(self):
        """Return true if light is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Instruct the light to turn on."""
        # Gateway expects a 0-100 percentage; HA supplies 0-255.
        percent = int((kwargs.get(ATTR_BRIGHTNESS, 255) / 255) * 100)
        greenwave.set_brightness(self._host, self._did, percent, self._token)
        greenwave.turn_on(self._host, self._did, self._token)

    def turn_off(self, **kwargs):
        """Instruct the light to turn off."""
        greenwave.turn_off(self._host, self._did, self._token)

    def update(self):
        """Fetch new state data for this light."""
        self._gatewaydata.update()
        bulb = self._gatewaydata.greenwave[self._did]
        self._state = int(bulb["state"])
        self._brightness = greenwave.hass_brightness(bulb)
        self._online = greenwave.check_online(bulb)
        self._name = bulb["name"]
class GatewayData:
    """Poll the Greenwave gateway, rate-limited by a throttle."""

    def __init__(self, host, token):
        """Prime the cache with an initial fetch from the gateway."""
        self._host = host
        self._token = token
        self._bulbs = greenwave.grab_bulbs(host, token)

    @property
    def greenwave(self):
        """Return the most recently fetched bulb mapping."""
        return self._bulbs

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Refresh the bulb mapping from the gateway (throttled)."""
        self._bulbs = greenwave.grab_bulbs(self._host, self._token)
        return self._bulbs
|
'''stream.py: module for defining Stream and Grouping for python topology'''
import collections
from heron.common.src.python.utils.misc import default_serializer
from heron.proto import topology_pb2
class Stream(object):
    """Heron output stream
    It is compatible with StreamParse API.
    """
    DEFAULT_STREAM_ID = "default"

    def __init__(self, fields=None, name=DEFAULT_STREAM_ID, direct=False):
        """
        :type fields: `list` or `tuple` of `str`
        :param fields: field names for this stream
        :type name: str
        :param name: name of stream. Defaults to ``default``
        :type direct: bool
        :param direct: whether or not this stream is direct. Default is ``False``
        """
        # Normalize fields to a plain list and validate every entry.
        if fields is None:
            normalized = []
        elif isinstance(fields, (list, tuple)):
            normalized = list(fields)
            for entry in normalized:
                if not isinstance(entry, str):
                    raise TypeError("All field names must be strings, given: %s" % str(entry))
        else:
            raise TypeError("Stream fields must be a list, tuple or None, given: %s" % str(fields))
        # self.fields is always list
        self.fields = normalized

        # Validate the stream name with guard clauses.
        if name is None:
            raise TypeError("Stream's name cannot be None")
        if not isinstance(name, str):
            raise TypeError("Stream name must be a string, given: %s" % str(name))
        self.stream_id = name

        if not isinstance(direct, bool):
            raise TypeError("'direct' must be either True or False, given: %s" % str(direct))
        self.direct = direct
        if self.direct:
            raise NotImplementedError("Direct stream is not supported yet.")
class Grouping(object):
    """Helper class for defining Grouping for Python topology"""

    # Simple grouping types, taken directly from the protobuf enum.
    SHUFFLE = topology_pb2.Grouping.Value("SHUFFLE")
    ALL = topology_pb2.Grouping.Value("ALL")
    LOWEST = topology_pb2.Grouping.Value("LOWEST")
    NONE = topology_pb2.Grouping.Value("NONE")
    DIRECT = topology_pb2.Grouping.Value("DIRECT")

    # gtype should contain topology_pb2.Grouping.Value("FIELDS")
    FIELDS = collections.namedtuple('FieldGrouping', 'gtype, fields')
    # gtype should contain topology_pb2.Grouping.Value("CUSTOM")
    CUSTOM = collections.namedtuple('CustomGrouping', 'gtype, python_serialized')

    # StreamParse compatibility
    GLOBAL = LOWEST
    LOCAL_OR_SHUFFLE = SHUFFLE

    @classmethod
    def is_grouping_sane(cls, gtype):
        """Checks if a given gtype is sane"""
        if gtype == cls.SHUFFLE or gtype == cls.ALL or gtype == cls.LOWEST or gtype == cls.NONE:
            return True
        elif isinstance(gtype, cls.FIELDS):
            return gtype.gtype == topology_pb2.Grouping.Value("FIELDS") and \
                   gtype.fields is not None
        elif isinstance(gtype, cls.CUSTOM):
            return gtype.gtype == topology_pb2.Grouping.Value("CUSTOM") and \
                   gtype.python_serialized is not None
        else:
            #pylint: disable=fixme
            #TODO: DIRECT are not supported yet
            return False

    @classmethod
    def fields(cls, *fields):
        """Field grouping

        Accepts either a single list of field names, or the names as
        separate positional arguments; raises for non-string or empty
        field lists.
        """
        if len(fields) == 1 and isinstance(fields[0], list):
            fields = fields[0]
        else:
            fields = list(fields)
        for i in fields:
            if not isinstance(i, str):
                raise TypeError("Non-string cannot be specified in fields")
        if not fields:
            raise ValueError("List cannot be empty for fields grouping")
        return cls.FIELDS(gtype=topology_pb2.Grouping.Value("FIELDS"),
                          fields=fields)

    @classmethod
    def custom(cls, classpath):
        """Custom grouping from a given classpath to an implementation of ICustomGrouping
        This method does not exist in the Streamparse API.
        :param classpath: classpath to the ICustomGrouping class to use
        """
        if classpath is None or not isinstance(classpath, str):
            raise TypeError("Argument to custom() must be classpath string to custom grouping, given: "
                            "%s" % str(classpath))
        # The classpath itself is serialized; deserialization and
        # instantiation happen on the executing instance.
        serialized = default_serializer.serialize(classpath)
        return cls.custom_serialized(serialized, is_java=False)

    @classmethod
    def custom_serialized(cls, serialized, is_java=True):
        """Custom grouping from a given serialized string
        This class is created for compatibility with ``custom_serialized(cls, java_serialized)`` method
        of StreamParse API, although its functionality is not yet implemented (Java-serialized).
        Currently only custom grouping implemented in Python is supported, and ``custom()`` method
        should be used to indicate its classpath, rather than directly to use this method.
        In the future, users can directly specify Java-serialized object with ``is_java=True`` in order
        to use a custom grouping implemented in Java for python topology.
        :param serialized: serialized classpath to custom grouping class to use (if python)
        :param is_java: indicate whether this is Java serialized, or python serialized
        """
        if not isinstance(serialized, bytes):
            raise TypeError("Argument to custom_serialized() must be "
                            "a serialized Python class as bytes, given: %s" % str(serialized))
        if not is_java:
            return cls.CUSTOM(gtype=topology_pb2.Grouping.Value("CUSTOM"),
                              python_serialized=serialized)
        else:
            raise NotImplementedError("Custom grouping implemented in Java for Python topology"
                                      "is not yet supported.")

    @classmethod
    def custom_object(cls, java_class_name, arg_list):
        """Tuples will be assigned to tasks by the given Java class."""
        raise NotImplementedError("custom_object() method is not yet implemented")
|
from oslo.config import cfg
from oslo.db import exception as db_exc
import sqlalchemy as sa
from neutron.common import exceptions as exc
from neutron.db import model_base
from neutron.i18n import _LI, _LW
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)

# Config options registered under the [ml2_type_flat] section.
flat_opts = [
    cfg.ListOpt('flat_networks',
                default=[],
                help=_("List of physical_network names with which flat "
                       "networks can be created. Use * to allow flat "
                       "networks with arbitrary physical_network names."))
]

cfg.CONF.register_opts(flat_opts, "ml2_type_flat")
class FlatAllocation(model_base.BASEV2):
    """Represent persistent allocation state of a physical network.

    If a record exists for a physical network, then that physical
    network has been allocated as a flat network.
    """

    __tablename__ = 'ml2_flat_allocations'

    # One row per allocated physical network; the name is the whole key.
    physical_network = sa.Column(sa.String(64), nullable=False,
                                 primary_key=True)
class FlatTypeDriver(api.TypeDriver):
    """Manage state for flat networks with ML2.

    The FlatTypeDriver implements the 'flat' network_type. Flat
    network segments provide connectivity between VMs and other
    devices using any connected IEEE 802.1D conformant
    physical_network, without the use of VLAN tags, tunneling, or
    other segmentation mechanisms. Therefore at most one flat network
    segment can exist on each available physical_network.
    """

    def __init__(self):
        self._parse_networks(cfg.CONF.ml2_type_flat.flat_networks)

    def _parse_networks(self, entries):
        """Interpret the flat_networks option.

        '*' means arbitrary physical_network names are allowed (stored
        as None); otherwise every configured name must be non-empty.
        """
        self.flat_networks = entries
        if '*' in self.flat_networks:
            LOG.info(_LI("Arbitrary flat physical_network names allowed"))
            self.flat_networks = None
        elif not all(self.flat_networks):
            msg = _("physical network name is empty")
            raise exc.InvalidInput(error_message=msg)
        else:
            LOG.info(_LI("Allowable flat physical_network names: %s"),
                     self.flat_networks)

    def get_type(self):
        return p_const.TYPE_FLAT

    def initialize(self):
        LOG.info(_LI("ML2 FlatTypeDriver initialization complete"))

    def is_partial_segment(self, segment):
        # A flat segment is always fully specified by the caller.
        return False

    def validate_provider_segment(self, segment):
        """Ensure the segment names an allowed physical network and
        carries no attributes other than type and physical_network.
        """
        physical_network = segment.get(api.PHYSICAL_NETWORK)
        if not physical_network:
            msg = _("physical_network required for flat provider network")
            raise exc.InvalidInput(error_message=msg)
        if self.flat_networks and physical_network not in self.flat_networks:
            msg = (_("physical_network '%s' unknown for flat provider network")
                   % physical_network)
            raise exc.InvalidInput(error_message=msg)
        # NOTE: dict.iteritems() — this module targets Python 2.
        for key, value in segment.iteritems():
            if value and key not in [api.NETWORK_TYPE,
                                     api.PHYSICAL_NETWORK]:
                msg = _("%s prohibited for flat provider network") % key
                raise exc.InvalidInput(error_message=msg)

    def reserve_provider_segment(self, session, segment):
        """Persist the allocation; a duplicate row means the physical
        network already hosts a flat network.
        """
        physical_network = segment[api.PHYSICAL_NETWORK]
        with session.begin(subtransactions=True):
            try:
                LOG.debug("Reserving flat network on physical "
                          "network %s", physical_network)
                alloc = FlatAllocation(physical_network=physical_network)
                alloc.save(session)
            except db_exc.DBDuplicateEntry:
                raise exc.FlatNetworkInUse(
                    physical_network=physical_network)
        return segment

    def allocate_tenant_segment(self, session):
        # Tenant flat networks are not supported.
        return

    def release_segment(self, session, segment):
        """Delete the allocation row for this segment, if any."""
        physical_network = segment[api.PHYSICAL_NETWORK]
        with session.begin(subtransactions=True):
            count = (session.query(FlatAllocation).
                     filter_by(physical_network=physical_network).
                     delete())
        if count:
            LOG.debug("Releasing flat network on physical network %s",
                      physical_network)
        else:
            LOG.warning(_LW("No flat network found on physical network %s"),
                        physical_network)
|
"""
Copyright 2006 ThoughtWorks, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import test_default_server
import test_google
import test_i18n
import sys
def suite():
    """Assemble the full Selenium smoke-test suite."""
    cases = (
        unittest.makeSuite(test_default_server.TestDefaultServer),
        unittest.makeSuite(test_google.TestGoogle),
        unittest.makeSuite(test_i18n.TestI18n),
    )
    return unittest.TestSuite(cases)
if __name__ == "__main__":
    # Run the suite; exit non-zero when any test failed so CI notices.
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    sys.exit(not result.wasSuccessful())
|
import pytest
from thefuck.rules.git_not_command import match, get_new_command
from tests.utils import Command
@pytest.fixture
def git_not_command():
    """Stderr for a mistyped git subcommand with a single suggestion."""
    return """git: 'brnch' is not a git command. See 'git --help'.
Did you mean this?
branch
"""
@pytest.fixture
def git_not_command_one_of_this():
    """Stderr for a mistyped git subcommand with several suggestions."""
    return """git: 'st' is not a git command. See 'git --help'.
Did you mean one of these?
status
reset
stage
stash
stats
"""
@pytest.fixture
def git_not_command_closest():
    """Stderr with tab-indented suggestions, closest match not first."""
    return '''git: 'tags' is not a git command. See 'git --help'.
Did you mean one of these?
\tstage
\ttag
'''
@pytest.fixture
def git_command():
    """Output of a successful git command (no error to correct)."""
    return "* master"
def test_match(git_not_command, git_command, git_not_command_one_of_this):
    """match() fires only for `git` commands whose stderr has suggestions."""
    assert match(Command('git brnch', stderr=git_not_command), None)
    assert match(Command('git st', stderr=git_not_command_one_of_this), None)
    # Non-git commands and successful git runs must not match.
    assert not match(Command('ls brnch', stderr=git_not_command), None)
    assert not match(Command('git branch', stderr=git_command), None)
def test_get_new_command(git_not_command, git_not_command_one_of_this,
                         git_not_command_closest):
    """Suggestions are returned ordered by similarity to the typo."""
    assert get_new_command(Command('git brnch', stderr=git_not_command), None) \
        == ['git branch']
    assert get_new_command(Command('git st', stderr=git_not_command_one_of_this),
                           None) == ['git stats', 'git stash', 'git stage']
    assert get_new_command(Command('git tags', stderr=git_not_command_closest),
                           None) == ['git tag', 'git stage']
|
'''tzinfo timezone information for US/Hawaii.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Hawaii(DstTzInfo):
    '''US/Hawaii timezone definition. See datetime.tzinfo for details'''

    zone = 'US/Hawaii'

    # UTC instants at which the offset/abbreviation changes (generated data).
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1933,4,30,12,30,0),
        d(1933,5,21,11,30,0),
        d(1942,2,9,12,30,0),
        d(1945,8,14,23,0,0),
        d(1945,9,30,11,30,0),
        d(1947,6,8,12,30,0),
    ]

    # (utcoffset seconds, dst seconds, tzname) matching each transition above.
    _transition_info = [
        i(-37800,0,'HST'),
        i(-34200,3600,'HDT'),
        i(-37800,0,'HST'),
        i(-34200,3600,'HWT'),
        i(-34200,3600,'HPT'),
        i(-37800,0,'HST'),
        i(-36000,0,'HST'),
    ]

# pytz exposes a singleton instance under the class name, not the class.
Hawaii = Hawaii()
|
"""
***************************************************************************
lasthin.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
class lasthin(LAStoolsAlgorithm):
    """QGIS Processing wrapper around the LAStools `lasthin` executable."""

    # Parameter identifiers used by the Processing framework.
    THIN_STEP = "THIN_STEP"
    OPERATION = "OPERATION"
    OPERATIONS = ["lowest", "random", "highest"]
    WITHHELD = "WITHHELD"
    CLASSIFY_AS = "CLASSIFY_AS"
    CLASSIFY_AS_CLASS = "CLASSIFY_AS_CLASS"

    def defineCharacteristics(self):
        """Declare the algorithm name, group, and its GUI parameters."""
        self.name = "lasthin"
        self.group = "LAStools"
        self.addParametersVerboseGUI()
        self.addParametersPointInputGUI()
        self.addParameter(ParameterNumber(lasthin.THIN_STEP,
            self.tr("size of grid used for thinning"), 0, None, 1.0))
        self.addParameter(ParameterSelection(lasthin.OPERATION,
            self.tr("keep particular point per cell"), lasthin.OPERATIONS, 0))
        self.addParameter(ParameterBoolean(lasthin.WITHHELD,
            self.tr("mark thinned-away points as withheld"), False))
        self.addParameter(ParameterBoolean(lasthin.CLASSIFY_AS,
            self.tr("classify surviving points as class"), False))
        self.addParameter(ParameterNumber(lasthin.CLASSIFY_AS_CLASS,
            self.tr("class"), 0, None, 8))
        self.addParametersPointOutputGUI()
        self.addParametersAdditionalGUI()

    def processAlgorithm(self, progress):
        """Build the lasthin command line from parameter values and run it."""
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasthin")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputCommands(commands)
        step = self.getParameterValue(lasthin.THIN_STEP)
        if step != 0.0:
            commands.append("-step")
            commands.append(str(step))
        operation = self.getParameterValue(lasthin.OPERATION)
        # Index 0 ("lowest") is the tool's default, so no flag is needed.
        if operation != 0:
            commands.append("-" + self.OPERATIONS[operation])
        if self.getParameterValue(lasthin.WITHHELD):
            commands.append("-withheld")
        if self.getParameterValue(lasthin.CLASSIFY_AS):
            commands.append("-classify_as")
            commands.append(str(self.getParameterValue(lasthin.CLASSIFY_AS_CLASS)))
        self.addParametersPointOutputCommands(commands)
        self.addParametersAdditionalCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
|
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_full_argument_spec,
)
ANSIBLE_METADATA = {'status': 'preview',
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_clusters_facts
short_description: Retrieve facts about one or more oVirt clusters
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt clusters."
notes:
- "This module creates a new top-level C(ovirt_clusters) fact, which
contains a list of clusters."
options:
pattern:
description:
- "Search term which is accepted by oVirt search backend."
- "For example to search cluster X from datacenter Y use following pattern:
name=X and datacenter=Y"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
- ovirt_clusters_facts:
pattern: name=production*
- debug:
var: ovirt_clusters
'''
# Documentation of the fact returned by this module (shown by ansible-doc).
# Fixes the "attribues" typo in the original text.
RETURN = '''
ovirt_clusters:
    description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
       all clusters attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/cluster."
    returned: On success.
    type: list
'''
def main():
    """Gather oVirt cluster facts and expose them as `ovirt_clusters`.

    Searches the engine with the user-supplied `pattern` and exits with
    a list of cluster dicts; any failure is reported via fail_json.
    """
    argument_spec = ovirt_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Bind before the try block: if create_connection() raises, the
    # original code hit a NameError on `connection` in the finally
    # clause, masking the real error.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        clusters_service = connection.system_service().clusters_service()
        clusters = clusters_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_clusters=[
                    get_dict_of_struct(c) for c in clusters
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            connection.close(logout=False)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_dns
short_description: Manage BIG-IP device DNS settings
description:
- Manage BIG-IP device DNS settings.
version_added: 2.2
options:
cache:
description:
- Specifies whether the system caches DNS lookups or performs the
operation each time a lookup is needed. Please note that this applies
only to Access Policy Manager features, such as ACLs, web application
rewrites, and authentication.
choices:
- enabled
- disabled
- enable
- disable
name_servers:
description:
- A list of name servers that the system uses to validate DNS lookups
search:
description:
- A list of domains that the system searches for local domain lookups,
to resolve local host names.
ip_version:
description:
- Specifies whether the DNS specifies IP addresses using IPv4 or IPv6.
choices:
- 4
- 6
state:
description:
- The state of the variable on the system. When C(present), guarantees
that an existing variable is set to C(value).
default: present
choices:
- absent
- present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set the DNS settings on the BIG-IP
bigip_device_dns:
name_servers:
- 208.67.222.222
- 208.67.220.220
search:
- localdomain
- lab.local
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
cache:
description: The new value of the DNS caching
returned: changed
type: str
sample: enabled
name_servers:
description: List of name servers that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
search:
description: List of search domains that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
ip_version:
description: IP version that was set that DNS will specify IP addresses in
returned: changed
type: int
sample: 4
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import is_empty_list
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import is_empty_list
class Parameters(AnsibleF5Parameters):
    """Maps between module arguments and the BIG-IP sys/dns REST resource."""

    # REST attribute name -> module parameter name.
    api_map = {
        'dns.cache': 'cache',
        'nameServers': 'name_servers',
        'include': 'ip_version',
    }

    # Attributes sent to the device on update.
    api_attributes = [
        'nameServers', 'search', 'include',
    ]

    # Parameters diffed for state=present.
    updatables = [
        'cache', 'name_servers', 'search', 'ip_version',
    ]

    # Parameters reported back in the module result.
    returnables = [
        'cache', 'name_servers', 'search', 'ip_version',
    ]

    # Parameters diffed for state=absent (list removal semantics).
    absentables = [
        'name_servers', 'search',
    ]
class ApiParameters(Parameters):
    """Parameters as read back from the device API (no translation needed)."""
    pass
class ModuleParameters(Parameters):
    """Parameters as supplied by the user, normalized toward API form."""

    @property
    def search(self):
        """Normalize `search` to a list: str -> [str], empty list -> []."""
        search = self._values['search']
        if search is None:
            return None
        if isinstance(search, str) and search != "":
            result = list()
            result.append(str(search))
            return result
        if is_empty_list(search):
            return []
        return search

    @property
    def name_servers(self):
        """Normalize `name_servers` to a list: str -> [str], empty -> []."""
        name_servers = self._values['name_servers']
        if name_servers is None:
            return None
        if isinstance(name_servers, str) and name_servers != "":
            result = list()
            result.append(str(name_servers))
            return result
        if is_empty_list(name_servers):
            return []
        return name_servers

    @property
    def cache(self):
        """Map enabled/enable -> 'enable'; any other value -> 'disable'."""
        if self._values['cache'] is None:
            return None
        if str(self._values['cache']) in ['enabled', 'enable']:
            return 'enable'
        else:
            return 'disable'

    @property
    def ip_version(self):
        """Translate 4/6 into the device's `include` option string."""
        if self._values['ip_version'] == 6:
            return "options inet6"
        elif self._values['ip_version'] == 4:
            # IPv4 is the device default, expressed as an empty include.
            return ""
        else:
            return None
class Changes(Parameters):
    """Container for the subset of parameters that changed."""

    def to_return(self):
        """Return a plain dict of the returnable changed values."""
        result = {}
        try:
            for returnable in self.returnables:
                change = getattr(self, returnable)
                if isinstance(change, dict):
                    result.update(change)
                else:
                    result[returnable] = change
            result = self._filter_params(result)
        except Exception:
            # Best-effort: a value that cannot be computed simply drops
            # out of the report instead of failing the module.
            pass
        return result
class UsableChanges(Changes):
    """Changes in the form that is sent to the device API."""
    pass
class ReportableChanges(Changes):
    """Changes translated back to user-facing values for the result."""

    @property
    def ip_version(self):
        # Inverse of ModuleParameters.ip_version: API string back to 4/6.
        if self._values['ip_version'] == 'options inet6':
            return 6
        elif self._values['ip_version'] == "":
            return 4
        else:
            return None
class Difference(object):
    """Compute, per parameter, the value that must change on the device.

    Each dedicated property returns None when no change is needed, [] to
    clear a list, or the value to apply. Parameters without a dedicated
    property fall back to a plain want-vs-have comparison.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for `param`, or None if unchanged."""
        try:
            return getattr(self, param)
        except AttributeError:
            return self._generic_compare(param)

    def _generic_compare(self, param):
        # Default diff: report the wanted value whenever it differs from
        # (or cannot be read from) the current device value.
        desired = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            return desired
        if desired != current:
            return desired

    @property
    def ip_version(self):
        wanted = self.want.ip_version
        current = self.have.ip_version
        if wanted is None:
            return None
        # "" (IPv4) against an unset device value is already the default.
        if wanted == "" and current is None:
            return None
        return wanted if wanted != current else None

    @property
    def name_servers(self):
        wanted = self.want.name_servers
        if wanted is None:
            return None
        current = self.have.name_servers
        if self.want.state == 'absent':
            # Removal semantics: keep whatever is NOT being removed.
            if current is None and wanted:
                return None
            if set(wanted) == set(current):
                return []
            return list(set(wanted).difference(current))
        if not wanted:
            # Empty list means "clear", which is a no-op if already unset.
            return None if current is None else wanted
        if current is None:
            return wanted
        if set(wanted) != set(current):
            return wanted

    @property
    def search(self):
        wanted = self.want.search
        if wanted is None:
            return None
        current = self.have.search
        if not wanted:
            # Empty list means "clear", which is a no-op if already unset.
            return None if current is None else wanted
        if self.want.state == 'absent':
            # Removal semantics: keep whatever is NOT being removed.
            if current is None and wanted:
                return None
            if set(wanted) == set(current):
                return []
            return list(set(wanted).difference(current))
        if current is None:
            return wanted
        if set(wanted) != set(current):
            return wanted
class ModuleManager(object):
    def __init__(self, *args, **kwargs):
        """Wire up the Ansible module, REST client, and parameter holders."""
        self.module = kwargs.pop('module', None)
        self.client = kwargs.pop('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _announce_deprecations(self, result):
        """Emit any deprecation warnings queued under '__warnings'."""
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def _update_changed_options(self):
        """Diff want vs have over `updatables`; stage changes.

        Returns True when at least one parameter differs.
        """
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def _absent_changed_options(self):
        """Diff want vs have over `absentables` (list-removal semantics).

        Returns True when at least one parameter must change.
        """
        diff = Difference(self.want, self.have)
        absentables = Parameters.absentables
        changed = dict()
        for k in absentables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def exec_module(self):
        """Entry point: apply the requested state and build the result dict."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.update()
        elif state == "absent":
            changed = self.absent()

        # Translate staged changes back into user-facing values.
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def should_absent(self):
result = self._absent_changed_options()
if result:
return True
return False
def absent(self):
self.have = self.read_current_from_device()
if not self.should_absent():
return False
if self.module.check_mode:
return True
self.absent_on_device()
return True
def read_dns_cache_setting(self):
uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
'dns.cache'
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def read_current_from_device(self):
cache = self.read_dns_cache_setting()
uri = "https://{0}:{1}/mgmt/tm/sys/dns/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if cache:
response['cache'] = cache['value']
return ApiParameters(params=response)
def update_on_device(self):
params = self.changes.api_params()
if params:
uri = "https://{0}:{1}/mgmt/tm/sys/dns/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.want.cache:
uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
'dns.cache'
)
payload = {"value": self.want.cache}
resp = self.client.api.patch(uri, json=payload)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/dns/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
    """Declares the parameters this Ansible module accepts."""
    def __init__(self):
        self.supports_check_mode = True
        spec = {
            'cache': {
                'choices': ['disabled', 'enabled', 'disable', 'enable'],
            },
            'name_servers': {'type': 'list'},
            'search': {'type': 'list'},
            'ip_version': {
                'choices': [4, 6],
                'type': 'int',
            },
            'state': {
                'default': 'present',
                'choices': ['absent', 'present'],
            },
        }
        # Start from the common F5 arguments, then layer module-specific ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(spec)
        # The caller must supply at least one of these options.
        self.required_one_of = [
            ['name_servers', 'search', 'ip_version', 'cache']
        ]
def main():
    # Module entry point: build the AnsibleModule, run the manager, and
    # report results or failure back to Ansible.
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        required_one_of=spec.required_one_of
    )
    client = F5RestClient(**module.params)
    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        # Always release the REST auth token, then fail the task.
        cleanup_tokens(client)
        fail_json(module, ex, client)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
# Accumulators for the parallel columns read from the input files below;
# index N of each list describes the same lab test.
test_names = []
test_sections = []
sample_types = []
descriptions = []
print_names = []
old_sort = []  # NOTE(review): never populated or read below — looks vestigial
uom = []
loinc_codes = []
sort_order = []
# One input file per column.  NOTE(review): not all handles are closed later
# (e.g. sample_type_file, sort_order_file) — acceptable for a one-shot script.
name_file = open('testName.txt','r')
test_section_file = open("testSections.txt",'r')
sample_type_file = open("sampleType.txt")
uom_file = open("newUOM.txt", 'r')
print_name_file = open("printName.txt")
loinc_file = open("loincCodes.txt")
# Generated SQL is written here.
results = open("output/testResults.txt", 'w')
sort_order_file = open("testOrder.txt")
def convert_to_existing_name( name ):
    """Map a (possibly mis-encoded French) section name onto the section
    name already used in the database.  Unknown names pass through unchanged.
    """
    translations = {
        'Hemato-immunologie': 'Hemto-Immunology',
        'H�mato-immunologie': 'Hemto-Immunology',
        'Biochimie': 'Biochemistry',
        'H�matologie': 'Hematology',
        'Hematologie': 'Hematology',
        'Immunologie': 'Immunology',
        'Immunologie-Serologie': 'Serology-Immunology',
        'Serology': 'Serology-Immunology',
        'Bacteriologie': 'Bacteria',
    }
    return translations.get(name, name)
def esc_char(name):
    """Quote *name* as a SQL string literal.

    Uses PostgreSQL dollar-quoting when the value itself contains a single
    quote, ordinary single quotes otherwise.
    """
    quote = "$$" if "'" in name else "'"
    return quote + name + quote
def use_uom( uom ):
    """True when *uom* is a non-empty unit of measure other than 'n/a'."""
    return bool(uom) and uom != 'n/a'
def remove_test_name_markup( test_name):
    """Strip a leading '(*)'-style marker from a test name.

    Names without a '*' are returned untouched.
    """
    if '*' not in test_name:
        return test_name
    # Keep only the segment after the first ')' (before any second one).
    pieces = test_name.split(')')
    return pieces[1].strip()
def create_description( test_name, sample_type):
    """Build the quoted 'name(sample)' description used in the INSERT.

    Sample types containing a comma are ambiguous and are dropped entirely.
    """
    cleaned = remove_test_name_markup(test_name)
    suffix = '' if ',' in sample_type else '(' + sample_type + ')'
    return esc_char(cleaned + suffix)
# Slurp each input file into its parallel list (one stripped line per entry).
for line in name_file:
    test_names.append(line.strip())
for line in print_name_file:
    # A '?' marks an unusable print name; blank it out.
    if '?' in line:
        line = ''
    print_names.append(line.strip())
print_name_file.close()
for line in test_section_file:
    test_sections.append(line.strip())
for line in sample_type_file:
    sample_types.append(line.strip())
name_file.close()
test_section_file.close()
for line in uom_file:
    uom.append(line.strip())
uom_file.close()
for line in loinc_file:
    loinc_codes.append(line.strip())
loinc_file.close()
# Sort orders start at a base offset; explicit values are scaled by 10,
# blank lines continue from the previous value in steps of 10.
base_sort_order = 600
last_order_value = base_sort_order
for line in sort_order_file:
    # BUGFIX: was `len(line) > 0`, which is always true because iterated
    # lines keep their trailing '\n' — the blank-line branch below was dead
    # and a blank line crashed on int('').  Test real content instead.
    if line.strip():
        value = int(line.strip()) * 10
        sort_order.append( base_sort_order + value)
        last_order_value = base_sort_order + value
    else:
        last_order_value += 10
        sort_order.append( last_order_value )
# Template for each generated INSERT; the VALUES clause is appended per row.
sql_head = "INSERT INTO test( id, uom_id, description, reporting_description, is_active, is_reportable, lastupdated, test_section_id, local_abbrev, sort_order, name, loinc, orderable )\n\t"
results.write("The following should go in Tests.sql Note\n")
for row in range(0, len(test_names)):
    # Skip essentially-empty names (single characters or blanks).
    if len(test_names[row]) > 1:
        description = create_description(test_names[row], sample_types[row])
        # Emit each distinct description only once.
        if description not in descriptions:
            descriptions.append(description)
            # A '*' in the raw name marks a test that must not be orderable.
            orderable = 'true'
            if '*' in test_names[row]:
                orderable = 'false'
            results.write( sql_head)
            results.write("VALUES ( nextval( 'test_seq' ) ," )
            # uom_id: looked up by name, or NULL when no usable unit exists.
            if use_uom(uom[row]):
                results.write(" ( select id from clinlims.unit_of_measure where name='" + uom[row] + "') , ")
            else:
                results.write(" null , ")
            results.write( description + " , " + esc_char(print_names[row]) + " , 'Y' , 'N' , now() , ")
            results.write("(select id from clinlims.test_section where name = '" + convert_to_existing_name(test_sections[row]) + "' ) ,")
            # local_abbrev is capped at 20 characters.
            results.write( esc_char( remove_test_name_markup(test_names[row])[:20]) + " ," + str(sort_order[row]) + " , " + esc_char(remove_test_name_markup(test_names[row])) + " , " + esc_char(loinc_codes[row] ) + ", " + orderable + ");\n")
results.close()
print "Done look for results in testResults.txt"
|
"""CSSImportRule implements DOM Level 2 CSS CSSImportRule plus the
``name`` property from http://www.w3.org/TR/css3-cascade/#cascading."""
__all__ = ['CSSImportRule']
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import cssrule
import cssutils
import os
import urlparse
import xml.dom
class CSSImportRule(cssrule.CSSRule):
    """
    Represents an @import rule within a CSS style sheet. The @import rule
    is used to import style rules from other style sheets.

    Setting ``href`` triggers fetching and parsing of the referenced sheet
    (see ``_setHref``); the result is exposed via the read-only
    ``styleSheet`` property.

    Format::

        import
          : IMPORT_SYM S*
            [STRING|URI] S* [ medium [ COMMA S* medium]* ]? S* STRING? S* ';' S*
          ;
    """
    def __init__(self, href=None, mediaText=None, name=None,
                 parentRule=None, parentStyleSheet=None, readonly=False):
        """
        If readonly allows setting of properties in constructor only

        :param href:
            location of the style sheet to be imported.
        :param mediaText:
            A list of media types for which this style sheet may be used
            as a string
        :param name:
            Additional name of imported style sheet
        """
        super(CSSImportRule, self).__init__(parentRule=parentRule,
                                            parentStyleSheet=parentStyleSheet)
        self._atkeyword = u'@import'
        self._styleSheet = None
        # string or uri used for reserialization
        self.hreftype = None
        # prepare seq: placeholder slots which the property setters below
        # replace in place.
        seq = self._tempSeq()
        seq.append(None, 'href')
        #seq.append(None, 'media')
        seq.append(None, 'name')
        self._setSeq(seq)
        # 1. media
        if mediaText:
            self.media = mediaText
        else:
            # must be all for @import
            self.media = cssutils.stylesheets.MediaList(mediaText=u'all')
        # 2. name
        self.name = name
        # 3. href and styleSheet (this triggers loading the imported sheet)
        self.href = href
        self._readonly = readonly
    def __repr__(self):
        # NOTE(review): the local mediaText computed here is unused; the
        # format below reads self.media.mediaText directly.
        if self._usemedia:
            mediaText = self.media.mediaText
        else:
            mediaText = None
        return u"cssutils.css.%s(href=%r, mediaText=%r, name=%r)" % (
                self.__class__.__name__,
                self.href,
                self.media.mediaText,
                self.name)
    def __str__(self):
        # Media is only shown when it is meaningful (not '' or 'all').
        if self._usemedia:
            mediaText = self.media.mediaText
        else:
            mediaText = None
        return u"<cssutils.css.%s object href=%r mediaText=%r name=%r at 0x%x>"\
               % (self.__class__.__name__,
                  self.href,
                  mediaText,
                  self.name,
                  id(self))
    _usemedia = property(lambda self: self.media.mediaText not in (u'', u'all'),
                         doc="if self.media is used (or simply empty)")
    def _getCssText(self):
        """Return serialized property cssText."""
        return cssutils.ser.do_CSSImportRule(self)
    def _setCssText(self, cssText):
        """
        :exceptions:
            - :exc:`~xml.dom.HierarchyRequestErr`:
              Raised if the rule cannot be inserted at this point in the
              style sheet.
            - :exc:`~xml.dom.InvalidModificationErr`:
              Raised if the specified CSS string value represents a different
              type of rule than the current one.
            - :exc:`~xml.dom.NoModificationAllowedErr`:
              Raised if the rule is readonly.
            - :exc:`~xml.dom.SyntaxErr`:
              Raised if the specified CSS string value has a syntax error and
              is unparsable.
        """
        super(CSSImportRule, self)._setCssText(cssText)
        tokenizer = self._tokenize2(cssText)
        attoken = self._nexttoken(tokenizer, None)
        if self._type(attoken) != self._prods.IMPORT_SYM:
            self._log.error(u'CSSImportRule: No CSSImportRule found: %s' %
                            self._valuestr(cssText),
                            error=xml.dom.InvalidModificationErr)
        else:
            # for closures: must be a mutable
            new = {'keyword': self._tokenvalue(attoken),
                   'href': None,
                   'hreftype': None,
                   'media': None,
                   'name': None,
                   'wellformed': True
                   }
            # The closures below are production handlers for self._parse();
            # each receives the currently expected token kind(s) and returns
            # the next expectation string.
            def __doname(seq, token):
                # called by _string or _ident
                new['name'] = self._stringtokenvalue(token)
                seq.append(new['name'], 'name')
                return ';'
            def _string(expected, seq, token, tokenizer=None):
                # A STRING may be the href (first position) or the name.
                if 'href' == expected:
                    # href
                    new['href'] = self._stringtokenvalue(token)
                    new['hreftype'] = 'string'
                    seq.append(new['href'], 'href')
                    return 'media name ;'
                elif 'name' in expected:
                    # name
                    return __doname(seq, token)
                else:
                    new['wellformed'] = False
                    self._log.error(
                        u'CSSImportRule: Unexpected string.', token)
                    return expected
            def _uri(expected, seq, token, tokenizer=None):
                # href
                if 'href' == expected:
                    uri = self._uritokenvalue(token)
                    new['hreftype'] = 'uri'
                    new['href'] = uri
                    seq.append(new['href'], 'href')
                    return 'media name ;'
                else:
                    new['wellformed'] = False
                    self._log.error(
                        u'CSSImportRule: Unexpected URI.', token)
                    return expected
            def _ident(expected, seq, token, tokenizer=None):
                # medialist ending with ; which is checked upon too
                if expected.startswith('media'):
                    mediatokens = self._tokensupto2(
                        tokenizer, importmediaqueryendonly=True)
                    mediatokens.insert(0, token) # push found token
                    last = mediatokens.pop() # retrieve ;
                    lastval, lasttyp = self._tokenvalue(last), self._type(last)
                    if lastval != u';' and lasttyp not in ('EOF',
                                                           self._prods.STRING):
                        new['wellformed'] = False
                        self._log.error(u'CSSImportRule: No ";" found: %s' %
                                        self._valuestr(cssText), token=token)
                    newMedia = cssutils.stylesheets.MediaList(parentRule=self)
                    newMedia.mediaText = mediatokens
                    if newMedia.wellformed:
                        new['media'] = newMedia
                        seq.append(newMedia, 'media')
                    else:
                        new['wellformed'] = False
                        self._log.error(u'CSSImportRule: Invalid MediaList: %s' %
                                        self._valuestr(cssText), token=token)
                    if lasttyp == self._prods.STRING:
                        # name
                        return __doname(seq, last)
                    else:
                        return 'EOF' # ';' is token "last"
                else:
                    new['wellformed'] = False
                    self._log.error(u'CSSImportRule: Unexpected ident.', token)
                    return expected
            def _char(expected, seq, token, tokenizer=None):
                # final ;
                val = self._tokenvalue(token)
                if expected.endswith(';') and u';' == val:
                    return 'EOF'
                else:
                    new['wellformed'] = False
                    self._log.error(
                        u'CSSImportRule: Unexpected char.', token)
                    return expected
            # import : IMPORT_SYM S* [STRING|URI]
            #   S* [ medium [ ',' S* medium]* ]? ';' S*
            #   STRING?  # see http://www.w3.org/TR/css3-cascade/#cascading
            # ;
            newseq = self._tempSeq()
            wellformed, expected = self._parse(expected='href',
                                               seq=newseq,
                                               tokenizer=tokenizer,
                                               productions={'STRING': _string,
                                                            'URI': _uri,
                                                            'IDENT': _ident,
                                                            'CHAR': _char},
                                               new=new)
            # wellformed set by parse
            ok = wellformed and new['wellformed']
            # post conditions
            if not new['href']:
                ok = False
                self._log.error(u'CSSImportRule: No href found: %s' %
                                self._valuestr(cssText))
            if expected != 'EOF':
                ok = False
                self._log.error(u'CSSImportRule: No ";" found: %s' %
                                self._valuestr(cssText))
            # set all only when the whole rule parsed cleanly; a partial
            # parse leaves the rule unchanged.
            if ok:
                self._setSeq(newseq)
                self.atkeyword = new['keyword']
                self.hreftype = new['hreftype']
                self.name = new['name']
                if new['media']:
                    self.media = new['media']
                else:
                    # must be all for @import
                    self.media = cssutils.stylesheets.MediaList(mediaText=u'all')
                # needs new self.media, so set href last
                self.href = new['href']
    cssText = property(fget=_getCssText, fset=_setCssText,
                       doc="(DOM) The parsable textual representation of this rule.")
    def _setHref(self, href):
        """Store the href, then fetch and parse the referenced style sheet.

        Loading failures are logged (never raised); ``hrefFound`` records
        whether the sheet could actually be read.
        """
        # set new href
        self._href = href
        # update seq
        for i, item in enumerate(self.seq):
            val, type_ = item.value, item.type
            if 'href' == type_:
                self._seq[i] = (href, type_, item.line, item.col)
                break
        importedSheet = cssutils.css.CSSStyleSheet(media=self.media,
                                                   ownerRule=self,
                                                   title=self.name)
        self.hrefFound = False
        # set styleSheet
        if href and self.parentStyleSheet:
            # loading errors are all catched!
            # relative href
            parentHref = self.parentStyleSheet.href
            if parentHref is None:
                # use cwd instead
                parentHref = cssutils.helper.path2url(os.getcwd()) + '/'
            fullhref = urlparse.urljoin(parentHref, self.href)
            # all possible exceptions are ignored
            try:
                usedEncoding, enctype, cssText = \
                    self.parentStyleSheet._resolveImport(fullhref)
                if cssText is None:
                    # catched in next except below!
                    raise IOError('Cannot read Stylesheet.')
                # contentEncoding with parentStyleSheet.overrideEncoding,
                # HTTP or parent
                encodingOverride, encoding = None, None
                if enctype == 0:
                    encodingOverride = usedEncoding
                elif 0 < enctype < 5:
                    encoding = usedEncoding
                # inherit fetcher for @imports in styleSheet
                importedSheet._href = fullhref
                importedSheet._setFetcher(self.parentStyleSheet._fetcher)
                importedSheet._setCssTextWithEncodingOverride(
                    cssText,
                    encodingOverride=encodingOverride,
                    encoding=encoding)
            except (OSError, IOError, ValueError), e:
                self._log.warn(u'CSSImportRule: While processing imported '
                               u'style sheet href=%s: %r'
                               % (self.href, e), neverraise=True)
            else:
                # used by resolveImports if to keep unprocessed href
                self.hrefFound = True
        self._styleSheet = importedSheet
    _href = None # needs to be set
    href = property(lambda self: self._href, _setHref,
                    doc=u"Location of the style sheet to be imported.")
    def _setMedia(self, media):
        """
        :param media:
            a :class:`~cssutils.stylesheets.MediaList` or string
        """
        self._checkReadonly()
        if isinstance(media, basestring):
            self._media = cssutils.stylesheets.MediaList(mediaText=media,
                                                         parentRule=self)
        else:
            media._parentRule = self
            self._media = media
        # update seq: replace an existing media slot, or (for-else) insert
        # a new one right after the href slot.
        ihref = 0
        for i, item in enumerate(self.seq):
            if item.type == 'href':
                ihref = i
            elif item.type == 'media':
                self.seq[i] = (self._media, 'media', None, None)
                break
        else:
            # if no media until now add after href
            self.seq.insert(ihref+1,
                            self._media, 'media', None, None)
    media = property(lambda self: self._media, _setMedia,
                     doc=u"(DOM) A list of media types for this rule "
                         u"of type :class:`~cssutils.stylesheets.MediaList`.")
    def _setName(self, name=u''):
        """Raises xml.dom.SyntaxErr if name is not a string."""
        if name is None or isinstance(name, basestring):
            # "" or '' handled as None
            if not name:
                name = None
            # save name
            self._name = name
            # update seq
            for i, item in enumerate(self.seq):
                val, typ = item.value, item.type
                if 'name' == typ:
                    self._seq[i] = (name, typ, item.line, item.col)
                    break
            # set title of imported sheet
            if self.styleSheet:
                self.styleSheet.title = name
        else:
            self._log.error(u'CSSImportRule: Not a valid name: %s' % name)
    name = property(lambda self: self._name, _setName,
                    doc=u"An optional name for the imported sheet.")
    styleSheet = property(lambda self: self._styleSheet,
                          doc=u"(readonly) The style sheet referred to by this "
                              u"rule.")
    type = property(lambda self: self.IMPORT_RULE,
                    doc=u"The type of this rule, as defined by a CSSRule "
                        u"type constant.")
    def _getWellformed(self):
        "Depending on if media is used at all."
        if self._usemedia:
            return bool(self.href and self.media.wellformed)
        else:
            return bool(self.href)
    wellformed = property(_getWellformed)
|
from __future__ import division, print_function, unicode_literals
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4, landscape
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT
from reportlab.lib.colors import HexColor
from geraldo import ReportBand
from geraldo import ObjectValue, Label
import os
# Directory of this module; used to locate bundled TTF fonts.
DIRNAME = os.path.dirname(__file__)
# "Margens e tamanhos padronizados" = standardized margins and page sizes.
''' Margens e tamanhos padronizados '''
RETRATO = A4                      # portrait page
PAISAGEM = landscape(A4)          # landscape page
MARGEM_SUPERIOR = 0.8*cm
MARGEM_INFERIOR = 0.8*cm
MARGEM_ESQUERDA = 0.8*cm
MARGEM_DIREITA = 0.8*cm
# Usable widths after subtracting the left/right margins.
LARGURA_RETRATO = RETRATO[0] - MARGEM_ESQUERDA - MARGEM_DIREITA
LARGURA_PAISAGEM = PAISAGEM[0] - MARGEM_ESQUERDA - MARGEM_DIREITA
# Extra TTF fonts to register: family -> ((face name, path, bold, italic), ...)
FONTES_ADICIONAIS = {
    #u'Gentium Book Basic': (
        #(u'Gentium Book Basic'            , DIRNAME + u'/fonts/genbkbasr.ttf' , False, False),
        #(u'Gentium Book Basic Bold'       , DIRNAME + u'/fonts/genbkbasb.ttf' , True , False),
        #(u'Gentium Book Basic Italic'     , DIRNAME + u'/fonts/genbkbasi.ttf' , False, True),
        #(u'Gentium Book Basic Bold Italic', DIRNAME + u'/fonts/genbkbasbi.ttf', True , True),
    #),
    u'DejaVu Sans': (
        (u'DejaVu Sans'            , DIRNAME + u'/fonts/DejaVuSans.ttf'             , False, False),
        (u'DejaVu Sans Bold'       , DIRNAME + u'/fonts/DejaVuSans-Bold.ttf'        , True , False),
        (u'DejaVu Sans Italic'     , DIRNAME + u'/fonts/DejaVuSans-Oblique.ttf'     , False, True),
        (u'DejaVu Sans Bold Italic', DIRNAME + u'/fonts/DejaVuSans-BoldOblique.ttf' , True , True),
    ),
}
# Font face aliases used by the style presets below.
FONTE_NORMAL = 'DejaVu Sans'
FONTE_NEGRITO = FONTE_NORMAL + ' Bold'
FONTE_ITALICO = FONTE_NORMAL + ' Italic'
FONTE_NEGRITO_ITALICO = FONTE_NORMAL + ' Bold Italic'
# Named point sizes, all derived from the base size 5.
FONTE_TAMANHO_5 = 5
FONTE_TAMANHO_6 = FONTE_TAMANHO_5 + 1
FONTE_TAMANHO_7 = FONTE_TAMANHO_5 + 2
FONTE_TAMANHO_8 = FONTE_TAMANHO_5 + 3
FONTE_TAMANHO_85 = FONTE_TAMANHO_5 + 3.5
FONTE_TAMANHO_9 = FONTE_TAMANHO_5 + 4
FONTE_TAMANHO_10 = FONTE_TAMANHO_5 * 2
FONTE_TAMANHO_11 = FONTE_TAMANHO_10 + 1
FONTE_TAMANHO_12 = FONTE_TAMANHO_10 + 2
FONTE_TAMANHO_14 = FONTE_TAMANHO_10 + 4
FONTE_TAMANHO_18 = FONTE_TAMANHO_10 + 8
FONTE_TAMANHO_40 = FONTE_TAMANHO_10 * 4
# Colors for the "stamp" overlays and watermark text.
VERMELHO_CARIMBO = HexColor(0xff9393)
CINZA_MARCADAGUA = HexColor(0x939393)
# Reusable style dicts (fontName/fontSize/alignment/leading) for the
# report widgets defined below and in sibling modules.
DESCRITIVO_BLOCO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_9, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_11}
DESCRICAO_VALOR_TOTAL = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_10, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_12}
VALOR_TOTAL = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_10, 'alignment': TA_RIGHT , 'leading': FONTE_TAMANHO_12}
DESCRITIVO_CAMPO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_5}
DESCRITIVO_CAMPO_NEGRITO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_5}
DESCRITIVO_PRODUTO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_5, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_7}
DADO_CHAVE = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_85, 'alignment': TA_CENTER}
DADO_VARIAVEL = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_9, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_11}
TITULO_CAMPO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_7, 'leading': FONTE_TAMANHO_9}
DADO_CAMPO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_7, 'leading': FONTE_TAMANHO_9}
DADO_CAMPO_NORMAL = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_7, 'leading': FONTE_TAMANHO_9}
DADO_CAMPO_NEGRITO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_10, 'leading': FONTE_TAMANHO_12}
DADO_CAMPO_NUMERICO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_7, 'alignment': TA_RIGHT, 'leading': FONTE_TAMANHO_9}
DADO_CAMPO_NUMERICO_NEGRITO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_7, 'alignment': TA_RIGHT, 'leading': FONTE_TAMANHO_9}
DADO_CAMPO_CENTRALIZADO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_7, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_9}
DADO_PRODUTO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_6, 'leading': FONTE_TAMANHO_8}
DADO_PRODUTO_NUMERICO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_6, 'alignment': TA_RIGHT, 'leading': FONTE_TAMANHO_8}
DADO_PRODUTO_CENTRALIZADO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_6, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_8}
DADO_COMPLEMENTAR = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_6, 'leading': FONTE_TAMANHO_8}
DESCRITIVO_DANFE = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_12, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_14}
DESCRITIVO_NUMERO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_10, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_10}
DESCRITIVO_DANFE_GERAL = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_7, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_7}
DESCRITIVO_DANFE_ES = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_7, 'alignment': TA_LEFT, 'leading': FONTE_TAMANHO_7}
# Large overlay texts: contingency notice, homologation watermark, and the
# boxed cancellation stamp.
OBS_CONTINGENCIA = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_18, 'alignment': TA_CENTER, 'textColor': CINZA_MARCADAGUA}
OBS_HOMOLOGACAO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_40, 'alignment': TA_CENTER, 'textColor': VERMELHO_CARIMBO}
OBS_CANCELAMENTO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_40, 'leading': FONTE_TAMANHO_40+24, 'alignment': TA_CENTER, 'textColor': VERMELHO_CARIMBO, 'borderWidth': 3, 'borderColor': VERMELHO_CARIMBO, 'borderRadius': 3}
DESCRITIVO_CAMPO_CANCELAMENTO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_5, 'leading': FONTE_TAMANHO_5, 'textColor': VERMELHO_CARIMBO, 'backColor': 'white'}
DADO_VARIAVEL_CANCELAMENTO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_9, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_11, 'textColor': VERMELHO_CARIMBO}
DADO_IMPRESSAO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_5, 'leading': FONTE_TAMANHO_7}
# Issuer ("emitente") header styles.
EMIT_NOME = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_12, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_14}
EMIT_DADOS = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_8, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_10}
class LabelMargemEsquerda(Label):
    """Bordered field label whose left edge is left open (no left border)."""
    def __init__(self):
        super(LabelMargemEsquerda, self).__init__()
        self.borders = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': False}
        pad = 0.08 * cm
        self.padding_top = pad
        self.padding_left = pad
        self.padding_bottom = pad
        self.padding_right = pad
        self.style = DESCRITIVO_CAMPO
        self.height = 0.70 * cm
class LabelMargemDireita(LabelMargemEsquerda):
    """Like LabelMargemEsquerda, but with the right edge open instead."""
    def __init__(self):
        super(LabelMargemDireita, self).__init__()
        self.borders = {'top': 0.1, 'right': False, 'bottom': 0.1, 'left': 0.1}
class Titulo(Label):
    """Field-title label: 0.1 cm padding all around, TITULO_CAMPO style."""
    def __init__(self, *args, **kwargs):
        super(Titulo, self).__init__(*args, **kwargs)
        for side in ('top', 'left', 'bottom', 'right'):
            setattr(self, 'padding_' + side, 0.1 * cm)
        self.style = TITULO_CAMPO
        self.height = 0.42 * cm
class Campo(ObjectValue):
    """Data-bound field value with uniform 0.1 cm padding and DADO_CAMPO style."""
    def __init__(self, *args, **kwargs):
        super(Campo, self).__init__(*args, **kwargs)
        for side in ('top', 'left', 'bottom', 'right'):
            setattr(self, 'padding_' + side, 0.1 * cm)
        self.style = DADO_CAMPO
        self.height = 0.42 * cm
class Texto(Label):
    """Static text label styled like Campo, but taller (0.70 cm)."""
    def __init__(self, *args, **kwargs):
        super(Texto, self).__init__(*args, **kwargs)
        for side in ('top', 'left', 'bottom', 'right'):
            setattr(self, 'padding_' + side, 0.1 * cm)
        self.style = DADO_CAMPO
        self.height = 0.70 * cm
class Descritivo(Label):
    """Section-header label (DESCRITIVO_BLOCO style, no bottom padding set)."""
    def __init__(self):
        super(Descritivo, self).__init__()
        self.padding_top = 0.03 * cm
        self.padding_left = 0.1 * cm
        self.padding_right = 0.1 * cm
        self.style = DESCRITIVO_BLOCO
        self.height = 0.42 * cm
class BandaRPS(ReportBand):
    """Base report band with helpers for laying out titled, bordered fields.

    The ``_inclui_*`` methods build a single widget without appending it;
    the public ``inclui_*`` methods compose widgets and append them to
    ``self.elements``.  (Portuguese naming: "inclui" = include/add,
    "titulo" = title, "campo" = data-bound field, "texto" = static text.)
    """
    def __init__(self):
        super(BandaRPS, self).__init__()
    def _inclui_titulo(self, nome, titulo, top, left, width, height=None, margem_direita=False):
        """Build (without appending) a bordered title label at (top, left)."""
        # Prepare the Label holding the title
        if margem_direita:
            lbl = LabelMargemDireita()
        else:
            lbl = LabelMargemEsquerda()
        lbl.name = 'lbl_' + nome
        lbl.text = titulo
        lbl.top = top
        lbl.left = left
        lbl.width = width
        if height:
            lbl.height = height
        return lbl
    def _inclui_campo(self, nome, conteudo, top, left, width, height=None):
        """Build (without appending) a data-bound Campo for attribute *conteudo*."""
        fld = Campo()
        fld.name = 'fld_' + nome
        fld.attribute_name = conteudo
        fld.top = top
        fld.left = left
        fld.width = width
        if height:
            fld.height = height
        return fld
    def _inclui_texto(self, nome, texto, top, left, width, height=None):
        """Build (without appending) a static Texto label."""
        lbl = Texto()
        lbl.name = 'txt_' + nome
        lbl.text = texto
        lbl.top = top
        lbl.left = left
        lbl.width = width
        if height:
            lbl.height = height
        return lbl
    def inclui_campo(self, nome, titulo, conteudo, top, left, width, height=None, margem_direita=False):
        """Add a title label plus a data field overlaid in the same box."""
        lbl = self._inclui_titulo(nome, titulo, top, left, width, height, margem_direita)
        self.elements.append(lbl)
        fld = self._inclui_campo(nome, conteudo, top, left, width, height)
        # Push the value below the title text inside the shared box.
        fld.padding_top = 0.25*cm
        self.elements.append(fld)
        return lbl, fld
    def inclui_campo_numerico(self, nome, titulo, conteudo, top, left, width, height=None, margem_direita=False):
        """Same as inclui_campo, but the value is right-aligned (numeric)."""
        lbl, fld = self.inclui_campo(nome, titulo, conteudo, top, left, width, height, margem_direita)
        fld.style = DADO_CAMPO_NUMERICO
        return lbl, fld
    def inclui_campo_imposto(self, nome, titulo, conteudo, top, left, width, height=None, margem_direita=False):
        """Add a two-row tax box: centered title on top, bold numeric value below."""
        # Empty-titled label provides the outer border only.
        borda = self._inclui_titulo(nome, '', top, left, width, height, margem_direita)
        borda.height = 0.8*cm
        self.elements.append(borda)
        # NOTE(review): the title is rendered via a Campo (attribute_name set
        # to *titulo*) rather than a Label — presumably intentional; confirm.
        lbl = self._inclui_campo(nome, titulo, top, left, width, height)
        lbl.style = DADO_CAMPO_CENTRALIZADO
        lbl.padding_top = 0.08*cm
        lbl.padding_bottom = 0.08*cm
        lbl.height = 0.4*cm
        self.elements.append(lbl)
        # Second row starts below the title row.
        top += 0.4*cm
        fld = self._inclui_campo(nome, conteudo, top, left, width, height)
        fld.style = DADO_CAMPO_NUMERICO_NEGRITO
        fld.padding_top = 0.08*cm
        fld.padding_bottom = 0.08*cm
        fld.height = 0.4*cm
        self.elements.append(fld)
        return lbl, fld
    def inclui_texto(self, nome, titulo, texto, top, left, width, height=None, margem_direita=False):
        """Add a title label plus (optionally) a static text in the same box."""
        lbl = self._inclui_titulo(nome, titulo, top, left, width, height, margem_direita)
        self.elements.append(lbl)
        if texto:
            txt = self._inclui_texto(nome, texto, top, left, width, height)
            txt.padding_top = 0.25*cm
            self.elements.append(txt)
        else:
            txt = None
        return lbl, txt
    def inclui_texto_numerico(self, nome, titulo, texto, top, left, width, height=None, margem_direita=False):
        """Same as inclui_texto, but the text is right-aligned (numeric)."""
        lbl, txt = self.inclui_texto(nome, titulo, texto, top, left, width, height, margem_direita)
        if txt:
            txt.style = DADO_CAMPO_NUMERICO
        return lbl, txt
    def inclui_descritivo(self, nome, titulo, top, left, width, height=None):
        """Add a section-header (Descritivo) label."""
        lbl = Descritivo()
        lbl.name = 'dsc_' + nome
        lbl.text = titulo
        lbl.top = top
        lbl.left = left
        lbl.width = width
        if height:
            lbl.height = height
        self.elements.append(lbl)
        return lbl
    def inclui_texto_sem_borda(self, nome, texto, top, left, width, height=None, margem_direita=False):
        """Add a borderless static text.  (margem_direita is accepted but unused.)"""
        txt = self._inclui_texto(nome, texto, top, left, width, height)
        txt.padding_top = 0.1*cm
        self.elements.append(txt)
        return txt
    def inclui_campo_sem_borda(self, nome, conteudo, top, left, width, height=None, margem_direita=False):
        """Add a borderless data field.  (margem_direita is accepted but unused.)"""
        fld = self._inclui_campo(nome, conteudo, top, left, width, height)
        fld.padding_top = 0.1*cm
        self.elements.append(fld)
        return fld
    def inclui_descritivo_item(self, nome, titulo, top, left, width, height=None, margem_direita=False):
        """Add a column-header label for the product/item table."""
        lbl = self._inclui_titulo(nome, titulo, top, left, width, height, margem_direita)
        lbl.style = DESCRITIVO_PRODUTO
        lbl.padding_top = 0.05*cm
        lbl.padding_left = 0.05*cm
        lbl.padding_bottom = 0.05*cm
        lbl.padding_right = 0.05*cm
        if height:
            lbl.height = height
        else:
            lbl.height = 0.42*cm
        self.elements.append(lbl)
        return lbl
    def inclui_campo_item(self, nome, conteudo, top, left, width, height=None, margem_direita=False):
        """Add an auto-expanding data cell for the product/item table."""
        fld = self._inclui_campo(nome, conteudo, top, left, width, height)
        # Leave open the edge facing the page margin.
        if margem_direita:
            fld.borders = {'top': 0.1, 'right': False, 'bottom': 0.1, 'left': 0.1}
        else:
            fld.borders = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': False}
        fld.style = DADO_PRODUTO
        fld.padding_top = 0.05*cm
        fld.padding_left = 0.05*cm
        fld.padding_bottom = 0.05*cm
        fld.padding_right = 0.05*cm
        fld.auto_expand_height = True
        if height:
            fld.height = height
        else:
            fld.height = 0.28*cm
        self.elements.append(fld)
        return fld
    def inclui_campo_numerico_item(self, nome, conteudo, top, left, width, height=None, margem_direita=False):
        """Same as inclui_campo_item, but right-aligned (numeric)."""
        fld = self.inclui_campo_item(nome, conteudo, top, left, width, height, margem_direita)
        fld.style = DADO_PRODUTO_NUMERICO
        return fld
    def inclui_texto_produto(self, nome, texto, top, left, width, height=None, margem_direita=False):
        """Add an auto-expanding static text cell for the product/item table."""
        txt = self._inclui_texto(nome, texto, top, left, width, height)
        txt.borders_stroke_width = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': 0.1}
        if margem_direita:
            txt.borders = {'top': 0.1, 'right': False, 'bottom': 0.1, 'left': 0.1}
        else:
            txt.borders = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': False}
        txt.style = DADO_PRODUTO
        txt.padding_top = 0.05*cm
        txt.padding_left = 0.05*cm
        txt.padding_bottom = 0.05*cm
        txt.padding_right = 0.05*cm
        txt.auto_expand_height = True
        if height:
            txt.height = height
        else:
            txt.height = 0.28*cm
        self.elements.append(txt)
        return txt
    def inclui_texto_numerico_produto(self, nome, texto, top, left, width, height=None, margem_direita=False):
        """Same as inclui_texto_produto, but right-aligned (numeric)."""
        txt = self.inclui_texto_produto(nome, texto, top, left, width, height, margem_direita)
        txt.style = DADO_PRODUTO_NUMERICO
        return txt
    def inclui_texto_centralizado_produto(self, nome, texto, top, left, width, height=None, margem_direita=False):
        """Same as inclui_texto_produto, but centered."""
        txt = self.inclui_texto_produto(nome, texto, top, left, width, height, margem_direita)
        txt.style = DADO_PRODUTO_CENTRALIZADO
        return txt
|
"""
The sax module contains a collection of classes that provide a (D)ocument
(O)bject (M)odel representation of an XML document. The goal is to provide an
easy, intuitive interface for managing XML documents. Although, the term, DOM,
is used above, this model is B{far} better.
XML namespaces in suds are represented using a (2) element tuple containing the
prefix and the URI, e.g. I{('tns', 'http://myns')}
@var encoder: A I{pluggable} XML special character processor used to encode/
decode strings.
@type encoder: L{Encoder}
"""
from suds.sax.enc import Encoder
encoder = Encoder()
def splitPrefix(name):
    """
    Split the name into a tuple (I{prefix}, I{name}). The first element in the
    tuple is I{None} when the name does not have a prefix.
    @param name: A node name containing an optional prefix.
    @type name: basestring
    @return: A tuple containing the (2) parts of I{name}
    @rtype: (I{prefix}, I{name})
    """
    if isinstance(name, basestring) and ':' in name:
        # Only the first ':' separates the prefix from the local part.
        prefix, _, local = name.partition(':')
        return prefix, local
    return None, name
class Namespace:
    """
    The namespace class represents XML namespaces.

    A namespace is a 2-tuple of (prefix, URI).
    """
    # Namespace used when an element carries none.
    default = (None, None)
    # The builtin 'xml' namespace.
    xmlns = ('xml', 'http://www.w3.org/XML/1998/namespace')
    # XML Schema and XML Schema instance namespaces.
    xsdns = ('xs', 'http://www.w3.org/2001/XMLSchema')
    xsins = ('xsi', 'http://www.w3.org/2001/XMLSchema-instance')
    all = (xsdns, xsins)

    @classmethod
    def create(cls, p=None, u=None):
        """Build a namespace tuple from a prefix and a URI."""
        return p, u

    @classmethod
    def none(cls, ns):
        """Return True when ns is the default (empty) namespace."""
        return ns == cls.default

    @classmethod
    def xsd(cls, ns):
        """Return True when ns is the XML Schema namespace."""
        try:
            return cls.w3(ns) and ns[1].endswith('XMLSchema')
        # Guard against None / malformed ns values only; a bare 'except:'
        # would also swallow KeyboardInterrupt and SystemExit.
        except Exception:
            pass
        return False

    @classmethod
    def xsi(cls, ns):
        """Return True when ns is the XML Schema instance namespace."""
        try:
            return cls.w3(ns) and ns[1].endswith('XMLSchema-instance')
        except Exception:
            pass
        return False

    @classmethod
    def xs(cls, ns):
        """Return True when ns is either the XSD or the XSI namespace."""
        return cls.xsd(ns) or cls.xsi(ns)

    @classmethod
    def w3(cls, ns):
        """Return True when the ns URI belongs to the W3C."""
        try:
            return ns[1].startswith('http://www.w3.org')
        except Exception:
            pass
        return False

    @classmethod
    def isns(cls, ns):
        """Return True when ns is shaped like a namespace 2-tuple."""
        try:
            return isinstance(ns, tuple) and len(ns) == len(cls.default)
        except Exception:
            pass
        return False
|
import os
import sys
def _JoinPath(*path_parts):
return os.path.abspath(os.path.join(*path_parts))
def _AddDirToPythonPath(*path_parts):
    path = _JoinPath(*path_parts)
    # Skip anything that is not an existing directory or is already importable.
    if not os.path.isdir(path) or path in sys.path:
        return
    # Some call sites that use Telemetry assume that sys.path[0] is the
    # directory containing the script, so insert the new entry right
    # after sys.path[0] instead of prepending it.
    sys.path.insert(1, path)
# Absolute-ish path of the catapult checkout root: three directory levels
# above the directory containing this file.
_CATAPULT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             os.path.pardir, os.path.pardir, os.path.pardir)
# Make the battor dependency importable by scripts that import this module.
_AddDirToPythonPath(_CATAPULT_DIR, 'common', 'battor')
|
"""
Custom version comparison filters for use in openshift-ansible
"""
from distutils.version import LooseVersion
def legacy_gte_function_builder(name, versions):
    """
    Build and return a version comparison function.

    Ex: name = 'oo_version_gte_3_1_or_1_1'
        versions = {'enterprise': '3.1', 'origin': '1.1'}

    returns oo_version_gte_3_1_or_1_1, a function which based on the
    version and deployment type will return true if the provided
    version is greater than or equal to the function's version
    """
    enterprise_version = versions['enterprise']
    origin_version = versions['origin']

    def _gte_function(version, deployment_type):
        """
        Dynamic function created by legacy_gte_function_builder.

        Ex: version = '3.1'
            deployment_type = 'openshift-enterprise'
        returns True/False
        """
        version_gte = False
        # Convert both sides to LooseVersion explicitly. Comparing a plain
        # str against a LooseVersion only works through the reflected
        # comparison operator, which is fragile; making the conversion
        # explicit guarantees proper version-component ordering.
        if 'enterprise' in deployment_type:
            if LooseVersion(str(version)) >= LooseVersion(enterprise_version):
                version_gte = True
        elif 'origin' in deployment_type:
            if LooseVersion(str(version)) >= LooseVersion(origin_version):
                version_gte = True
        return version_gte
    _gte_function.__name__ = name
    return _gte_function
def gte_function_builder(name, gte_version):
    """
    Build and return a version comparison function.

    Ex: name = 'oo_version_gte_3_6'
        version = '3.6'

    returns oo_version_gte_3_6, a function which based on the
    version will return true if the provided version is greater
    than or equal to the function's version
    """
    def _gte_function(version):
        """
        Dynamic function created by gte_function_builder.

        Ex: version = '3.1'
        returns True/False
        """
        # Convert both sides to LooseVersion explicitly instead of relying
        # on the reflected str >= LooseVersion comparison, which is fragile.
        version_gte = False
        if LooseVersion(str(version)) >= LooseVersion(gte_version):
            version_gte = True
        return version_gte
    _gte_function.__name__ = name
    return _gte_function
class FilterModule(object):
    """
    Filters for version checking.
    """
    # Each element of versions is composed of (major, minor_start, minor_end)
    # Origin began versioning 3.x with 3.6, so begin 3.x with 3.6.
    versions = [(3, 6, 10)]

    def __init__(self):
        """
        Creates a new FilterModule for ose version checking.
        """
        self._filters = {}

        # Generate a plain "greater than or equal" filter for every minor
        # release in each configured (major, minor_start, minor_end) span.
        for major, minor_start, minor_end in self.versions:
            for minor in range(minor_start, minor_end):
                func_name = 'oo_version_gte_{}_{}'.format(major, minor)
                self._filters[func_name] = gte_function_builder(
                    func_name, "{}.{}.0".format(major, minor))

        # Filters with special versioning requirements; every Origin 1.x
        # release is treated as a special case.
        legacy_filters = [{'name': 'oo_version_gte_3_1_or_1_1',
                           'versions': {'enterprise': '3.0.2.905',
                                        'origin': '1.1.0'}},
                          {'name': 'oo_version_gte_3_1_1_or_1_1_1',
                           'versions': {'enterprise': '3.1.1',
                                        'origin': '1.1.1'}},
                          {'name': 'oo_version_gte_3_2_or_1_2',
                           'versions': {'enterprise': '3.1.1.901',
                                        'origin': '1.2.0'}},
                          {'name': 'oo_version_gte_3_3_or_1_3',
                           'versions': {'enterprise': '3.3.0',
                                        'origin': '1.3.0'}},
                          {'name': 'oo_version_gte_3_4_or_1_4',
                           'versions': {'enterprise': '3.4.0',
                                        'origin': '1.4.0'}},
                          {'name': 'oo_version_gte_3_5_or_1_5',
                           'versions': {'enterprise': '3.5.0',
                                        'origin': '1.5.0'}}]
        for spec in legacy_filters:
            self._filters[spec['name']] = legacy_gte_function_builder(
                spec['name'], spec['versions'])

    def filters(self):
        """
        Return the filters mapping.
        """
        return self._filters
|
"""Expose regular shell commands as services."""
from __future__ import annotations
import asyncio
from contextlib import suppress
import logging
import shlex
import voluptuous as vol
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.typing import ConfigType
# Integration domain; service names are the slugs configured under it.
DOMAIN = "shell_command"

# Maximum number of seconds a spawned command may run before being killed.
COMMAND_TIMEOUT = 60

_LOGGER = logging.getLogger(__name__)

# Configuration shape: a mapping of service slug -> shell command string.
CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: cv.schema_with_slug_keys(cv.string)}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the shell_command component.

    Registers one service per configured command slug; each call spawns
    the command, waits up to COMMAND_TIMEOUT seconds and logs its output.
    """
    conf = config.get(DOMAIN, {})

    # Cache of command -> (program, raw args, compiled args template) so
    # each distinct command string is parsed/compiled only once.
    cache: dict[str, tuple[str, str | None, template.Template | None]] = {}

    async def async_service_handler(service: ServiceCall) -> None:
        """Execute a shell command service."""
        cmd = conf[service.service]

        if cmd in cache:
            prog, args, args_compiled = cache[cmd]
        elif " " not in cmd:
            # Bare program with no arguments: nothing to template.
            prog = cmd
            args = None
            args_compiled = None
            cache[cmd] = prog, args, args_compiled
        else:
            prog, args = cmd.split(" ", 1)
            args_compiled = template.Template(args, hass)
            cache[cmd] = prog, args, args_compiled

        if args_compiled:
            try:
                rendered_args = args_compiled.async_render(
                    variables=service.data, parse_result=False
                )
            except TemplateError as ex:
                _LOGGER.exception("Error rendering command template: %s", ex)
                return
        else:
            rendered_args = None

        if rendered_args == args:
            # No template used. default behavior
            create_process = asyncio.create_subprocess_shell(
                cmd,
                stdin=None,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
        else:
            # Template used. Break into list and use create_subprocess_exec
            # (which uses shell=False) for security
            shlexed_cmd = [prog] + shlex.split(rendered_args)

            create_process = asyncio.create_subprocess_exec(
                *shlexed_cmd,
                stdin=None,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
        process = await create_process
        try:
            stdout_data, stderr_data = await asyncio.wait_for(
                process.communicate(), COMMAND_TIMEOUT
            )
        except asyncio.TimeoutError:
            _LOGGER.exception(
                "Timed out running command: `%s`, after: %ss", cmd, COMMAND_TIMEOUT
            )
            if process:
                with suppress(TypeError):
                    process.kill()
                    # https://bugs.python.org/issue43884
                    # pylint: disable=protected-access
                    process._transport.close()  # type: ignore[attr-defined]
                del process
            return

        if stdout_data:
            _LOGGER.debug(
                "Stdout of command: `%s`, return code: %s:\n%s",
                cmd,
                process.returncode,
                stdout_data,
            )
        if stderr_data:
            _LOGGER.debug(
                "Stderr of command: `%s`, return code: %s:\n%s",
                cmd,
                process.returncode,
                stderr_data,
            )
        if process.returncode != 0:
            # Not inside an exception handler: use error(), not exception(),
            # which would log a spurious "NoneType: None" traceback here.
            _LOGGER.error(
                "Error running command: `%s`, return code: %s", cmd, process.returncode
            )

    for name in conf:
        hass.services.async_register(DOMAIN, name, async_service_handler)
    return True
|
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.admin.images import views
# Routes for the admin images dashboard. The create/update/detail views are
# keyed by the glance image_id captured from the URL.
urlpatterns = patterns('openstack_dashboard.dashboards.admin.images.views',
    url(r'^images/$', views.IndexView.as_view(), name='index'),
    url(r'^create/$', views.CreateView.as_view(), name='create'),
    url(r'^(?P<image_id>[^/]+)/update/$',
        views.UpdateView.as_view(), name='update'),
    url(r'^(?P<image_id>[^/]+)/detail/$',
        views.DetailView.as_view(), name='detail')
)
|
import locale
def unicode_sorter(input):
    """Return a sort key for German strings according to DIN 5007."""
    # key1: compare case-insensitively, folding umlauts onto their base
    # vowels and sharp s onto 'ss' (DIN 5007).
    key1 = input.lower()
    for umlaut, plain in ((u"ä", u"a"), (u"ö", u"o"),
                          (u"ü", u"u"), (u"ß", u"ss")):
        key1 = key1.replace(umlaut, plain)
    # A secondary key (e.g. input.swapcase()) could break ties by sorting
    # the lowercase word before the uppercase word and the umlaut form
    # after the plain form, but only the primary key is used here.
    return key1
def get_queryset(model_class, manager=None):
    """Return the named manager of model_class, falling back to the default."""
    if manager is not None and hasattr(model_class, manager):
        return getattr(model_class, manager)
    return model_class._default_manager
def serialize_results(results):
    """Serialize each result as a {'value': pk, 'display': text} mapping."""
    serialized = []
    for item in results:
        serialized.append({'value': item.pk, 'display': unicode(item)})
    return serialized
def get_keywords(field, value):
    """Build filter kwargs for a field; the string '0' selects NULL values."""
    if value == '0':
        return {str("%s__isnull" % field): True}
    return {str(field): str(value)}
def sort_results(results):
    """Performs in-place sort of filterchain results."""
    # Locale-aware comparison (cmp=) applied after reducing every item to
    # its DIN 5007 key; the cmp keyword only exists on Python 2 list.sort().
    results.sort(cmp=locale.strcoll, key=lambda x: unicode_sorter(unicode(x)))
|
from __future__ import unicode_literals
import frappe
def before_install():
    """Reload the core doctypes that must be current before installing."""
    for doctype in ("docfield", "docperm", "doctype"):
        frappe.reload_doc("core", "doctype", doctype)
def after_install():
    """Seed core users, roles and workflow records after a fresh install."""
    # reset installed apps for re-install
    frappe.db.set_global("installed_apps", '["frappe"]')

    # core users / roles
    install_docs = [
        {'doctype':'User', 'name':'Administrator', 'first_name':'Administrator',
            'email':'admin@example.com', 'enabled':1},
        {'doctype':'User', 'name':'Guest', 'first_name':'Guest',
            'email':'guest@example.com', 'enabled':1},
        {'doctype':'UserRole', 'parent': 'Administrator', 'role': 'Administrator',
            'parenttype':'User', 'parentfield':'user_roles'},
        {'doctype':'UserRole', 'parent': 'Guest', 'role': 'Guest',
            'parenttype':'User', 'parentfield':'user_roles'},
        {'doctype': "Role", "role_name": "Report Manager"},
        {'doctype': "Workflow State", "workflow_state_name": "Pending",
            "icon": "question-sign", "style": ""},
        {'doctype': "Workflow State", "workflow_state_name": "Approved",
            "icon": "ok-sign", "style": "Success"},
        {'doctype': "Workflow State", "workflow_state_name": "Rejected",
            "icon": "remove", "style": "Danger"},
        {'doctype': "Workflow Action", "workflow_action_name": "Approve"},
        {'doctype': "Workflow Action", "workflow_action_name": "Reject"},
        {'doctype': "Workflow Action", "workflow_action_name": "Review"}
    ]
    for d in install_docs:
        try:
            frappe.get_doc(d).insert()
        # Ignore documents that already exist so re-installs stay idempotent.
        except frappe.NameError:
            pass

    # all roles to admin
    frappe.get_doc("User", "Administrator").add_roles(*frappe.db.sql_list("""select name from tabRole"""))

    # update admin password
    from frappe.auth import _update_password
    _update_password("Administrator", frappe.conf.get("admin_password"))

    frappe.db.commit()
def before_tests():
    """Wipe data left over from earlier test runs, then clear caches."""
    # Remove records previous test runs may have created.
    frappe.db.sql("delete from `tabCustom Field`")
    frappe.db.sql("delete from `tabEvent`")
    frappe.db.commit()
    frappe.clear_cache()
|
from allmydata.util import mathutil # from the pyutil library
"""
Read and write chunks from files.
Version 1.0.0.
A file is divided into blocks, each of which has size L{BLOCK_SIZE}
(except for the last block, which may be smaller). Blocks are encoded
into chunks. One publishes the hash of the entire file. Clients
who want to download the file first obtain the hash, then the clients
can receive chunks in any order. Cryptographic hashing is used to
verify each received chunk before writing to disk. Thus it is
impossible to download corrupt data if one has the correct file hash.
One obtains the hash of a complete file via
L{CompleteChunkFile.file_hash}. One can read chunks from a complete
file by the sequence operations of C{len()} and subscripting on a
L{CompleteChunkFile} object. One can open an empty or partially
downloaded file with L{PartialChunkFile}, and read and write chunks
to this file. A chunk will fail to write if its contents and index
are not consistent with the overall file hash passed to
L{PartialChunkFile} when the partial chunk file was first created.
The chunks have an overhead of less than 4% for files of size
less than C{10**20} bytes.
Benchmarks:
- On a 3 GHz Pentium 3, it took 3.4 minutes to first make a
L{CompleteChunkFile} object for a 4 GB file. Up to 10 MB of
memory was used as the constructor ran. A metafile filename
was passed to the constructor, and so the hash information was
written to the metafile. The object used a negligible amount
of memory after the constructor was finished.
- Creation of L{CompleteChunkFile} objects in future runs of the
program took negligible time, since the hash information was
already stored in the metafile.
@var BLOCK_SIZE: Size of a block. See L{BlockFile}.
@var MAX_CHUNK_SIZE: Upper bound on the size of a chunk.
See L{CompleteChunkFile}.
free (adj.): unencumbered; not under the control of others
Written by Connelly Barnes in 2005 and released into the
public domain with no warranty of any kind, either expressed
or implied. It probably won't make your computer catch on fire,
or eat your children, but it might. Use at your own risk.
"""
from allmydata.util import base32
from allmydata.util.hashutil import tagged_hash, tagged_pair_hash
__version__ = '1.0.0-allmydata'
BLOCK_SIZE = 65536
MAX_CHUNK_SIZE = BLOCK_SIZE + 4096
def roundup_pow2(x):
    """
    Round integer C{x} up to the nearest power of 2.
    """
    power = 1
    while power < x:
        power <<= 1
    return power
class CompleteBinaryTreeMixin:
    """
    Adds convenience methods to a complete binary tree.

    Assumes the total number of elements in the binary tree may be
    accessed via C{__len__}, and that each element can be retrieved
    using list subscripting.

    Tree is indexed like so::
         0
        / \
       1   2
      / \ / \
     3  4 5  6
    / \ / \ / \ / \
   7  8 9 10 11 12 13 14
    """

    def parent(self, i):
        """
        Index of the parent of C{i}.
        """
        bounded = hasattr(self, '__len__')
        if i < 1 or (bounded and i >= len(self)):
            raise IndexError('index out of range: ' + repr(i))
        return (i - 1) // 2

    def lchild(self, i):
        """
        Index of the left child of C{i}.
        """
        child = 2 * i + 1
        if i < 0 or (hasattr(self, '__len__') and child >= len(self)):
            raise IndexError('index out of range: ' + repr(i))
        return child

    def rchild(self, i):
        """
        Index of right child of C{i}.
        """
        child = 2 * i + 2
        if i < 0 or (hasattr(self, '__len__') and child >= len(self)):
            raise IndexError('index out of range: ' + repr(i))
        return child

    def sibling(self, i):
        """
        Index of sibling of C{i}.
        """
        p = self.parent(i)
        left = self.lchild(p)
        if left == i:
            return self.rchild(p)
        return left

    def needed_for(self, i):
        """
        Return a list of node indices that are necessary for the hash chain.
        """
        if i < 0 or i >= len(self):
            raise IndexError('index out of range: 0 >= %s < %s' % (i, len(self)))
        # Walk from the node up to the root, collecting each sibling on the way.
        chain = []
        node = i
        while node != 0:
            chain.append(self.sibling(node))
            node = self.parent(node)
        return chain

    def depth_first(self, i=0):
        """Generate (index, relative depth) pairs in pre-order from C{i}."""
        yield i, 0
        for child_of in (self.lchild, self.rchild):
            # A missing child raises IndexError; that subtree is simply empty.
            try:
                subtree = self.depth_first(child_of(i))
            except IndexError:
                continue
            for node, depth in subtree:
                yield node, depth + 1

    def dump(self):
        """Render the tree as indented text, one node per line."""
        lines = []
        for node, depth in self.depth_first():
            lines.append("%s%3d: %s" % (" "*depth, node,
                                        base32.b2a_or_none(self[node])))
        return "\n".join(lines) + "\n"

    def get_leaf_index(self, leafnum):
        """Map a leaf number onto its node index."""
        return self.first_leaf_num + leafnum

    def get_leaf(self, leafnum):
        """Return the value stored at the given leaf."""
        return self[self.first_leaf_num + leafnum]
def depth_of(i):
    """Return the depth or level of the given node. Level 0 contains node 0
    Level 1 contains nodes 1 and 2. Level 2 contains nodes 3,4,5,6."""
    # floor(log2(i+1)) == (i+1).bit_length() - 1; using the builtin avoids
    # the project-local mathutil helper with identical results.
    return (i + 1).bit_length() - 1
def empty_leaf_hash(i):
    # Deterministic filler hash used to pad leaf slot i (see HashTree).
    return tagged_hash('Merkle tree empty leaf', "%d" % i)
def pair_hash(a, b):
    # Hash of an internal Merkle node, computed from its two child hashes.
    return tagged_pair_hash('Merkle tree internal node', a, b)
class HashTree(CompleteBinaryTreeMixin, list):
    """
    Compute Merkle hashes at any node in a complete binary tree.

    Tree is indexed like so::
         0
        / \
       1   2
      / \ / \
     3  4 5  6
    / \ / \ / \ / \
   7  8 9 10 11 12 13 14  <- List passed to constructor.
    """

    def __init__(self, L):
        """
        Create complete binary tree from list of hash strings.

        The list is augmented by hashes so its length is a power of 2, and
        then this is used as the bottom row of the hash tree.

        The augmenting is done so that if the augmented element is at index
        C{i}, then its value is C{hash(tagged_hash('Merkle tree empty leaf',
        '%d'%i))}.
        """
        # Pad the leaf row out to a power of two with deterministic
        # filler hashes.
        start = len(L)
        end = roundup_pow2(len(L))
        self.first_leaf_num = end - 1
        leaves = list(L) + [empty_leaf_hash(i) for i in xrange(start, end)]
        # Build the rows bottom-up; each parent row hashes adjacent pairs.
        rows = [leaves]
        while len(rows[-1]) > 1:
            row = rows[-1]
            rows.append([pair_hash(row[2*i], row[2*i+1])
                         for i in xrange(len(row)//2)])
        # Flatten the rows, root first, into this list.
        rows.reverse()
        self[:] = sum(rows, [])

    def needed_hashes(self, leafnum, include_leaf=False):
        """Which hashes will someone need to validate a given data block?

        I am used to answer a question: supposing you have the data block
        that is used to form leaf hash N, and you want to validate that it,
        which hashes would you need?

        I accept a leaf number and return a set of 'hash index' values, which
        are integers from 0 to len(self). In the 'hash index' number space,
        hash[0] is the root hash, while hash[len(self)-1] is the last leaf
        hash.

        This method can be used to find out which hashes you should request
        from some untrusted source (usually the same source that provides the
        data block), so you can minimize storage or transmission overhead. It
        can also be used to determine which hashes you should send to a
        remote data store so that it will be able to provide validatable data
        in the future.

        I will not include '0' (the root hash) in the result, since the root
        is generally stored somewhere that is more trusted than the source of
        the remaining hashes. I will include the leaf hash itself only if you
        ask me to, by passing include_leaf=True.
        """
        required = set(self.needed_for(self.first_leaf_num + leafnum))
        if include_leaf:
            required.add(self.first_leaf_num + leafnum)
        return required
class NotEnoughHashesError(Exception):
    """Raised when the hashes required to validate a node are missing."""
    pass
class BadHashError(Exception):
    """Raised when a supplied or computed hash conflicts with an existing one."""
    pass
class IncompleteHashTree(CompleteBinaryTreeMixin, list):
    """I am a hash tree which may or may not be complete. I can be used to
    validate inbound data from some untrustworthy provider who has a subset
    of leaves and a sufficient subset of internal nodes.

    Initially I am completely unpopulated. Over time, I will become filled
    with hashes, just enough to validate particular leaf nodes.

    If you desire to validate leaf number N, first find out which hashes I
    need by calling needed_hashes(N). This will return a list of node numbers
    (which will nominally be the sibling chain between the given leaf and the
    root, but if I already have some of those nodes, needed_hashes(N) will
    only return a subset). Obtain these hashes from the data provider, then
    tell me about them with set_hash(i, HASH). Once I have enough hashes, you
    can tell me the hash of the leaf with set_leaf_hash(N, HASH), and I will
    either return None or raise BadHashError.

    The first hash to be set will probably be 0 (the root hash), since this
    is the one that will come from someone more trustworthy than the data
    provider.
    """

    def __init__(self, num_leaves):
        # Build the same power-of-two tree shape as HashTree, but with every
        # slot empty (None); hashes arrive later via set_hashes().
        L = [None] * num_leaves
        start = len(L)
        end = roundup_pow2(len(L))
        self.first_leaf_num = end - 1
        L = L + [None] * (end - start)
        rows = [L]
        while len(rows[-1]) != 1:
            last = rows[-1]
            rows += [[None for i in xrange(len(last)//2)]]
        # Flatten the list of rows into a single list.
        rows.reverse()
        self[:] = sum(rows, [])

    def needed_hashes(self, leafnum, include_leaf=False):
        """Which new hashes do I need to validate a given data block?

        I am much like HashTree.needed_hashes(), except that I don't include
        hashes that I already know about. When needed_hashes() is called on
        an empty IncompleteHashTree, it will return the same set as a
        HashTree of the same size. But later, once hashes have been added
        with set_hashes(), I will ask for fewer hashes, since some of the
        necessary ones have already been set.
        """
        maybe_needed = set(self.needed_for(self.first_leaf_num + leafnum))
        if include_leaf:
            maybe_needed.add(self.first_leaf_num + leafnum)
        # Only slots still holding None actually need to be fetched.
        return set([i for i in maybe_needed if self[i] is None])

    def _name_hash(self, i):
        # Human-readable node label used in BadHashError messages.
        name = "[%d of %d]" % (i, len(self))
        if i >= self.first_leaf_num:
            leafnum = i - self.first_leaf_num
            numleaves = len(self) - self.first_leaf_num
            name += " (leaf [%d] of %d)" % (leafnum, numleaves)
        return name

    def set_hashes(self, hashes={}, leaves={}):
        """Add a bunch of hashes to the tree.

        I will validate these to the best of my ability. If I already have a
        copy of any of the new hashes, the new values must equal the existing
        ones, or I will raise BadHashError. If adding a hash allows me to
        compute a parent hash, those parent hashes must match or I will raise
        BadHashError. If I raise BadHashError, I will forget about all the
        hashes that you tried to add, leaving my state exactly the same as
        before I was called. If I return successfully, I will remember all
        those hashes.

        I insist upon being able to validate all of the hashes that were
        given to me. If I cannot do this because I'm missing some hashes, I
        will raise NotEnoughHashesError (and forget about all the hashes that
        you tried to add). Note that this means that the root hash must
        either be included in 'hashes', or it must have been provided at some
        point in the past.

        'leaves' is a dictionary that uses 'leaf index' values, which range
        from 0 (the left-most leaf) to num_leaves-1 (the right-most leaf), and
        form the base of the tree. 'hashes' uses 'hash_index' values, which
        range from 0 (the root of the tree) to 2*num_leaves-2 (the right-most
        leaf). leaf[i] is the same as hash[num_leaves-1+i].

        The best way to use me is to start by obtaining the root hash from
        some 'good' channel and populate me with it:

         iht = IncompleteHashTree(numleaves)
         roothash = trusted_channel.get_roothash()
         iht.set_hashes(hashes={0: roothash})

        Then use the 'bad' channel to obtain data block 0 and the
        corresponding hash chain (a dict with the same hashes that
        needed_hashes(0) tells you, e.g. {0:h0, 2:h2, 4:h4, 8:h8} when
        len(L)=8). Hash the data block to create leaf0, then feed everything
        into set_hashes() and see if it raises an exception or not::

         otherhashes = untrusted_channel.get_hashes()
         # otherhashes.keys() should == iht.needed_hashes(leaves=[0])
         datablock0 = untrusted_channel.get_data(0)
         leaf0 = HASH(datablock0)
         # HASH() is probably hashutil.tagged_hash(tag, datablock0)
         iht.set_hashes(otherhashes, leaves={0: leaf0})

        If the set_hashes() call doesn't raise an exception, the data block
        was valid. If it raises BadHashError, then either the data block was
        corrupted or one of the received hashes was corrupted. If it raises
        NotEnoughHashesError, then the otherhashes dictionary was incomplete.
        """
        # NOTE: the mutable default arguments are only read, never mutated
        # (hashes.copy() below), so they are safe here.
        assert isinstance(hashes, dict)
        for h in hashes.values():
            assert isinstance(h, str)
        assert isinstance(leaves, dict)
        for h in leaves.values():
            assert isinstance(h, str)
        # Merge the leaves into the hash-index number space, rejecting any
        # conflicts between the two input dictionaries up front.
        new_hashes = hashes.copy()
        for leafnum,leafhash in leaves.iteritems():
            hashnum = self.first_leaf_num + leafnum
            if hashnum in new_hashes:
                if new_hashes[hashnum] != leafhash:
                    raise BadHashError("got conflicting hashes in my "
                                       "arguments: leaves[%d] != hashes[%d]"
                                       % (leafnum, hashnum))
            new_hashes[hashnum] = leafhash

        remove_upon_failure = set() # we'll remove these if the check fails

        # visualize this method in the following way:
        # A: start with the empty or partially-populated tree as shown in
        #    the HashTree docstring
        # B: add all of our input hashes to the tree, filling in some of the
        #    holes. Don't overwrite anything, but new values must equal the
        #    existing ones. Mark everything that was added with a red dot
        #    (meaning "not yet validated")
        # C: start with the lowest/deepest level. Pick any red-dotted node,
        #    hash it with its sibling to compute the parent hash. Add the
        #    parent to the tree just like in step B (if the parent already
        #    exists, the values must be equal; if not, add our computed
        #    value with a red dot). If we have no sibling, throw
        #    NotEnoughHashesError, since we won't be able to validate this
        #    node. Remove the red dot. If there was a red dot on our
        #    sibling, remove it too.
        # D: finish all red-dotted nodes in one level before moving up to
        #    the next.
        # E: if we hit NotEnoughHashesError or BadHashError before getting
        #    to the root, discard every hash we've added.

        try:
            num_levels = depth_of(len(self)-1)
            # hashes_to_check[level] is set(index). This holds the "red dots"
            # described above
            hashes_to_check = [set() for level in range(num_levels+1)]

            # first we provisionally add all hashes to the tree, comparing
            # any duplicates
            for i,h in new_hashes.iteritems():
                if self[i]:
                    if self[i] != h:
                        raise BadHashError("new hash %s does not match "
                                           "existing hash %s at %s"
                                           % (base32.b2a(h),
                                              base32.b2a(self[i]),
                                              self._name_hash(i)))
                else:
                    level = depth_of(i)
                    hashes_to_check[level].add(i)
                    self[i] = h
                    remove_upon_failure.add(i)

            # Validate bottom-up: each red-dotted node is combined with its
            # sibling to (re)compute and check the parent.
            for level in reversed(range(len(hashes_to_check))):
                this_level = hashes_to_check[level]
                while this_level:
                    i = this_level.pop()
                    if i == 0:
                        # The root has no sibling. How lonely. You can't
                        # really *check* the root; you either accept it
                        # because the caller told you what it is by including
                        # it in hashes, or you accept it because you
                        # calculated it from its two children. You probably
                        # want to set the root (from a trusted source) before
                        # adding any children from an untrusted source.
                        continue
                    siblingnum = self.sibling(i)
                    if self[siblingnum] is None:
                        # without a sibling, we can't compute a parent, and
                        # we can't verify this node
                        raise NotEnoughHashesError("unable to validate [%d]"%i)
                    parentnum = self.parent(i)
                    # make sure we know right from left
                    leftnum, rightnum = sorted([i, siblingnum])
                    new_parent_hash = pair_hash(self[leftnum], self[rightnum])
                    if self[parentnum]:
                        if self[parentnum] != new_parent_hash:
                            raise BadHashError("h([%d]+[%d]) != h[%d]" %
                                               (leftnum, rightnum, parentnum))
                    else:
                        self[parentnum] = new_parent_hash
                        remove_upon_failure.add(parentnum)
                        parent_level = depth_of(parentnum)
                        assert parent_level == level-1
                        hashes_to_check[parent_level].add(parentnum)
                    # our sibling is now as valid as this node
                    this_level.discard(siblingnum)
            # we're done!

        except (BadHashError, NotEnoughHashesError):
            # Roll back every provisional addition so the tree is unchanged.
            for i in remove_upon_failure:
                self[i] = None
            raise
|
# Odoo addon manifest for the "Product Variants" module.
{
    "name": "Product Variants",
    # 8.0 series module version.
    "version": "8.0.2.1.1",
    "depends": [
        "product",
    ],
    "author": "OdooMRP team,"
              "AvanzOSC,"
              "Tecnativa",
    "contributors": [
        "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <ajuaristio@gmail.com>",
    ],
    "category": "Product Variant",
    "website": "http://www.odoomrp.com",
    "summary": "Disable automatic product variant creation",
    # Views and security records loaded at install/update time, in order.
    "data": [
        "views/product_attribute_price_view.xml",
        "views/product_category_view.xml",
        "views/product_configurator_view.xml",
        "views/product_product_view.xml",
        "views/product_template_view.xml",
        "security/ir.model.access.csv",
        "security/product_configurator_security.xml",
    ],
    "installable": True,
}
|
from __future__ import division, unicode_literals
import os
import re
import sys
import time
import random
from ..compat import compat_os_name
from ..utils import (
decodeArgument,
encodeFilename,
error_to_compat_str,
format_bytes,
shell_quote,
timeconvert,
)
class FileDownloader(object):
"""File Downloader class.
File downloader objects are the ones responsible of downloading the
actual video file and writing it to disk.
File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead.
Available options:
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
ratelimit: Download speed limit, in bytes/sec.
retries: Number of times to retry for HTTP error 5xx
buffersize: Size of download buffer in bytes.
noresizebuffer: Do not automatically resize the download buffer.
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
nopart: Do not use temporary .part files.
updatetime: Use the Last-modified header to set output file timestamps.
test: Download only first bytes to test the downloader.
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
(experimental)
external_downloader_args: A list of additional command-line arguments for the
external downloader.
hls_use_mpegts: Use the mpegts container for HLS videos.
Subclasses of this one must re-define the real_download method.
"""
_TEST_FILE_SIZE = 10241
params = None
    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options.

        ydl: the owning YoutubeDL-like object used for all output/reporting.
        params: dict of downloader options (see the class docstring).
        """
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params
        # Route progress reporting through the standard hook mechanism.
        self.add_progress_hook(self.report_progress)
@staticmethod
def format_seconds(seconds):
(mins, secs) = divmod(seconds, 60)
(hours, mins) = divmod(mins, 60)
if hours > 99:
return '--:--:--'
if hours == 0:
return '%02d:%02d' % (mins, secs)
else:
return '%02d:%02d:%02d' % (hours, mins, secs)
@staticmethod
def calc_percent(byte_counter, data_len):
if data_len is None:
return None
return float(byte_counter) / float(data_len) * 100.0
@staticmethod
def format_percent(percent):
if percent is None:
return '---.-%'
return '%6s' % ('%3.1f%%' % percent)
@staticmethod
def calc_eta(start, now, total, current):
if total is None:
return None
if now is None:
now = time.time()
dif = now - start
if current == 0 or dif < 0.001: # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)
@staticmethod
def format_eta(eta):
if eta is None:
return '--:--'
return FileDownloader.format_seconds(eta)
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
return None
return float(bytes) / dif
@staticmethod
def format_speed(speed):
if speed is None:
return '%10s' % '---b/s'
return '%10s' % ('%s/s' % format_bytes(speed))
@staticmethod
def format_retries(retries):
return 'inf' if retries == float('inf') else '%.0f' % retries
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
if rate > new_max:
return int(new_max)
if rate < new_min:
return int(new_min)
return int(rate)
@staticmethod
def parse_bytes(bytestr):
"""Parse a string indicating a byte quantity into an integer."""
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
if matchobj is None:
return None
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return int(round(number * multiplier))
    def to_screen(self, *args, **kargs):
        # Delegate informational output to the owning YoutubeDL object.
        self.ydl.to_screen(*args, **kargs)
    def to_stderr(self, message):
        # NOTE(review): despite the name, this routes through ydl.to_screen,
        # not a stderr-specific channel — confirm intentional before changing.
        self.ydl.to_screen(message)
    def to_console_title(self, message):
        # Delegate console/window title updates to the owning YoutubeDL.
        self.ydl.to_console_title(message)
    def trouble(self, *args, **kargs):
        # Delegate error/trouble handling to the owning YoutubeDL.
        self.ydl.trouble(*args, **kargs)
    def report_warning(self, *args, **kargs):
        # Delegate warning output to the owning YoutubeDL.
        self.ydl.report_warning(*args, **kargs)
    def report_error(self, *args, **kargs):
        # Delegate error output to the owning YoutubeDL.
        self.ydl.report_error(*args, **kargs)
def slow_down(self, start_time, now, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit')
if rate_limit is None or byte_counter == 0:
return
if now is None:
now = time.time()
elapsed = now - start_time
if elapsed <= 0.0:
return
speed = float(byte_counter) / elapsed
if speed > rate_limit:
time.sleep(max((byte_counter // rate_limit) - elapsed, 0))
def temp_name(self, filename):
    """Return the working-file name for *filename* (usually '<name>.part').

    The name is returned unchanged when partial files are disabled, when
    downloading to stdout ('-'), or when the target exists but is not a
    regular file.
    """
    if (self.params.get('nopart', False)
            or filename == '-'
            or (os.path.exists(encodeFilename(filename))
                and not os.path.isfile(encodeFilename(filename)))):
        return filename
    return filename + '.part'
def undo_temp_name(self, filename):
    """Strip a trailing '.part' suffix from *filename*, if present."""
    suffix = '.part'
    return filename[:-len(suffix)] if filename.endswith(suffix) else filename
def ytdl_filename(self, filename):
    """Name of the '.ytdl' control/resume file for *filename*."""
    return '%s.ytdl' % filename
def try_rename(self, old_filename, new_filename):
    """Rename *old_filename* to *new_filename*, reporting any OS failure."""
    if old_filename == new_filename:
        return
    try:
        os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
    except (IOError, OSError) as err:
        self.report_error('unable to rename file: %s' % error_to_compat_str(err))
def try_utime(self, filename, last_modified_hdr):
    """Try to set the last-modified time of the given file.

    Parses a Last-Modified header value and applies it as the file's
    mtime. Returns the parsed POSIX timestamp on success (even when
    setting the time itself failed), otherwise None.

    Fix: removed a dead re-check of the header for None (it was already
    guarded above) and the redundant 'timestr' alias.
    """
    if last_modified_hdr is None:
        return
    if not os.path.isfile(encodeFilename(filename)):
        return
    filetime = timeconvert(last_modified_hdr)
    if filetime is None:
        return filetime
    # Ignore obviously invalid dates
    if filetime == 0:
        return
    try:
        # Keep atime current; only the mtime comes from the header.
        os.utime(filename, (time.time(), filetime))
    except Exception:
        # Best effort only -- failing to set the time is not an error.
        pass
    return filetime
def report_destination(self, filename):
    """Announce where the download will be written."""
    self.to_screen('[download] Destination: %s' % filename)
def _report_progress_status(self, msg, is_last_line=False):
    """Write one progress line, overwriting the previous one when allowed.

    With 'progress_with_newline' each update is a normal line; otherwise
    the cursor is returned to column 0 (clearing the line where supported)
    so successive updates redraw in place. Also mirrors the message into
    the console title.
    """
    fullmsg = '[download] ' + msg
    if self.params.get('progress_with_newline', False):
        self.to_screen(fullmsg)
    else:
        if compat_os_name == 'nt':
            # Windows consoles lack the ANSI erase-line sequence, so pad
            # the new message with spaces to blank out any longer previous
            # line; remember the width for the next update.
            prev_len = getattr(self, '_report_progress_prev_line_length',
                               0)
            if prev_len > len(fullmsg):
                fullmsg += ' ' * (prev_len - len(fullmsg))
            self._report_progress_prev_line_length = len(fullmsg)
            clear_line = '\r'
        else:
            # '\x1b[K' erases to end of line; only safe on a real terminal.
            clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
        self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
    self.to_console_title('youtube-dl ' + msg)
def report_progress(self, s):
    """Render the progress dictionary *s* as a status line.

    Handles both the 'finished' summary line and in-flight 'downloading'
    updates, degrading gracefully when totals, speed or ETA are unknown.
    Derived display strings are cached back into *s* under '_'-prefixed
    keys for the %-format templates below.
    """
    if s['status'] == 'finished':
        if self.params.get('noprogress', False):
            self.to_screen('[download] Download completed')
        else:
            s['_total_bytes_str'] = format_bytes(s['total_bytes'])
            if s.get('elapsed') is not None:
                s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
            else:
                msg_template = '100%% of %(_total_bytes_str)s'
            self._report_progress_status(
                msg_template % s, is_last_line=True)
    if self.params.get('noprogress'):
        return
    if s['status'] != 'downloading':
        return
    if s.get('eta') is not None:
        s['_eta_str'] = self.format_eta(s['eta'])
    else:
        s['_eta_str'] = 'Unknown ETA'
    if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
        s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
    elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
        s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
    else:
        if s.get('downloaded_bytes') == 0:
            s['_percent_str'] = self.format_percent(0)
        else:
            s['_percent_str'] = 'Unknown %'
    if s.get('speed') is not None:
        s['_speed_str'] = self.format_speed(s['speed'])
    else:
        s['_speed_str'] = 'Unknown speed'
    if s.get('total_bytes') is not None:
        s['_total_bytes_str'] = format_bytes(s['total_bytes'])
        msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
    elif s.get('total_bytes_estimate') is not None:
        s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
        msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
    else:
        if s.get('downloaded_bytes') is not None:
            s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
            if s.get('elapsed'):
                s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
            else:
                msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
        else:
            # BUG FIX: the template used to contain a stray ' % ' which is
            # an invalid %-format directive and raised ValueError whenever
            # this fallback branch was hit.
            msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'
    self._report_progress_status(msg_template % s)
def report_resuming_byte(self, resume_len):
    """Announce that the download resumes at byte offset *resume_len*."""
    self.to_screen(
        '[download] Resuming download at byte %s' % resume_len)
def report_retry(self, err, count, retries):
    """Report retry in case of HTTP error 5xx."""
    message = (
        '[download] Got server HTTP error: %s. Retrying '
        '(attempt %d of %s)...'
        % (error_to_compat_str(err), count, self.format_retries(retries)))
    self.to_screen(message)
def report_file_already_downloaded(self, file_name):
    """Report that *file_name* has already been fully downloaded.

    Falls back to a generic message if the name cannot be encoded for
    the console.
    """
    try:
        self.to_screen('[download] %s has already been downloaded' % file_name)
    except UnicodeEncodeError:
        self.to_screen('[download] The file has already been downloaded')
def report_unable_to_resume(self):
    """Announce that resuming the partial download is impossible."""
    self.to_screen('[download] Unable to resume')
def download(self, filename, info_dict):
    """Download to a filename using the info from info_dict
    Return True on success and False otherwise

    Skips the transfer entirely (returning True) when the target already
    exists and either overwrites are disabled or a resumable complete
    file is present; optionally sleeps between downloads
    ('sleep_interval'/'max_sleep_interval'), then hands off to
    real_download().
    """
    nooverwrites_and_exists = (
        self.params.get('nooverwrites', False) and
        os.path.exists(encodeFilename(filename))
    )
    # 'filename' may be a file-like object (it exposes write()); only do
    # filesystem checks for real path names.
    if not hasattr(filename, 'write'):
        continuedl_and_exists = (
            self.params.get('continuedl', True) and
            os.path.isfile(encodeFilename(filename)) and
            not self.params.get('nopart', False)
        )
        # Check file already present
        if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists):
            self.report_file_already_downloaded(filename)
            # Report the (already complete) file through the normal
            # progress-hook machinery so callers see a 'finished' event.
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            return True
    min_sleep_interval = self.params.get('sleep_interval')
    if min_sleep_interval:
        max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval)
        # Randomize the pause within [min, max] to look less robotic.
        sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval)
        self.to_screen(
            '[download] Sleeping %s seconds...' % (
                int(sleep_interval) if sleep_interval.is_integer()
                else '%.2f' % sleep_interval))
        time.sleep(sleep_interval)
    return self.real_download(filename, info_dict)
def real_download(self, filename, info_dict):
    """Perform the actual transfer; concrete downloaders must override."""
    raise NotImplementedError('This method must be implemented by subclasses')
def _hook_progress(self, status):
for ph in self._progress_hooks:
ph(status)
def add_progress_hook(self, ph):
    """Register a progress hook.

    See YoutubeDL.py (search for progress_hooks) for a description of
    this interface.
    """
    self._progress_hooks.append(ph)
def _debug_cmd(self, args, exe=None):
    """With the 'verbose' option set, print the external command line.

    *exe* defaults to the basename of the command's first argument.
    """
    if not self.params.get('verbose', False):
        return
    str_args = [decodeArgument(a) for a in args]
    if exe is None:
        exe = os.path.basename(str_args[0])
    self.to_screen('[debug] %s command line: %s' % (
        exe, shell_quote(str_args)))
|
"""Support for monitoring the state of Digital Ocean droplets."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOVING,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import ATTR_ATTRIBUTION
import homeassistant.helpers.config_validation as cv
from . import (
ATTR_CREATED_AT,
ATTR_DROPLET_ID,
ATTR_DROPLET_NAME,
ATTR_FEATURES,
ATTR_IPV4_ADDRESS,
ATTR_IPV6_ADDRESS,
ATTR_MEMORY,
ATTR_REGION,
ATTR_VCPUS,
ATTRIBUTION,
CONF_DROPLETS,
DATA_DIGITAL_OCEAN,
)
# Module logger for this platform.
_LOGGER = logging.getLogger(__name__)

# NOTE(review): DEFAULT_NAME is never referenced in this module -- confirm
# whether it is still needed.
DEFAULT_NAME = "Droplet"

# Require a list of droplet names under the 'droplets' configuration key.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_DROPLETS): vol.All(cv.ensure_list, [cv.string])}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Digital Ocean droplet sensor.

    Aborts (returning False) when the Digital Ocean integration is not
    loaded or any configured droplet name cannot be resolved to an id.
    """
    digital_ocean = hass.data.get(DATA_DIGITAL_OCEAN)
    if not digital_ocean:
        return False
    entities = []
    for droplet_name in config[CONF_DROPLETS]:
        droplet_id = digital_ocean.get_droplet_id(droplet_name)
        if droplet_id is None:
            _LOGGER.error("Droplet %s is not available", droplet_name)
            return False
        entities.append(DigitalOceanBinarySensor(digital_ocean, droplet_id))
    add_entities(entities, True)
class DigitalOceanBinarySensor(BinarySensorEntity):
    """Representation of a Digital Ocean droplet sensor."""

    def __init__(self, do, droplet_id):
        """Initialize a new Digital Ocean sensor."""
        self._digital_ocean = do        # shared Digital Ocean data wrapper
        self._droplet_id = droplet_id   # id of the droplet this entity tracks
        self._state = None              # NOTE(review): never written after init
        self.data = None                # droplet record; populated by update()

    @property
    def name(self):
        """Return the name of the sensor (the droplet's name)."""
        return self.data.name

    @property
    def is_on(self):
        """Return true if the binary sensor is on (droplet status 'active')."""
        return self.data.status == "active"

    @property
    def device_class(self):
        """Return the class of this sensor.

        NOTE(review): 'moving' looks odd for a server-state sensor --
        confirm this is the intended device class upstream.
        """
        return DEVICE_CLASS_MOVING

    @property
    def device_state_attributes(self):
        """Return the state attributes of the Digital Ocean droplet."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            ATTR_CREATED_AT: self.data.created_at,
            ATTR_DROPLET_ID: self.data.id,
            ATTR_DROPLET_NAME: self.data.name,
            ATTR_FEATURES: self.data.features,
            ATTR_IPV4_ADDRESS: self.data.ip_address,
            ATTR_IPV6_ADDRESS: self.data.ip_v6_address,
            ATTR_MEMORY: self.data.memory,
            ATTR_REGION: self.data.region["name"],
            ATTR_VCPUS: self.data.vcpus,
        }

    def update(self):
        """Update state of sensor.

        Refreshes the shared Digital Ocean data and re-selects this
        entity's droplet record. If the droplet vanished, self.data keeps
        its previous value.
        """
        self._digital_ocean.update()
        for droplet in self._digital_ocean.data:
            if droplet.id == self._droplet_id:
                self.data = droplet
|
import inspect
from importlib import import_module
from inspect import cleandoc
from pathlib import Path
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.admindocs import utils
from django.contrib.admindocs.utils import (
replace_named_groups, replace_unnamed_groups,
)
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.db import models
from django.http import Http404
from django.template.engine import Engine
from django.urls import get_mod_func, get_resolver, get_urlconf
from django.utils.decorators import method_decorator
from django.utils.inspect import (
func_accepts_kwargs, func_accepts_var_args, get_func_full_args,
method_has_no_args,
)
from django.utils.translation import gettext as _
from django.views.generic import TemplateView
from .utils import get_view_name
# Prefixes of model attributes to omit from the generated model docs.
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class BaseAdminDocsView(TemplateView):
    """
    Base view for admindocs views.
    """

    @method_decorator(staff_member_required)
    def dispatch(self, request, *args, **kwargs):
        # Without docutils the documentation pages cannot be rendered, so
        # show an explanatory error page instead.
        if not utils.docutils_is_available:
            self.template_name = 'admin_doc/missing_docutils.html'
            return self.render_to_response(admin.site.each_context(request))
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = {**kwargs, **admin.site.each_context(self.request)}
        return super().get_context_data(**context)
class BookmarkletsView(BaseAdminDocsView):
    """Display the admindocs bookmarklets page."""
    template_name = 'admin_doc/bookmarklets.html'
class TemplateTagIndexView(BaseAdminDocsView):
    """Index of every registered template tag, grouped by library."""
    template_name = 'admin_doc/template_tag_index.html'

    def get_context_data(self, **kwargs):
        tags = []
        try:
            engine = Engine.get_default()
        except ImproperlyConfigured:
            # Non-trivial TEMPLATES settings aren't supported (#24125).
            pass
        else:
            app_libs = sorted(engine.template_libraries.items())
            # Builtins have no import path; use '' as their module name.
            builtin_libs = [('', lib) for lib in engine.template_builtins]
            for module_name, library in builtin_libs + app_libs:
                for tag_name, tag_func in library.tags.items():
                    # Split the tag docstring into title/body/metadata and
                    # render each part as reST.
                    title, body, metadata = utils.parse_docstring(tag_func.__doc__)
                    title = title and utils.parse_rst(title, 'tag', _('tag:') + tag_name)
                    body = body and utils.parse_rst(body, 'tag', _('tag:') + tag_name)
                    for key in metadata:
                        metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
                    # Display only the last dotted component of the module path.
                    tag_library = module_name.split('.')[-1]
                    tags.append({
                        'name': tag_name,
                        'title': title,
                        'body': body,
                        'meta': metadata,
                        'library': tag_library,
                    })
        return super().get_context_data(**{**kwargs, 'tags': tags})
class TemplateFilterIndexView(BaseAdminDocsView):
    """Index of every registered template filter, grouped by library."""
    template_name = 'admin_doc/template_filter_index.html'

    def get_context_data(self, **kwargs):
        filters = []
        try:
            engine = Engine.get_default()
        except ImproperlyConfigured:
            # Non-trivial TEMPLATES settings aren't supported (#24125).
            pass
        else:
            # Builtins first (with '' as their module name), then the
            # app-registered libraries in sorted order.
            libraries = [('', lib) for lib in engine.template_builtins]
            libraries += sorted(engine.template_libraries.items())
            for module_name, library in libraries:
                for filter_name, filter_func in library.filters.items():
                    title, body, metadata = utils.parse_docstring(filter_func.__doc__)
                    if title:
                        title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
                    if body:
                        body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
                    for key in metadata:
                        metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
                    filters.append({
                        'name': filter_name,
                        'title': title,
                        'body': body,
                        'meta': metadata,
                        # Last dotted component of the module path.
                        'library': module_name.split('.')[-1],
                    })
        return super().get_context_data(**{**kwargs, 'filters': filters})
class ViewIndexView(BaseAdminDocsView):
    """Index of every view reachable from ROOT_URLCONF."""
    template_name = 'admin_doc/view_index.html'

    def get_context_data(self, **kwargs):
        views = []
        urlconf = import_module(settings.ROOT_URLCONF)
        view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
        for (func, regex, namespace, name) in view_functions:
            views.append({
                'full_name': get_view_name(func),
                # Human-readable URL, e.g. '/<slug>/detail/'.
                'url': simplify_regex(regex),
                'url_name': ':'.join((namespace or []) + (name and [name] or [])),
                'namespace': ':'.join(namespace or []),
                'name': name,
            })
        return super().get_context_data(**{**kwargs, 'views': views})
class ViewDetailView(BaseAdminDocsView):
    """Documentation page for a single view, addressed by dotted path."""
    template_name = 'admin_doc/view_detail.html'

    @staticmethod
    def _get_view_func(view):
        """Resolve the dotted path *view* to a callable.

        Only names the URL resolver actually knows as callbacks are
        resolved; anything else yields None (implicitly).
        """
        urlconf = get_urlconf()
        if get_resolver(urlconf)._is_callback(view):
            mod, func = get_mod_func(view)
            try:
                # Separate the module and function, e.g.
                # 'mymodule.views.myview' -> 'mymodule.views', 'myview').
                return getattr(import_module(mod), func)
            except ImportError:
                # Import may fail because view contains a class name, e.g.
                # 'mymodule.views.ViewContainer.my_view', so mod takes the form
                # 'mymodule.views.ViewContainer'. Parse it again to separate
                # the module and class.
                mod, klass = get_mod_func(mod)
                return getattr(getattr(import_module(mod), klass), func)

    def get_context_data(self, **kwargs):
        view = self.kwargs['view']
        view_func = self._get_view_func(view)
        if view_func is None:
            raise Http404
        title, body, metadata = utils.parse_docstring(view_func.__doc__)
        title = title and utils.parse_rst(title, 'view', _('view:') + view)
        body = body and utils.parse_rst(body, 'view', _('view:') + view)
        for key in metadata:
            # NOTE(review): the 'model' context here (vs 'view' above)
            # looks like a copy-paste leftover -- confirm against upstream
            # before changing.
            metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
        return super().get_context_data(**{
            **kwargs,
            'name': view,
            'summary': title,
            'body': body,
            'meta': metadata,
        })
class ModelIndexView(BaseAdminDocsView):
    """Index page listing the _meta of every installed model."""
    template_name = 'admin_doc/model_index.html'

    def get_context_data(self, **kwargs):
        kwargs['models'] = [model._meta for model in apps.get_models()]
        return super().get_context_data(**kwargs)
class ModelDetailView(BaseAdminDocsView):
    """Detail page for one model: docstring, fields, methods, relations."""
    template_name = 'admin_doc/model_detail.html'

    def get_context_data(self, **kwargs):
        model_name = self.kwargs['model_name']
        # Get the model class.
        try:
            app_config = apps.get_app_config(self.kwargs['app_label'])
        except LookupError:
            raise Http404(_("App %(app_label)r not found") % self.kwargs)
        try:
            model = app_config.get_model(model_name)
        except LookupError:
            raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % self.kwargs)
        opts = model._meta

        title, body, metadata = utils.parse_docstring(model.__doc__)
        title = title and utils.parse_rst(title, 'model', _('model:') + model_name)
        body = body and utils.parse_rst(body, 'model', _('model:') + model_name)

        # Gather fields/field descriptions.
        fields = []
        for field in opts.fields:
            # ForeignKey is a special case since the field will actually be a
            # descriptor that returns the other object
            if isinstance(field, models.ForeignKey):
                data_type = field.remote_field.model.__name__
                app_label = field.remote_field.model._meta.app_label
                verbose = utils.parse_rst(
                    (_("the related `%(app_label)s.%(data_type)s` object") % {
                        'app_label': app_label, 'data_type': data_type,
                    }),
                    'model',
                    _('model:') + data_type,
                )
            else:
                data_type = get_readable_field_data_type(field)
                verbose = field.verbose_name
            fields.append({
                'name': field.name,
                'data_type': data_type,
                'verbose': verbose or '',
                'help_text': field.help_text,
            })

        # Gather many-to-many fields; each is documented via its '.all' and
        # '.count' accessors.
        for field in opts.many_to_many:
            data_type = field.remote_field.model.__name__
            app_label = field.remote_field.model._meta.app_label
            verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
                'app_label': app_label,
                'object_name': data_type,
            }
            fields.append({
                'name': "%s.all" % field.name,
                "data_type": 'List',
                'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
            })
            fields.append({
                'name': "%s.count" % field.name,
                'data_type': 'Integer',
                'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
            })

        methods = []
        # Gather model methods.
        for func_name, func in model.__dict__.items():
            if inspect.isfunction(func) or isinstance(func, property):
                # Idiom fix: the original abused try/raise StopIteration as a
                # labelled 'continue'; any() expresses the same prefix check
                # directly.
                if any(func_name.startswith(exclude) for exclude in MODEL_METHODS_EXCLUDE):
                    continue
                verbose = func.__doc__
                verbose = verbose and (
                    utils.parse_rst(cleandoc(verbose), 'model', _('model:') + opts.model_name)
                )
                # Show properties and methods without arguments as fields.
                # Otherwise, show as a 'method with arguments'.
                if isinstance(func, property):
                    fields.append({
                        'name': func_name,
                        'data_type': get_return_data_type(func_name),
                        'verbose': verbose or ''
                    })
                elif method_has_no_args(func) and not func_accepts_kwargs(func) and not func_accepts_var_args(func):
                    fields.append({
                        'name': func_name,
                        'data_type': get_return_data_type(func_name),
                        'verbose': verbose or '',
                    })
                else:
                    arguments = get_func_full_args(func)
                    # Join arguments with ', ' and in case of default value,
                    # join it with '='. Use repr() so that strings will be
                    # correctly displayed.
                    print_arguments = ', '.join([
                        '='.join([arg_el[0], *map(repr, arg_el[1:])])
                        for arg_el in arguments
                    ])
                    methods.append({
                        'name': func_name,
                        'arguments': print_arguments,
                        'verbose': verbose or '',
                    })

        # Gather related objects
        for rel in opts.related_objects:
            verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
                'app_label': rel.related_model._meta.app_label,
                'object_name': rel.related_model._meta.object_name,
            }
            accessor = rel.get_accessor_name()
            fields.append({
                'name': "%s.all" % accessor,
                'data_type': 'List',
                'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
            })
            fields.append({
                'name': "%s.count" % accessor,
                'data_type': 'Integer',
                'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
            })
        return super().get_context_data(**{
            **kwargs,
            'name': opts.label,
            'summary': title,
            'description': body,
            'fields': fields,
            'methods': methods,
        })
class TemplateDetailView(BaseAdminDocsView):
    """Show, for one template name, every directory-based candidate file."""
    template_name = 'admin_doc/template_detail.html'

    def get_context_data(self, **kwargs):
        template = self.kwargs['template']
        templates = []
        try:
            default_engine = Engine.get_default()
        except ImproperlyConfigured:
            # Non-trivial TEMPLATES settings aren't supported (#24125).
            pass
        else:
            # This doesn't account for template loaders (#24128).
            for index, directory in enumerate(default_engine.dirs):
                template_file = Path(directory) / template
                # Stat the file once; the original called exists() twice,
                # which was redundant and could race with concurrent file
                # changes between the two checks.
                exists = template_file.exists()
                templates.append({
                    'file': template_file,
                    'exists': exists,
                    'contents': template_file.read_text() if exists else '',
                    'order': index,
                })
        return super().get_context_data(**{
            **kwargs,
            'name': template,
            'templates': templates,
        })
def get_return_data_type(func_name):
    """Guess a return type ('List', 'Integer' or '') from a getter's name."""
    if func_name.startswith('get_'):
        if func_name.endswith('_list'):
            return 'List'
        if func_name.endswith('_count'):
            return 'Integer'
    return ''
def get_readable_field_data_type(field):
    """Interpolate a model field's description with its own attributes.

    Field descriptions may contain %-style placeholders which are filled
    from ``field.__dict__`` before being output.
    """
    return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None):
    """
    Return a list of views from a list of urlpatterns.

    Each object in the returned list is a four-tuple:
    (view_func, regex, namespace_list, name).
    """
    views = []
    for pattern in urlpatterns:
        if hasattr(pattern, 'url_patterns'):
            # An include() -- recurse into the nested urlconf, extending the
            # base path and namespace chain.
            try:
                nested = pattern.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(
                nested,
                base + str(pattern.pattern),
                (namespace or []) + (pattern.namespace and [pattern.namespace] or [])
            ))
        elif hasattr(pattern, 'callback'):
            try:
                views.append((pattern.callback, base + str(pattern.pattern),
                              namespace, pattern.name))
            except ViewDoesNotExist:
                continue
        else:
            raise TypeError(_("%s does not appear to be a urlpattern object") % pattern)
    return views
def simplify_regex(pattern):
    r"""
    Clean up urlpattern regexes into something more readable by humans. For
    example, turn "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    into "/<sport_slug>/athletes/<athlete_slug>/".
    """
    pattern = replace_unnamed_groups(replace_named_groups(pattern))
    # Strip remaining regex metacharacters that carry no path information.
    for char in ('^', '$', '?'):
        pattern = pattern.replace(char, '')
    return pattern if pattern.startswith('/') else '/' + pattern
|
import numpy as np
import time
from operator import add
import numpy as np
import random
from toolbox import *
from models import *
if __name__ == "__main__":
    # Hyperparameter container; assignments inside the 'with' block are
    # captured as attributes on hp (e.g. hp.save_model is read below).
    hp = Parameters()
    with hp:
        batch_size = 256
        test_batch_size = 256
        load_model = False
        save_model = True
        debug = False
        train_perm = False
        walkforward = False
        dynamic_eval = False
        #Model = lm_gru.LM_gru
        #Model = lm_draw.LM_draw
        Model = lm_lstm.LM_lstm
        #Model = lm_lstm_bn.LM_lstm_bn
        seq_size = 20
        warmup_size = 1
        init_scale = 1.05
        learning_rate = 1.5
        lr_halflife = 40
        optimizer = sgdgc
        description = ''

    data_path = 'data/'
    # Penn Treebank token stream; n_train=0 -> use the full training split.
    data = tokentext(path=data_path+'penntree/', name='penntree', batch_size=batch_size, n_train=0)
    #data = tokentext(path=data_path+'text8/', name='text8', batch_size=batch_size, n_train=0)
    visualize_tokens(-1, data['tr_X'][0:min(len(data['tr_X']), 500)]/float(data['n_tokens']), data['shape_x'])

    model = Model(data, hp)
    print ("M: %s lr: %.5f init: %.2f batch: %d seq_size: %d desc: %s" % (model.id, learning_rate, init_scale, batch_size, seq_size, description))

    if walkforward:
        # Walkforward learning
        # NOTE(review): walkstep_size, ws_iterations, ws_validstop and
        # n_stepdecay are not defined in this file -- presumably they come
        # from the star-import of toolbox; confirm.
        n_ws = len(data['tr_X']) / walkstep_size / batch_size
        it_lr = learning_rate
        for walkstep in xrange(0, n_ws):
            begin = time.time()
            min_validation = 100000.
            #tr_outputs = model.train_walkstep(walkstep, ws_iterations, it_lr)
            # Validate on previous data
            for it in range(ws_iterations):
                begin_inner = time.time()
                # Train on this walkstep's slice; outputs are averaged over
                # the slice.
                tr_outputs = None
                for i in xrange(0, walkstep_size):
                    batch_idx = walkstep * walkstep_size + i
                    outputs = model.train(batch_idx, it_lr)
                    outputs = map(lambda x: x / float(walkstep_size * batch_size), outputs)
                    if i==0:
                        tr_outputs = outputs
                    else:
                        tr_outputs = map(add, tr_outputs, outputs)
                # Validate on all data that precedes the current walkstep.
                prev_va_outputs = [0.] * 100
                for i in xrange(0, walkstep * walkstep_size):
                    outputs = model.validate(i)
                    outputs = map(lambda x: x / (walkstep * walkstep_size * batch_size), outputs)
                    prev_va_outputs = (map(add, prev_va_outputs, outputs) if i!=0 else outputs)
                print(" > %d,\t%.2f,\t%.2f,\t%.2f,\t%.2f,\t%.3f,\t%.2f" % (it,
                    tr_outputs[model.outidx['cost_q']], prev_va_outputs[model.outidx['cost_q']],
                    tr_outputs[model.outidx['cost']], prev_va_outputs[model.outidx['cost']],
                    tr_outputs[model.outidx['norm_grad']],
                    time.time() - begin_inner))
                # Early stopping on previous data
                if prev_va_outputs[model.outidx['cost']] < min_validation:
                    min_validation = prev_va_outputs[model.outidx['cost']]
                elif prev_va_outputs[model.outidx['cost']] > min_validation * (1. + ws_validstop):
                    break
            te_outputs = model.test_epoch()
            if model.type == 'SL':
                # Supervised learning
                print("%d,%.4f,%.4f,%.4f,%.4f,%.4f,%.2f" % (walkstep,
                    tr_outputs[model.outidx['cost']], te_outputs[model.outidx['cost']],
                    tr_outputs[model.outidx['error_map_pyx']], te_outputs[model.outidx['error_map_pyx']],
                    tr_outputs[model.outidx['norm_grad']],
                    time.time() - begin))
            else:
                # Unsupervised learning
                print("%d,\t%.2f,\t%.2f,\t%.2f,\t%.2f,\t%.3f,\t%.2f" % (walkstep,
                    tr_outputs[model.outidx['cost_q']], te_outputs[model.outidx['cost_q']],
                    tr_outputs[model.outidx['cost']], te_outputs[model.outidx['cost']],
                    tr_outputs[model.outidx['norm_grad']],
                    time.time() - begin))
            # Generate samples
            y_samples = model.decode(36 * (model.n_t + 1))
            y_samples = np.transpose(y_samples, (1,0,2)).reshape((-1, y_samples.shape[2]))
            visualize(walkstep + 1, y_samples, data['shape_x'])
            #it_lr = float(learning_rate / (walkstep + 1.))
            #it_lr = it_lr*n_stepdecay
            # Shrink the number of inner iterations each walkstep.
            ws_iterations = int(ws_iterations*n_stepdecay)
            model.save()

    elif dynamic_eval:
        # Dynamic evaluation
        it_lr = float(learning_rate)
        va_outputs = model.validation_epoch()
        te_outputs = model.test_epoch()
        dyn_va_outputs = model.dyn_validation_epoch(it_lr)
        # Restore pristine weights before dynamically evaluating on test data.
        model.load()
        dyn_te_outputs = model.dyn_test_epoch(it_lr)
        if model.type == 'LM':
            # Language model: report static and dynamic perplexities.
            print("0,%.2f,%.2f,%.2f,%.2f" % (np.exp(va_outputs[model.outidx['cost']]),
                np.exp(te_outputs[model.outidx['cost']]),
                np.exp(dyn_va_outputs[model.outidx['cost']]),
                np.exp(dyn_te_outputs[model.outidx['cost']]) ))

    else:
        # Full training data learning
        n_iterations = 10000
        freq_save = 2
        # NOTE(review): freq_sample is never used below -- confirm leftover.
        freq_sample = 2
        it_lr = float(learning_rate)
        #rnd_offset = np.arange(seq_size)
        rnd_offset = np.random.permutation(seq_size)
        for it in range(n_iterations):
            begin = time.time()
            model.permuteData(data)
            # Rotate the starting offset each epoch so sequence boundaries
            # vary across epochs.
            tr_outputs = model.train_epoch(it_lr, rnd_offset[it % seq_size])
            va_outputs = model.validation_epoch()
            #te_outputs = model.test_epoch()
            if model.type == 'LM':
                # Language model: report train/validation perplexity.
                print("%d,%.2f,%.2f,%.2e,%.2f" % (it,
                    np.exp(tr_outputs[model.outidx['cost']]),
                    np.exp(va_outputs[model.outidx['cost']]),
                    tr_outputs[model.outidx['norm_grad']],
                    time.time() - begin))
            # Save model parameters
            if hp.save_model and it % freq_save == 0:
                model.save()
            # Exponential learning-rate decay with the given half-life.
            if lr_halflife != 0:
                it_lr = float(it_lr*np.power(0.5, 1./lr_halflife))
|
import sys
sys.path.insert(0, '.')
from twisted.web.microdom import parseString
from twisted.web.domhelpers import findNodesNamed
from exe.engine.path import Path
import json
import re
if __name__ == '__main__':
    # Source XSD vocabularies, keyed by the JS variable name to emit.
    files = {'lomVocab': Path('exe') / 'webui' / 'schemas' / 'scorm2004' / 'common' / 'vocabValues.xsd',
             'lomesVocab': Path('exe') / 'webui' / 'schemas' / 'scorm2004' / 'vocab' / 'lomesvocab.xsd'}
    response = ''
    vocab = {}
    for varname, f in files.items():
        document = parseString(f.bytes(), escapeAttributes=0)
        # Each xs:simpletype holds one named vocabulary of xs:enumeration
        # values; collect [value, '_(value)'] pairs per vocabulary name.
        nodes = findNodesNamed(document, 'xs:simpletype')
        for node in nodes:
            name = node.getAttribute('name', str())
            enumerations = findNodesNamed(node, 'xs:enumeration')
            vocab[name] = []
            for enumeration in enumerations:
                vocab[name].append([enumeration.getAttribute('value'), '_(%s)' % enumeration.getAttribute('value')])
        # NOTE(review): 'vocab' is not reset between files, so the second
        # variable's dump also contains the first file's entries -- confirm
        # this accumulation is intended.
        response += '%s = %s;\n\n' % (varname, json.dumps(vocab, indent=4).encode('utf-8'))
    outfile = Path('exe') / 'jsui' / 'scripts' / 'lomvocab.js'
    # Rewrite '"_(value)"' into '_("value")' so the generated JS calls the
    # gettext-style _() function instead of embedding a literal string.
    response = re.sub('"_\(', '_("', response)
    response = re.sub('\)"', '")', response)
    outfile.write_bytes(response)
|
from django.conf import settings # noqa
from django.forms import ValidationError # noqa
from django import http
from django.utils.translation import ugettext_lazy as _ # noqa
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions as utils
from horizon.utils import validators
from openstack_dashboard import api
class PasswordForm(forms.SelfHandlingForm):
    """Form allowing the logged-in user to change their own password."""
    current_password = forms.CharField(label=_("Current password"),
                                       widget=forms.PasswordInput(render_value=False))
    new_password = forms.RegexField(label=_("New password"),
                                    widget=forms.PasswordInput(render_value=False),
                                    regex=validators.password_validator(),
                                    error_messages={'invalid':
                                        validators.password_validator_msg()})
    confirm_password = forms.CharField(label=_("Confirm new password"),
                                       widget=forms.PasswordInput(render_value=False))

    def clean(self):
        '''Check to make sure password fields match.'''
        # NOTE(review): super(forms.Form, self) deliberately skips the
        # classes up to forms.Form in the MRO -- confirm this is the
        # intended upstream behaviour before simplifying.
        data = super(forms.Form, self).clean()
        if 'new_password' in data:
            if data['new_password'] != data.get('confirm_password', None):
                raise ValidationError(_('Passwords do not match.'))
        return data

    # We have to protect the entire "data" dict because it contains the
    # oldpassword and newpassword strings.
    @sensitive_variables('data')
    def handle(self, request, data):
        """Change the user's password, logging them out on success.

        Returns an HttpResponseRedirect to the logout page on success and
        False on any failure. (Fix: removed an unreachable trailing
        'return True' -- both branches already returned.)
        """
        user_is_editable = api.keystone.keystone_can_edit_user()
        if not user_is_editable:
            messages.error(request, _('Changing password is not supported.'))
            return False
        try:
            api.keystone.user_update_own_password(request,
                                                  data['current_password'],
                                                  data['new_password'])
            response = http.HttpResponseRedirect(settings.LOGOUT_URL)
            msg = _("Password changed. Please log in again to continue.")
            utils.add_logout_reason(request, response, msg)
            return response
        except Exception:
            exceptions.handle(request,
                              _('Unable to change password.'))
            return False
|
'''Thread-safe version of Tkinter.
Copyright (c) 2009, Allen B. Taylor
This module is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser Public License for more details.
You should have received a copy of the GNU Lesser Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Usage:
import mtTkinter as Tkinter
# Use "Tkinter." as usual.
or
from mtTkinter import *
# Use Tkinter module definitions as usual.
This module modifies the original Tkinter module in memory, making all
functionality thread-safe. It does this by wrapping the Tk class' tk
instance with an object that diverts calls through an event queue when
the call is issued from a thread other than the thread in which the Tk
instance was created. The events are processed in the creation thread
via an 'after' event.
The modified Tk class accepts two additional keyword parameters on its
__init__ method:
mtDebug:
0 = No debug output (default)
1 = Minimal debug output
...
9 = Full debug output
mtCheckPeriod:
Amount of time in milliseconds (default 100) between checks for
out-of-thread events when things are otherwise idle. Decreasing
this value can improve GUI responsiveness, but at the expense of
consuming more CPU cycles.
Note that, because it modifies the original Tkinter module (in memory),
other modules that use Tkinter (e.g., Pmw) reap the benefits automagically
as long as mtTkinter is imported at some point before extra threads are
created.
Author: Allen B. Taylor, a.b.taylor@gmail.com
'''
from Tkinter import *
import threading
import Queue
class _Tk(object):
"""
Wrapper for underlying attribute tk of class Tk.
"""
def __init__(self, tk, mtDebug = 0, mtCheckPeriod = 10):
self._tk = tk
# Create the incoming event queue.
self._eventQueue = Queue.Queue(1)
# Identify the thread from which this object is being created so we can
# tell later whether an event is coming from another thread.
self._creationThread = threading.currentThread()
# Store remaining values.
self._debug = mtDebug
self._checkPeriod = mtCheckPeriod
def __getattr__(self, name):
# Divert attribute accesses to a wrapper around the underlying tk
# object.
return _TkAttr(self, getattr(self._tk, name))
class _TkAttr(object):
    """
    Thread-safe callable attribute wrapper.
    """
    def __init__(self, tk, attr):
        self._tk = tk      # owning _Tk wrapper (holds queue + creation thread)
        self._attr = attr  # the real attribute of the underlying tk object

    def __call__(self, *args, **kwargs):
        """
        Thread-safe method invocation.
        Diverts out-of-thread calls through the event queue.
        Forwards all other method calls to the underlying tk object directly.
        """
        # Check if we're in the creation thread.
        if threading.currentThread() == self._tk._creationThread:
            # We're in the creation thread; just call the event directly.
            if self._tk._debug >= 8 or \
               self._tk._debug >= 3 and self._attr.__name__ == 'call' and \
               len(args) >= 1 and args[0] == 'after':
                print 'Calling event directly:', \
                    self._attr.__name__, args, kwargs
            return self._attr(*args, **kwargs)
        else:
            # We're in a different thread than the creation thread; enqueue
            # the event, and then wait for the response.
            responseQueue = Queue.Queue(1)
            if self._tk._debug >= 1:
                print 'Marshalling event:', self._attr.__name__, args, kwargs
            self._tk._eventQueue.put((self._attr, args, kwargs, responseQueue))
            # Block until the GUI thread has executed the call on our behalf.
            isException, response = responseQueue.get()
            # Handle the response, whether it's a normal return value or
            # an exception.
            if isException:
                # Re-raise in this thread with the original traceback
                # (Python 2 three-argument raise form).
                exType, exValue, exTb = response
                raise exType, exValue, exTb
            else:
                return response
def _Tk__init__(self, *args, **kwargs):
    """Replacement Tk.__init__: strips the mtTkinter-specific keyword
    arguments, runs the original initializer, then wraps the tk object so
    cross-thread calls are marshalled onto this (creation) thread."""
    # We support some new keyword arguments that the original __init__ method
    # doesn't expect, so separate those out before doing anything else.
    # Pop our private keywords instead of deleting entries while iterating
    # over kwargs.items(): mutating a dict during iteration raises a
    # RuntimeError under Python 3 and is fragile in general.
    new_kwnames = ('mtCheckPeriod', 'mtDebug')
    new_kwargs = {}
    for name in new_kwnames:
        if name in kwargs:
            new_kwargs[name] = kwargs.pop(name)
    # Call the original __init__ method, creating the internal tk member.
    self.__original__init__mtTkinter(*args, **kwargs)
    # Replace the internal tk member with a wrapper that handles calls from
    # other threads.
    self.tk = _Tk(self.tk, **new_kwargs)
    # Set up the first event to check for out-of-thread events.
    self.after_idle(_CheckEvents, self)
# Monkey-patch Tkinter's Tk class: stash the original __init__ under a
# private name, then install the thread-aware replacement defined above.
Tk.__original__init__mtTkinter = Tk.__init__
Tk.__init__ = _Tk__init__
def _CheckEvents(tk):
"Event checker event."
used = False
try:
# Process all enqueued events, then exit.
while True:
try:
# Get an event request from the queue.
method, args, kwargs, responseQueue = \
tk.tk._eventQueue.get_nowait()
except:
# No more events to process.
break
else:
# Call the event with the given arguments, and then return
# the result back to the caller via the response queue.
used = True
if tk.tk._debug >= 2:
print 'Calling event from main thread:', \
method.__name__, args, kwargs
try:
responseQueue.put((False, method(*args, **kwargs)))
except SystemExit, ex:
raise SystemExit, ex
except Exception, ex:
# Calling the event caused an exception; return the
# exception back to the caller so that it can be raised
# in the caller's thread.
from sys import exc_info
exType, exValue, exTb = exc_info()
responseQueue.put((True, (exType, exValue, exTb)))
finally:
# Schedule to check again. If we just processed an event, check
# immediately; if we didn't, check later.
if used:
tk.after_idle(_CheckEvents, tk)
else:
tk.after(tk.tk._checkPeriod, _CheckEvents, tk)
def _testThread(root):
    """Demo worker: build a small UI from a secondary (non-GUI) thread --
    the very thing mtTkinter exists to make safe -- then start a
    self-clicking button loop."""
    text = "This is Tcl/Tk version %s" % TclVersion
    if TclVersion >= 8.1:
        try:
            # Exercise non-ASCII text handling (Python 2 'unicode').
            text = text + unicode("\nThis should be a cedilla: \347",
                                  "iso-8859-1")
        except NameError:
            pass # no unicode support
    try:
        if root.globalgetvar('tcl_platform(threaded)'):
            text = text + "\nTcl is built with thread support"
        else:
            raise RuntimeError
    except:
        # Variable missing or false: report a non-threaded Tcl build.
        text = text + "\nTcl is NOT built with thread support"
    text = text + "\nmtTkinter works with or without Tcl thread support"
    label = Label(root, text=text)
    label.pack()
    button = Button(root, text="Click me!",
                    command=lambda root=root: root.button.configure(
                        text="[%s]" % root.button['text']))
    button.pack()
    root.button = button
    quit = Button(root, text="QUIT", command=root.destroy)
    quit.pack()
    # The following three commands are needed so the window pops
    # up on top on Windows...
    root.iconify()
    root.update()
    root.deiconify()
    # Simulate button presses...
    button.invoke()
    root.after(1000, _pressOk, root, button)
def _pressOk(root, button):
    """Invoke *button* once a second, forever, from a timer callback (demo)."""
    button.invoke()
    try:
        # Reschedule ourselves; fails once the root window is destroyed.
        root.after(1000, _pressOk, root, button)
    except:
        pass # Likely we're exiting
# Demo entry point: run the Tk mainloop in the main thread while a second
# thread builds the UI through the mtTkinter marshalling layer.
if __name__ == '__main__':
    import threading
    root = Tk(mtDebug = 1)
    thread = threading.Thread(target = _testThread, args=(root,))
    thread.start()
    root.mainloop()
    thread.join()
|
"""
Tests for L{twisted.trial._dist.distreporter}.
"""
from twisted.python.compat import NativeStringIO as StringIO
from twisted.trial._dist.distreporter import DistReporter
from twisted.trial.unittest import TestCase
from twisted.trial.reporter import TreeReporter
class DistReporterTests(TestCase):
    """
    Tests for L{DistReporter}.
    """

    def setUp(self):
        self.stream = StringIO()
        self.distReporter = DistReporter(TreeReporter(self.stream))
        self.test = TestCase()

    def test_startSuccessStop(self):
        """
        Success output only gets sent to the stream after the test has stopped.
        """
        reporter = self.distReporter
        reporter.startTest(self.test)
        self.assertEqual("", self.stream.getvalue())
        reporter.addSuccess(self.test)
        self.assertEqual("", self.stream.getvalue())
        reporter.stopTest(self.test)
        self.assertNotEqual("", self.stream.getvalue())

    def test_startErrorStop(self):
        """
        Error output only gets sent to the stream after the test has stopped.
        """
        reporter = self.distReporter
        reporter.startTest(self.test)
        self.assertEqual("", self.stream.getvalue())
        reporter.addError(self.test, "error")
        self.assertEqual("", self.stream.getvalue())
        reporter.stopTest(self.test)
        self.assertNotEqual("", self.stream.getvalue())

    def test_forwardedMethods(self):
        """
        Calling methods of L{DistReporter} add calls to the running queue of
        the test.
        """
        reporter = self.distReporter
        reporter.startTest(self.test)
        reporter.addFailure(self.test, "foo")
        reporter.addError(self.test, "bar")
        reporter.addSkip(self.test, "egg")
        reporter.addUnexpectedSuccess(self.test, "spam")
        reporter.addExpectedFailure(self.test, "err", "foo")
        self.assertEqual(6, len(reporter.running[self.test.id()]))
|
def make_string(seq):
    """Convert a sequence of character codes to an ASCII string.

    Codes outside the printable range [32, 256) are dropped.  If nothing
    printable remains, the original sequence is returned unchanged so the
    caller still has the raw data.
    """
    # NOTE: the original used a local named 'str', shadowing the builtin.
    result = ''
    for code in seq:
        # Screen out non-printing characters
        if 32 <= code < 256:
            result += chr(code)
    # If no printing chars
    if not result:
        return seq
    return result
def make_string_uc(seq):
    """Convert an EXIF UserComment value to a string.

    The first 8 bytes declare the character code (ASCII, JIS, Unicode...);
    they are skipped and the remainder is decoded as if ASCII.
    """
    # Drop the 8-byte character-code header; its value is ignored (the
    # original bound it to an unused local).
    seq = seq[8:]
    # Of course, this is only correct if ASCII, and the standard explicitly
    # allows JIS and Unicode.
    return make_string(seq)
# TIFF/EXIF field types, indexed by the on-disk type number:
# (byte length of one element, abbreviation, human-readable name).
# Index 0 is a placeholder -- real type numbers start at 1.
FIELD_TYPES = (
    (0, 'X', 'Proprietary'), # no such type
    (1, 'B', 'Byte'),
    (1, 'A', 'ASCII'),
    (2, 'S', 'Short'),
    (4, 'L', 'Long'),
    (8, 'R', 'Ratio'),
    (1, 'SB', 'Signed Byte'),
    (1, 'U', 'Undefined'),
    (2, 'SS', 'Signed Short'),
    (4, 'SL', 'Signed Long'),
    (8, 'SR', 'Signed Ratio'),
    )
# Main EXIF/TIFF tag dictionary, keyed by tag number.  Each value is a tuple:
# (tag name, optional second element).  The second element is either a dict
# translating raw values to readable strings, or a function that formats the
# raw value sequence (e.g. make_string).
EXIF_TAGS = {
    0x0100: ('ImageWidth', ),
    0x0101: ('ImageLength', ),
    0x0102: ('BitsPerSample', ),
    0x0103: ('Compression',
             {1: 'Uncompressed',
              2: 'CCITT 1D',
              3: 'T4/Group 3 Fax',
              4: 'T6/Group 4 Fax',
              5: 'LZW',
              6: 'JPEG (old-style)',
              7: 'JPEG',
              8: 'Adobe Deflate',
              9: 'JBIG B&W',
              10: 'JBIG Color',
              32766: 'Next',
              32769: 'Epson ERF Compressed',
              32771: 'CCIRLEW',
              32773: 'PackBits',
              32809: 'Thunderscan',
              32895: 'IT8CTPAD',
              32896: 'IT8LW',
              32897: 'IT8MP',
              32898: 'IT8BL',
              32908: 'PixarFilm',
              32909: 'PixarLog',
              32946: 'Deflate',
              32947: 'DCS',
              34661: 'JBIG',
              34676: 'SGILog',
              34677: 'SGILog24',
              34712: 'JPEG 2000',
              34713: 'Nikon NEF Compressed',
              65000: 'Kodak DCR Compressed',
              65535: 'Pentax PEF Compressed'}),
    0x0106: ('PhotometricInterpretation', ),
    0x0107: ('Thresholding', ),
    0x010A: ('FillOrder', ),
    0x010D: ('DocumentName', ),
    0x010E: ('ImageDescription', ),
    0x010F: ('Make', ),
    0x0110: ('Model', ),
    0x0111: ('StripOffsets', ),
    0x0112: ('Orientation',
             {1: 'Horizontal (normal)',
              2: 'Mirrored horizontal',
              3: 'Rotated 180',
              4: 'Mirrored vertical',
              5: 'Mirrored horizontal then rotated 90 CCW',
              6: 'Rotated 90 CW',
              7: 'Mirrored horizontal then rotated 90 CW',
              8: 'Rotated 90 CCW'}),
    0x0115: ('SamplesPerPixel', ),
    0x0116: ('RowsPerStrip', ),
    0x0117: ('StripByteCounts', ),
    0x011A: ('XResolution', ),
    0x011B: ('YResolution', ),
    0x011C: ('PlanarConfiguration', ),
    0x011D: ('PageName', make_string),
    0x0128: ('ResolutionUnit',
             {1: 'Not Absolute',
              2: 'Pixels/Inch',
              3: 'Pixels/Centimeter'}),
    0x012D: ('TransferFunction', ),
    0x0131: ('Software', ),
    0x0132: ('DateTime', ),
    0x013B: ('Artist', ),
    0x013E: ('WhitePoint', ),
    0x013F: ('PrimaryChromaticities', ),
    0x0156: ('TransferRange', ),
    0x0200: ('JPEGProc', ),
    0x0201: ('JPEGInterchangeFormat', ),
    0x0202: ('JPEGInterchangeFormatLength', ),
    0x0211: ('YCbCrCoefficients', ),
    0x0212: ('YCbCrSubSampling', ),
    0x0213: ('YCbCrPositioning',
             {1: 'Centered',
              2: 'Co-sited'}),
    0x0214: ('ReferenceBlackWhite', ),
    0x4746: ('Rating', ),
    0x828D: ('CFARepeatPatternDim', ),
    0x828E: ('CFAPattern', ),
    0x828F: ('BatteryLevel', ),
    0x8298: ('Copyright', ),
    0x829A: ('ExposureTime', ),
    0x829D: ('FNumber', ),
    0x83BB: ('IPTC/NAA', ),
    0x8769: ('ExifOffset', ),
    0x8773: ('InterColorProfile', ),
    0x8822: ('ExposureProgram',
             {0: 'Unidentified',
              1: 'Manual',
              2: 'Program Normal',
              3: 'Aperture Priority',
              4: 'Shutter Priority',
              5: 'Program Creative',
              6: 'Program Action',
              7: 'Portrait Mode',
              8: 'Landscape Mode'}),
    0x8824: ('SpectralSensitivity', ),
    0x8825: ('GPSInfo', ),
    0x8827: ('ISOSpeedRatings', ),
    0x8828: ('OECF', ),
    0x9000: ('ExifVersion', make_string),
    0x9003: ('DateTimeOriginal', ),
    0x9004: ('DateTimeDigitized', ),
    0x9101: ('ComponentsConfiguration',
             {0: '',
              1: 'Y',
              2: 'Cb',
              3: 'Cr',
              4: 'Red',
              5: 'Green',
              6: 'Blue'}),
    0x9102: ('CompressedBitsPerPixel', ),
    0x9201: ('ShutterSpeedValue', ),
    0x9202: ('ApertureValue', ),
    0x9203: ('BrightnessValue', ),
    0x9204: ('ExposureBiasValue', ),
    0x9205: ('MaxApertureValue', ),
    0x9206: ('SubjectDistance', ),
    0x9207: ('MeteringMode',
             {0: 'Unidentified',
              1: 'Average',
              2: 'CenterWeightedAverage',
              3: 'Spot',
              4: 'MultiSpot',
              5: 'Pattern'}),
    0x9208: ('LightSource',
             {0: 'Unknown',
              1: 'Daylight',
              2: 'Fluorescent',
              3: 'Tungsten',
              9: 'Fine Weather',
              10: 'Flash',
              11: 'Shade',
              12: 'Daylight Fluorescent',
              13: 'Day White Fluorescent',
              14: 'Cool White Fluorescent',
              15: 'White Fluorescent',
              17: 'Standard Light A',
              18: 'Standard Light B',
              19: 'Standard Light C',
              20: 'D55',
              21: 'D65',
              22: 'D75',
              255: 'Other'}),
    0x9209: ('Flash',
             {0: 'No',
              1: 'Fired',
              5: 'Fired (?)', # no return sensed
              7: 'Fired (!)', # return sensed
              9: 'Fill Fired',
              13: 'Fill Fired (?)',
              15: 'Fill Fired (!)',
              16: 'Off',
              24: 'Auto Off',
              25: 'Auto Fired',
              29: 'Auto Fired (?)',
              31: 'Auto Fired (!)',
              32: 'Not Available'}),
    0x920A: ('FocalLength', ),
    0x9214: ('SubjectArea', ),
    0x927C: ('MakerNote', ),
    0x9286: ('UserComment', make_string_uc),
    0x9290: ('SubSecTime', ),
    0x9291: ('SubSecTimeOriginal', ),
    0x9292: ('SubSecTimeDigitized', ),
    # used by Windows Explorer
    0x9C9B: ('XPTitle', ),
    0x9C9C: ('XPComment', ),
    0x9C9D: ('XPAuthor', ), #(ignored by Windows Explorer if Artist exists)
    0x9C9E: ('XPKeywords', ),
    0x9C9F: ('XPSubject', ),
    0xA000: ('FlashPixVersion', make_string),
    0xA001: ('ColorSpace',
             {1: 'sRGB',
              2: 'Adobe RGB',
              65535: 'Uncalibrated'}),
    0xA002: ('ExifImageWidth', ),
    0xA003: ('ExifImageLength', ),
    0xA005: ('InteroperabilityOffset', ),
    0xA20B: ('FlashEnergy', ),               # 0x920B in TIFF/EP
    0xA20C: ('SpatialFrequencyResponse', ),  # 0x920C
    0xA20E: ('FocalPlaneXResolution', ),     # 0x920E
    0xA20F: ('FocalPlaneYResolution', ),     # 0x920F
    0xA210: ('FocalPlaneResolutionUnit', ),  # 0x9210
    0xA214: ('SubjectLocation', ),           # 0x9214
    0xA215: ('ExposureIndex', ),             # 0x9215
    0xA217: ('SensingMethod',                # 0x9217
             {1: 'Not defined',
              2: 'One-chip color area',
              3: 'Two-chip color area',
              4: 'Three-chip color area',
              5: 'Color sequential area',
              7: 'Trilinear',
              8: 'Color sequential linear'}),
    0xA300: ('FileSource',
             {1: 'Film Scanner',
              2: 'Reflection Print Scanner',
              3: 'Digital Camera'}),
    0xA301: ('SceneType',
             {1: 'Directly Photographed'}),
    0xA302: ('CVAPattern', ),
    0xA401: ('CustomRendered',
             {0: 'Normal',
              1: 'Custom'}),
    0xA402: ('ExposureMode',
             {0: 'Auto Exposure',
              1: 'Manual Exposure',
              2: 'Auto Bracket'}),
    0xA403: ('WhiteBalance',
             {0: 'Auto',
              1: 'Manual'}),
    0xA404: ('DigitalZoomRatio', ),
    0xA405: ('FocalLengthIn35mmFilm', ),
    0xA406: ('SceneCaptureType',
             {0: 'Standard',
              1: 'Landscape',
              2: 'Portrait',
              3: 'Night)'}),
    0xA407: ('GainControl',
             {0: 'None',
              1: 'Low gain up',
              2: 'High gain up',
              3: 'Low gain down',
              4: 'High gain down'}),
    0xA408: ('Contrast',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA409: ('Saturation',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA40A: ('Sharpness',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA40B: ('DeviceSettingDescription', ),
    0xA40C: ('SubjectDistanceRange', ),
    0xA500: ('Gamma', ),
    0xC4A5: ('PrintIM', ),
    0xEA1C: ('Padding', ),
    }
# Interoperability IFD tags (pointed to by InteroperabilityOffset).
INTR_TAGS = {
    0x0001: ('InteroperabilityIndex', ),
    0x0002: ('InteroperabilityVersion', ),
    0x1000: ('RelatedImageFileFormat', ),
    0x1001: ('RelatedImageWidth', ),
    0x1002: ('RelatedImageLength', ),
    }
# GPS IFD tags (pointed to by GPSInfo).
GPS_TAGS = {
    0x0000: ('GPSVersionID', ),
    0x0001: ('GPSLatitudeRef', ),
    0x0002: ('GPSLatitude', ),
    0x0003: ('GPSLongitudeRef', ),
    0x0004: ('GPSLongitude', ),
    0x0005: ('GPSAltitudeRef', ),
    0x0006: ('GPSAltitude', ),
    0x0007: ('GPSTimeStamp', ),
    0x0008: ('GPSSatellites', ),
    0x0009: ('GPSStatus', ),
    0x000A: ('GPSMeasureMode', ),
    0x000B: ('GPSDOP', ),
    0x000C: ('GPSSpeedRef', ),
    0x000D: ('GPSSpeed', ),
    0x000E: ('GPSTrackRef', ),
    0x000F: ('GPSTrack', ),
    0x0010: ('GPSImgDirectionRef', ),
    0x0011: ('GPSImgDirection', ),
    0x0012: ('GPSMapDatum', ),
    0x0013: ('GPSDestLatitudeRef', ),
    0x0014: ('GPSDestLatitude', ),
    0x0015: ('GPSDestLongitudeRef', ),
    0x0016: ('GPSDestLongitude', ),
    0x0017: ('GPSDestBearingRef', ),
    0x0018: ('GPSDestBearing', ),
    0x0019: ('GPSDestDistanceRef', ),
    0x001A: ('GPSDestDistance', ),
    0x001D: ('GPSDate', ),
    }
# Tags skipped for speed when detailed processing is off (see dump_IFD):
# 0x9286 UserComment and 0x927C MakerNote.
IGNORE_TAGS=(0x9286, 0x927C)
def nikon_ev_bias(seq):
    """Decode a Nikon EV-bias value (e.g. exposure compensation).

    *seq* is a list of at least 4 bytes; seq[0] is a two's-complement EV
    offset in steps of 1/seq[2] EV.  Returns a string such as '+1/3 EV',
    or '' if the sequence is too short to be valid.
    """
    # First digit seems to be in steps of 1/6 EV.
    # Does the third value mean the step size? It is usually 6,
    # but it is 12 for the ExposureDifference.
    #
    # Check for an error condition that could cause a crash.
    # This only happens if something has gone really wrong in
    # reading the Nikon MakerNote.
    if len( seq ) < 4 : return ""
    # Fast path: the common values, straight from a lookup table.
    if seq == [252, 1, 6, 0]:
        return "-2/3 EV"
    if seq == [253, 1, 6, 0]:
        return "-1/2 EV"
    if seq == [254, 1, 6, 0]:
        return "-1/3 EV"
    if seq == [0, 1, 6, 0]:
        return "0 EV"
    if seq == [2, 1, 6, 0]:
        return "+1/3 EV"
    if seq == [3, 1, 6, 0]:
        return "+1/2 EV"
    if seq == [4, 1, 6, 0]:
        return "+2/3 EV"
    # Handle combinations not in the table.
    a = seq[0]
    # Causes headaches for the +/- logic, so special case it.
    if a == 0:
        return "0 EV"
    if a > 127:
        # Negative offsets are stored as two's complement.
        a = 256 - a
        ret_str = "-"
    else:
        ret_str = "+"
    b = seq[2] # Assume third value means the step size
    # Use floor division so the result stays an integer: plain '/' was
    # integer division only under Python 2 and would produce a float
    # (printed as e.g. '1.0') under Python 3.
    whole = a // b
    a = a % b
    if whole != 0:
        ret_str = ret_str + str(whole) + " "
    if a == 0:
        ret_str = ret_str + "EV"
    else:
        r = Ratio(a, b)
        ret_str = ret_str + r.__repr__() + " EV"
    return ret_str
# MakerNote tags for newer Nikon cameras (type-3 makernote layout).
# Same value format as EXIF_TAGS: (name, optional translation dict/function).
MAKERNOTE_NIKON_NEWER_TAGS={
    0x0001: ('MakernoteVersion', make_string),  # Sometimes binary
    0x0002: ('ISOSetting', make_string),
    0x0003: ('ColorMode', ),
    0x0004: ('Quality', ),
    0x0005: ('Whitebalance', ),
    0x0006: ('ImageSharpening', ),
    0x0007: ('FocusMode', ),
    0x0008: ('FlashSetting', ),
    0x0009: ('AutoFlashMode', ),
    0x000B: ('WhiteBalanceBias', ),
    0x000C: ('WhiteBalanceRBCoeff', ),
    0x000D: ('ProgramShift', nikon_ev_bias),
    # Nearly the same as the other EV vals, but step size is 1/12 EV (?)
    0x000E: ('ExposureDifference', nikon_ev_bias),
    0x000F: ('ISOSelection', ),
    0x0011: ('NikonPreview', ),
    0x0012: ('FlashCompensation', nikon_ev_bias),
    0x0013: ('ISOSpeedRequested', ),
    0x0016: ('PhotoCornerCoordinates', ),
    # 0x0017: Unknown, but most likely an EV value
    0x0018: ('FlashBracketCompensationApplied', nikon_ev_bias),
    0x0019: ('AEBracketCompensationApplied', ),
    0x001A: ('ImageProcessing', ),
    0x001B: ('CropHiSpeed', ),
    0x001D: ('SerialNumber', ),	# Conflict with 0x00A0 ?
    0x001E: ('ColorSpace', ),
    0x001F: ('VRInfo', ),
    0x0020: ('ImageAuthentication', ),
    0x0022: ('ActiveDLighting', ),
    0x0023: ('PictureControl', ),
    0x0024: ('WorldTime', ),
    0x0025: ('ISOInfo', ),
    0x0080: ('ImageAdjustment', ),
    0x0081: ('ToneCompensation', ),
    0x0082: ('AuxiliaryLens', ),
    0x0083: ('LensType', ),
    0x0084: ('LensMinMaxFocalMaxAperture', ),
    0x0085: ('ManualFocusDistance', ),
    0x0086: ('DigitalZoomFactor', ),
    0x0087: ('FlashMode',
             {0x00: 'Did Not Fire',
              0x01: 'Fired, Manual',
              0x07: 'Fired, External',
              0x08: 'Fired, Commander Mode ',
              0x09: 'Fired, TTL Mode'}),
    0x0088: ('AFFocusPosition',
             {0x0000: 'Center',
              0x0100: 'Top',
              0x0200: 'Bottom',
              0x0300: 'Left',
              0x0400: 'Right'}),
    0x0089: ('BracketingMode',
             {0x00: 'Single frame, no bracketing',
              0x01: 'Continuous, no bracketing',
              0x02: 'Timer, no bracketing',
              0x10: 'Single frame, exposure bracketing',
              0x11: 'Continuous, exposure bracketing',
              0x12: 'Timer, exposure bracketing',
              0x40: 'Single frame, white balance bracketing',
              0x41: 'Continuous, white balance bracketing',
              0x42: 'Timer, white balance bracketing'}),
    0x008A: ('AutoBracketRelease', ),
    0x008B: ('LensFStops', ),
    0x008C: ('NEFCurve1', ),	# ExifTool calls this 'ContrastCurve'
    0x008D: ('ColorMode', ),
    0x008F: ('SceneMode', ),
    0x0090: ('LightingType', ),
    0x0091: ('ShotInfo', ),	# First 4 bytes are a version number in ASCII
    0x0092: ('HueAdjustment', ),
    # ExifTool calls this 'NEFCompression', should be 1-4
    0x0093: ('Compression', ),
    0x0094: ('Saturation',
             {-3: 'B&W',
              -2: '-2',
              -1: '-1',
              0: '0',
              1: '1',
              2: '2'}),
    0x0095: ('NoiseReduction', ),
    0x0096: ('NEFCurve2', ),	# ExifTool calls this 'LinearizationTable'
    0x0097: ('ColorBalance', ),	# First 4 bytes are a version number in ASCII
    0x0098: ('LensData', ),	# First 4 bytes are a version number in ASCII
    0x0099: ('RawImageCenter', ),
    0x009A: ('SensorPixelSize', ),
    0x009C: ('Scene Assist', ),
    0x009E: ('RetouchHistory', ),
    0x00A0: ('SerialNumber', ),
    0x00A2: ('ImageDataSize', ),
    # 00A3: unknown - a single byte 0
    # 00A4: In NEF, looks like a 4 byte ASCII version number ('0200')
    0x00A5: ('ImageCount', ),
    0x00A6: ('DeletedImageCount', ),
    0x00A7: ('TotalShutterReleases', ),
    # First 4 bytes are a version number in ASCII, with version specific
    # info to follow.  It's hard to treat it as a string due to embedded nulls.
    0x00A8: ('FlashInfo', ),
    0x00A9: ('ImageOptimization', ),
    0x00AA: ('Saturation', ),
    0x00AB: ('DigitalVariProgram', ),
    0x00AC: ('ImageStabilization', ),
    0x00AD: ('Responsive AF', ),	# 'AFResponse'
    0x00B0: ('MultiExposure', ),
    0x00B1: ('HighISONoiseReduction', ),
    0x00B7: ('AFInfo', ),
    0x00B8: ('FileInfo', ),
    # 00B9: unknown
    0x0100: ('DigitalICE', ),
    0x0103: ('PreviewCompression',
             {1: 'Uncompressed',
              2: 'CCITT 1D',
              3: 'T4/Group 3 Fax',
              4: 'T6/Group 4 Fax',
              5: 'LZW',
              6: 'JPEG (old-style)',
              7: 'JPEG',
              8: 'Adobe Deflate',
              9: 'JBIG B&W',
              10: 'JBIG Color',
              32766: 'Next',
              32769: 'Epson ERF Compressed',
              32771: 'CCIRLEW',
              32773: 'PackBits',
              32809: 'Thunderscan',
              32895: 'IT8CTPAD',
              32896: 'IT8LW',
              32897: 'IT8MP',
              32898: 'IT8BL',
              32908: 'PixarFilm',
              32909: 'PixarLog',
              32946: 'Deflate',
              32947: 'DCS',
              34661: 'JBIG',
              34676: 'SGILog',
              34677: 'SGILog24',
              34712: 'JPEG 2000',
              34713: 'Nikon NEF Compressed',
              65000: 'Kodak DCR Compressed',
              65535: 'Pentax PEF Compressed',}),
    0x0201: ('PreviewImageStart', ),
    0x0202: ('PreviewImageLength', ),
    0x0213: ('PreviewYCbCrPositioning',
             {1: 'Centered',
              2: 'Co-sited'}),
    0x0010: ('DataDump', ),
    }
# MakerNote tags for older Nikon cameras (type-1/type-2 makernote layout).
MAKERNOTE_NIKON_OLDER_TAGS = {
    0x0003: ('Quality',
             {1: 'VGA Basic',
              2: 'VGA Normal',
              3: 'VGA Fine',
              4: 'SXGA Basic',
              5: 'SXGA Normal',
              6: 'SXGA Fine'}),
    0x0004: ('ColorMode',
             {1: 'Color',
              2: 'Monochrome'}),
    0x0005: ('ImageAdjustment',
             {0: 'Normal',
              1: 'Bright+',
              2: 'Bright-',
              3: 'Contrast+',
              4: 'Contrast-'}),
    0x0006: ('CCDSpeed',
             {0: 'ISO 80',
              2: 'ISO 160',
              4: 'ISO 320',
              5: 'ISO 100'}),
    0x0007: ('WhiteBalance',
             {0: 'Auto',
              1: 'Preset',
              2: 'Daylight',
              3: 'Incandescent',
              4: 'Fluorescent',
              5: 'Cloudy',
              6: 'Speed Light'}),
    }
def olympus_special_mode(v):
    """Decode the Olympus SpecialMode tag: (mode, sequence number, direction)."""
    modes = {
        0: 'Normal',
        1: 'Unknown',
        2: 'Fast',
        3: 'Panorama',
    }
    directions = {
        0: 'Non-panoramic',
        1: 'Left to right',
        2: 'Right to left',
        3: 'Bottom to top',
        4: 'Top to bottom',
    }
    # Unrecognized codes: hand the raw values back untouched.
    if v[0] not in modes or v[2] not in directions:
        return v
    return '%s - sequence %d - %s' % (modes[v[0]], v[1], directions[v[2]])
# Olympus MakerNote tags.  Same value format as EXIF_TAGS.
# NOTE: the original literal listed key 0x1004 twice (a bare
# "('FlashMode', )" entry immediately followed by the full one); Python
# dict literals keep only the last duplicate, so the dead first entry has
# been removed -- the resulting dict is unchanged.
MAKERNOTE_OLYMPUS_TAGS={
    # ah HAH! those sneeeeeaky bastids! this is how they get past the fact
    # that a JPEG thumbnail is not allowed in an uncompressed TIFF file
    0x0100: ('JPEGThumbnail', ),
    0x0200: ('SpecialMode', olympus_special_mode),
    0x0201: ('JPEGQual',
             {1: 'SQ',
              2: 'HQ',
              3: 'SHQ'}),
    0x0202: ('Macro',
             {0: 'Normal',
             1: 'Macro',
             2: 'SuperMacro'}),
    0x0203: ('BWMode',
             {0: 'Off',
             1: 'On'}),
    0x0204: ('DigitalZoom', ),
    0x0205: ('FocalPlaneDiagonal', ),
    0x0206: ('LensDistortionParams', ),
    0x0207: ('SoftwareRelease', ),
    0x0208: ('PictureInfo', ),
    0x0209: ('CameraID', make_string), # print as string
    0x0F00: ('DataDump', ),
    0x0300: ('PreCaptureFrames', ),
    0x0404: ('SerialNumber', ),
    0x1000: ('ShutterSpeedValue', ),
    0x1001: ('ISOValue', ),
    0x1002: ('ApertureValue', ),
    0x1003: ('BrightnessValue', ),
    0x1004: ('FlashMode',
             {2: 'On',
              3: 'Off'}),
    0x1005: ('FlashDevice',
             {0: 'None',
              1: 'Internal',
              4: 'External',
              5: 'Internal + External'}),
    0x1006: ('ExposureCompensation', ),
    0x1007: ('SensorTemperature', ),
    0x1008: ('LensTemperature', ),
    0x100b: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1017: ('RedBalance', ),
    0x1018: ('BlueBalance', ),
    0x101a: ('SerialNumber', ),
    0x1023: ('FlashExposureComp', ),
    0x1026: ('ExternalFlashBounce',
             {0: 'No',
              1: 'Yes'}),
    0x1027: ('ExternalFlashZoom', ),
    0x1028: ('ExternalFlashMode', ),
    0x1029: ('Contrast  int16u',
             {0: 'High',
              1: 'Normal',
              2: 'Low'}),
    0x102a: ('SharpnessFactor', ),
    0x102b: ('ColorControl', ),
    0x102c: ('ValidBits', ),
    0x102d: ('CoringFilter', ),
    0x102e: ('OlympusImageWidth', ),
    0x102f: ('OlympusImageHeight', ),
    0x1034: ('CompressionRatio', ),
    0x1035: ('PreviewImageValid',
             {0: 'No',
              1: 'Yes'}),
    0x1036: ('PreviewImageStart', ),
    0x1037: ('PreviewImageLength', ),
    0x1039: ('CCDScanMode',
             {0: 'Interlaced',
              1: 'Progressive'}),
    0x103a: ('NoiseReduction',
             {0: 'Off',
              1: 'On'}),
    0x103b: ('InfinityLensStep', ),
    0x103c: ('NearLensStep', ),
    # TODO - these need extra definitions
    # http://search.cpan.org/src/EXIFTOOL/Image-ExifTool-6.90/html/TagNames/Olympus.html
    0x2010: ('Equipment', ),
    0x2020: ('CameraSettings', ),
    0x2030: ('RawDevelopment', ),
    0x2040: ('ImageProcessing', ),
    0x2050: ('FocusInfo', ),
    0x3000: ('RawInfo ', ),
    }
# Sub-IFD tags for the Olympus CameraSettings makernote section (tag 0x2020).
MAKERNOTE_OLYMPUS_TAG_0x2020={
    0x0100: ('PreviewImageValid',
             {0: 'No',
              1: 'Yes'}),
    0x0101: ('PreviewImageStart', ),
    0x0102: ('PreviewImageLength', ),
    0x0200: ('ExposureMode',
             {1: 'Manual',
              2: 'Program',
              3: 'Aperture-priority AE',
              4: 'Shutter speed priority AE',
              5: 'Program-shift'}),
    0x0201: ('AELock',
             {0: 'Off',
              1: 'On'}),
    0x0202: ('MeteringMode',
             {2: 'Center Weighted',
              3: 'Spot',
              5: 'ESP',
              261: 'Pattern+AF',
              515: 'Spot+Highlight control',
              1027: 'Spot+Shadow control'}),
    0x0300: ('MacroMode',
             {0: 'Off',
              1: 'On'}),
    0x0301: ('FocusMode',
             {0: 'Single AF',
              1: 'Sequential shooting AF',
              2: 'Continuous AF',
              3: 'Multi AF',
              10: 'MF'}),
    0x0302: ('FocusProcess',
             {0: 'AF Not Used',
              1: 'AF Used'}),
    0x0303: ('AFSearch',
             {0: 'Not Ready',
              1: 'Ready'}),
    0x0304: ('AFAreas', ),
    0x0401: ('FlashExposureCompensation', ),
    0x0500: ('WhiteBalance2',
             {0: 'Auto',
             16: '7500K (Fine Weather with Shade)',
             17: '6000K (Cloudy)',
             18: '5300K (Fine Weather)',
             20: '3000K (Tungsten light)',
             21: '3600K (Tungsten light-like)',
             33: '6600K (Daylight fluorescent)',
             34: '4500K (Neutral white fluorescent)',
             35: '4000K (Cool white fluorescent)',
             48: '3600K (Tungsten light-like)',
             256: 'Custom WB 1',
             257: 'Custom WB 2',
             258: 'Custom WB 3',
             259: 'Custom WB 4',
             512: 'Custom WB 5400K',
             513: 'Custom WB 2900K',
             514: 'Custom WB 8000K', }),
    0x0501: ('WhiteBalanceTemperature', ),
    0x0502: ('WhiteBalanceBracket', ),
    0x0503: ('CustomSaturation', ), # (3 numbers: 1. CS Value, 2. Min, 3. Max)
    0x0504: ('ModifiedSaturation',
             {0: 'Off',
              1: 'CM1 (Red Enhance)',
              2: 'CM2 (Green Enhance)',
              3: 'CM3 (Blue Enhance)',
              4: 'CM4 (Skin Tones)'}),
    0x0505: ('ContrastSetting', ), # (3 numbers: 1. Contrast, 2. Min, 3. Max)
    0x0506: ('SharpnessSetting', ), # (3 numbers: 1. Sharpness, 2. Min, 3. Max)
    0x0507: ('ColorSpace',
             {0: 'sRGB',
              1: 'Adobe RGB',
              2: 'Pro Photo RGB'}),
    0x0509: ('SceneMode',
             {0: 'Standard',
              6: 'Auto',
              7: 'Sport',
              8: 'Portrait',
              9: 'Landscape+Portrait',
             10: 'Landscape',
             11: 'Night scene',
             13: 'Panorama',
             16: 'Landscape+Portrait',
             17: 'Night+Portrait',
             19: 'Fireworks',
             20: 'Sunset',
             22: 'Macro',
             25: 'Documents',
             26: 'Museum',
             28: 'Beach&Snow',
             30: 'Candle',
             35: 'Underwater Wide1',
             36: 'Underwater Macro',
             39: 'High Key',
             40: 'Digital Image Stabilization',
             44: 'Underwater Wide2',
             45: 'Low Key',
             46: 'Children',
             48: 'Nature Macro'}),
    0x050a: ('NoiseReduction',
             {0: 'Off',
              1: 'Noise Reduction',
              2: 'Noise Filter',
              3: 'Noise Reduction + Noise Filter',
              4: 'Noise Filter (ISO Boost)',
              5: 'Noise Reduction + Noise Filter (ISO Boost)'}),
    0x050b: ('DistortionCorrection',
             {0: 'Off',
              1: 'On'}),
    0x050c: ('ShadingCompensation',
             {0: 'Off',
              1: 'On'}),
    0x050d: ('CompressionFactor', ),
    0x050f: ('Gradation',
             {'-1 -1 1': 'Low Key',
              '0 -1 1': 'Normal',
              '1 -1 1': 'High Key'}),
    0x0520: ('PictureMode',
             {1: 'Vivid',
              2: 'Natural',
              3: 'Muted',
              256: 'Monotone',
              512: 'Sepia'}),
    0x0521: ('PictureModeSaturation', ),
    0x0522: ('PictureModeHue?', ),
    0x0523: ('PictureModeContrast', ),
    0x0524: ('PictureModeSharpness', ),
    0x0525: ('PictureModeBWFilter',
             {0: 'n/a',
              1: 'Neutral',
              2: 'Yellow',
              3: 'Orange',
              4: 'Red',
              5: 'Green'}),
    0x0526: ('PictureModeTone',
             {0: 'n/a',
              1: 'Neutral',
              2: 'Sepia',
              3: 'Blue',
              4: 'Purple',
              5: 'Green'}),
    0x0600: ('Sequence', ), # 2 or 3 numbers: 1. Mode, 2. Shot number, 3. Mode bits
    0x0601: ('PanoramaMode', ), # (2 numbers: 1. Mode, 2. Shot number)
    0x0603: ('ImageQuality2',
             {1: 'SQ',
              2: 'HQ',
              3: 'SHQ',
              4: 'RAW'}),
    0x0901: ('ManometerReading', ),
    }
# Casio MakerNote tags.  Same value format as EXIF_TAGS.
MAKERNOTE_CASIO_TAGS={
    0x0001: ('RecordingMode',
             {1: 'Single Shutter',
              2: 'Panorama',
              3: 'Night Scene',
              4: 'Portrait',
              5: 'Landscape'}),
    0x0002: ('Quality',
             {1: 'Economy',
              2: 'Normal',
              3: 'Fine'}),
    0x0003: ('FocusingMode',
             {2: 'Macro',
              3: 'Auto Focus',
              4: 'Manual Focus',
              5: 'Infinity'}),
    0x0004: ('FlashMode',
             {1: 'Auto',
              2: 'On',
              3: 'Off',
              4: 'Red Eye Reduction'}),
    0x0005: ('FlashIntensity',
             {11: 'Weak',
              13: 'Normal',
              15: 'Strong'}),
    0x0006: ('Object Distance', ),
    0x0007: ('WhiteBalance',
             {1: 'Auto',
              2: 'Tungsten',
              3: 'Daylight',
              4: 'Fluorescent',
              5: 'Shade',
              129: 'Manual'}),
    0x000B: ('Sharpness',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0x000C: ('Contrast',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x000D: ('Saturation',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x0014: ('CCDSpeed',
             {64: 'Normal',
              80: 'Normal',
              100: 'High',
              125: '+1.0',
              244: '+3.0',
              250: '+2.0'}),
    }
# Fujifilm MakerNote tags.  Same value format as EXIF_TAGS.
MAKERNOTE_FUJIFILM_TAGS={
    0x0000: ('NoteVersion', make_string),
    0x1000: ('Quality', ),
    0x1001: ('Sharpness',
             {1: 'Soft',
              2: 'Soft',
              3: 'Normal',
              4: 'Hard',
              5: 'Hard'}),
    0x1002: ('WhiteBalance',
             {0: 'Auto',
              256: 'Daylight',
              512: 'Cloudy',
              768: 'DaylightColor-Fluorescent',
              769: 'DaywhiteColor-Fluorescent',
              770: 'White-Fluorescent',
              1024: 'Incandescent',
              3840: 'Custom'}),
    0x1003: ('Color',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1004: ('Tone',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1010: ('FlashMode',
             {0: 'Auto',
              1: 'On',
              2: 'Off',
              3: 'Red Eye Reduction'}),
    0x1011: ('FlashStrength', ),
    0x1020: ('Macro',
             {0: 'Off',
              1: 'On'}),
    0x1021: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1030: ('SlowSync',
             {0: 'Off',
              1: 'On'}),
    0x1031: ('PictureMode',
             {0: 'Auto',
              1: 'Portrait',
              2: 'Landscape',
              4: 'Sports',
              5: 'Night',
              6: 'Program AE',
              256: 'Aperture Priority AE',
              512: 'Shutter Priority AE',
              768: 'Manual Exposure'}),
    0x1100: ('MotorOrBracket',
             {0: 'Off',
              1: 'On'}),
    0x1300: ('BlurWarning',
             {0: 'Off',
              1: 'On'}),
    0x1301: ('FocusWarning',
             {0: 'Off',
              1: 'On'}),
    0x1302: ('AEWarning',
             {0: 'Off',
              1: 'On'}),
    }
# Canon MakerNote tags with a direct name mapping.  Canon packs most of its
# settings into the array-valued tags 0x0001 and 0x0004, decoded with the
# per-index dictionaries below.
MAKERNOTE_CANON_TAGS = {
    0x0006: ('ImageType', ),
    0x0007: ('FirmwareVersion', ),
    0x0008: ('ImageNumber', ),
    0x0009: ('OwnerName', ),
    }
# Decoding for the elements of Canon makernote tag 0x0001 ("CameraSettings"),
# keyed by array index rather than tag number.
MAKERNOTE_CANON_TAG_0x001 = {
    1: ('Macromode',
        {1: 'Macro',
         2: 'Normal'}),
    2: ('SelfTimer', ),
    3: ('Quality',
        {2: 'Normal',
         3: 'Fine',
         5: 'Superfine'}),
    4: ('FlashMode',
        {0: 'Flash Not Fired',
         1: 'Auto',
         2: 'On',
         3: 'Red-Eye Reduction',
         4: 'Slow Synchro',
         5: 'Auto + Red-Eye Reduction',
         6: 'On + Red-Eye Reduction',
         16: 'external flash'}),
    5: ('ContinuousDriveMode',
        {0: 'Single Or Timer',
         1: 'Continuous'}),
    7: ('FocusMode',
        {0: 'One-Shot',
         1: 'AI Servo',
         2: 'AI Focus',
         3: 'MF',
         4: 'Single',
         5: 'Continuous',
         6: 'MF'}),
    10: ('ImageSize',
         {0: 'Large',
          1: 'Medium',
          2: 'Small'}),
    11: ('EasyShootingMode',
         {0: 'Full Auto',
          1: 'Manual',
          2: 'Landscape',
          3: 'Fast Shutter',
          4: 'Slow Shutter',
          5: 'Night',
          6: 'B&W',
          7: 'Sepia',
          8: 'Portrait',
          9: 'Sports',
          10: 'Macro/Close-Up',
          11: 'Pan Focus'}),
    12: ('DigitalZoom',
         {0: 'None',
          1: '2x',
          2: '4x'}),
    13: ('Contrast',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    14: ('Saturation',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    15: ('Sharpness',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    16: ('ISO',
         {0: 'See ISOSpeedRatings Tag',
          15: 'Auto',
          16: '50',
          17: '100',
          18: '200',
          19: '400'}),
    17: ('MeteringMode',
         {3: 'Evaluative',
          4: 'Partial',
          5: 'Center-weighted'}),
    18: ('FocusType',
         {0: 'Manual',
          1: 'Auto',
          3: 'Close-Up (Macro)',
          8: 'Locked (Pan Mode)'}),
    19: ('AFPointSelected',
         {0x3000: 'None (MF)',
          0x3001: 'Auto-Selected',
          0x3002: 'Right',
          0x3003: 'Center',
          0x3004: 'Left'}),
    20: ('ExposureMode',
         {0: 'Easy Shooting',
          1: 'Program',
          2: 'Tv-priority',
          3: 'Av-priority',
          4: 'Manual',
          5: 'A-DEP'}),
    23: ('LongFocalLengthOfLensInFocalUnits', ),
    24: ('ShortFocalLengthOfLensInFocalUnits', ),
    25: ('FocalUnitsPerMM', ),
    28: ('FlashActivity',
         {0: 'Did Not Fire',
          1: 'Fired'}),
    29: ('FlashDetails',
         {14: 'External E-TTL',
          13: 'Internal Flash',
          11: 'FP Sync Used',
          7: '2nd("Rear")-Curtain Sync Used',
          4: 'FP Sync Enabled'}),
    32: ('FocusMode',
         {0: 'Single',
          1: 'Continuous'}),
    }
# Decoding for the elements of Canon makernote tag 0x0004 ("ShotInfo"),
# keyed by array index rather than tag number.
MAKERNOTE_CANON_TAG_0x004 = {
    7: ('WhiteBalance',
        {0: 'Auto',
         1: 'Sunny',
         2: 'Cloudy',
         3: 'Tungsten',
         4: 'Fluorescent',
         5: 'Flash',
         6: 'Custom'}),
    9: ('SequenceNumber', ),
    14: ('AFPointUsed', ),
    15: ('FlashBias',
         {0xFFC0: '-2 EV',
          0xFFCC: '-1.67 EV',
          0xFFD0: '-1.50 EV',
          0xFFD4: '-1.33 EV',
          0xFFE0: '-1 EV',
          0xFFEC: '-0.67 EV',
          0xFFF0: '-0.50 EV',
          0xFFF4: '-0.33 EV',
          0x0000: '0 EV',
          0x000C: '0.33 EV',
          0x0010: '0.50 EV',
          0x0014: '0.67 EV',
          0x0020: '1 EV',
          0x002C: '1.33 EV',
          0x0030: '1.50 EV',
          0x0034: '1.67 EV',
          0x0040: '2 EV'}),
    19: ('SubjectDistance', ),
    }
def s2n_motorola(string):
    """Extract a big-endian (Motorola byte order) integer from *string*.

    The parameter was renamed from 'str', which shadowed the builtin;
    all callers in this module pass it positionally.
    """
    x = 0
    for c in string:
        x = (x << 8) | ord(c)
    return x
def s2n_intel(string):
    """Extract a little-endian (Intel byte order) integer from *string*.

    The shift accumulator no longer uses the Python 2 long literal '0L'
    (a syntax error under Python 3 and unnecessary under Python 2, where
    ints promote to longs automatically); the parameter was also renamed
    from 'str', which shadowed the builtin.
    """
    x = 0
    shift = 0
    for c in string:
        x = x | (ord(c) << shift)
        shift = shift + 8
    return x
def gcd(a, b):
    """Greatest common divisor of a and b (Euclid's algorithm)."""
    while b != 0:
        a, b = b, a % b
    return a
class Ratio:
    """A ratio of two integers, reduced to lowest terms when printed."""
    def __init__(self, num, den):
        self.num = num
        self.den = den

    def __repr__(self):
        # Reduce lazily, only when a printable form is requested.
        self.reduce()
        if self.den == 1:
            return str(self.num)
        return '%d/%d' % (self.num, self.den)

    def reduce(self):
        """Divide out the greatest common divisor of num and den, in place."""
        div = gcd(self.num, self.den)
        if div > 1:
            # Floor division keeps the terms as integers; the original '/'
            # was integer division only under Python 2 and would yield
            # floats under Python 3.
            self.num = self.num // div
            self.den = self.den // div
class IFD_Tag:
    """One entry of an image file directory (IFD): raw values plus metadata."""
    def __init__(self, printable, tag, field_type, values, field_offset,
                 field_length):
        self.tag = tag                    # numeric tag ID
        self.field_type = field_type      # index into FIELD_TYPES
        self.field_offset = field_offset  # byte offset of the field within the IFD
        self.field_length = field_length  # length of the data field, in bytes
        self.values = values              # either a string or a list of data items
        self.printable = printable        # human-readable rendering of values

    def __str__(self):
        return self.printable

    def __repr__(self):
        return '(0x%04X) %s=%s @ %d' % (self.tag,
                                        FIELD_TYPES[self.field_type][2],
                                        self.printable,
                                        self.field_offset)
class EXIF_header:
    """Handle an EXIF header: endian-aware value decoding and IFD traversal."""
    def __init__(self, file, endian, offset, fake_exif, strict, debug=0):
        self.file = file            # seekable file-like object holding the data
        self.endian = endian        # 'I' for Intel byte order, else Motorola
        self.offset = offset        # base offset of the EXIF data in the file
        self.fake_exif = fake_exif  # when set, relative makernote offsets are
                                    # shifted by 18 bytes (see dump_IFD)
        self.strict = strict        # raise on malformed data instead of skipping
        self.debug = debug
        self.tags = {}              # extracted IFD_Tag entries, keyed by name
# convert slice to integer, based on sign and endian flags
# usually this offset is assumed to be relative to the beginning of the
# start of the EXIF information. For some cameras that use relative tags,
# this offset may be relative to some other starting point.
def s2n(self, offset, length, signed=0):
self.file.seek(self.offset+offset)
slice=self.file.read(length)
if self.endian == 'I':
val=s2n_intel(slice)
else:
val=s2n_motorola(slice)
# Sign extension ?
if signed:
msb=1L << (8*length-1)
if val & msb:
val=val-(msb << 1)
return val
# convert offset to string
def n2s(self, offset, length):
s = ''
for dummy in range(length):
if self.endian == 'I':
s = s + chr(offset & 0xFF)
else:
s = chr(offset & 0xFF) + s
offset = offset >> 8
return s
# return first IFD
def first_IFD(self):
return self.s2n(4, 4)
# return pointer to next IFD
def next_IFD(self, ifd):
entries=self.s2n(ifd, 2)
return self.s2n(ifd+2+12*entries, 4)
# return list of IFDs in header
def list_IFDs(self):
i=self.first_IFD()
a=[]
while i:
a.append(i)
i=self.next_IFD(i)
return a
# return list of entries in this IFD
def dump_IFD(self, ifd, ifd_name, dict=EXIF_TAGS, relative=0, stop_tag='UNDEF'):
entries=self.s2n(ifd, 2)
for i in range(entries):
# entry is index of start of this IFD in the file
entry = ifd + 2 + 12 * i
tag = self.s2n(entry, 2)
# get tag name early to avoid errors, help debug
tag_entry = dict.get(tag)
if tag_entry:
tag_name = tag_entry[0]
else:
tag_name = 'Tag 0x%04X' % tag
# ignore certain tags for faster processing
if not (not detailed and tag in IGNORE_TAGS):
field_type = self.s2n(entry + 2, 2)
# unknown field type
if not 0 < field_type < len(FIELD_TYPES):
if not self.strict:
continue
else:
raise ValueError('unknown type %d in tag 0x%04X' % (field_type, tag))
typelen = FIELD_TYPES[field_type][0]
count = self.s2n(entry + 4, 4)
# Adjust for tag id/type/count (2+2+4 bytes)
# Now we point at either the data or the 2nd level offset
offset = entry + 8
# If the value fits in 4 bytes, it is inlined, else we
# need to jump ahead again.
if count * typelen > 4:
# offset is not the value; it's a pointer to the value
# if relative we set things up so s2n will seek to the right
# place when it adds self.offset. Note that this 'relative'
# is for the Nikon type 3 makernote. Other cameras may use
# other relative offsets, which would have to be computed here
# slightly differently.
if relative:
tmp_offset = self.s2n(offset, 4)
offset = tmp_offset + ifd - 8
if self.fake_exif:
offset = offset + 18
else:
offset = self.s2n(offset, 4)
field_offset = offset
if field_type == 2:
# special case: null-terminated ASCII string
# XXX investigate
# sometimes gets too big to fit in int value
if count != 0 and count < (2**31):
self.file.seek(self.offset + offset)
values = self.file.read(count)
#print values
# Drop any garbage after a null.
values = values.split('\x00', 1)[0]
else:
values = ''
else:
values = []
signed = (field_type in [6, 8, 9, 10])
# XXX investigate
# some entries get too big to handle could be malformed
# file or problem with self.s2n
if count < 1000:
for dummy in range(count):
if field_type in (5, 10):
# a ratio
value = Ratio(self.s2n(offset, 4, signed),
self.s2n(offset + 4, 4, signed))
else:
value = self.s2n(offset, typelen, signed)
values.append(value)
offset = offset + typelen
# The test above causes problems with tags that are
# supposed to have long values! Fix up one important case.
elif tag_name == 'MakerNote' :
for dummy in range(count):
value = self.s2n(offset, typelen, signed)
values.append(value)
offset = offset + typelen
#else :
# print "Warning: dropping large tag:", tag, tag_name
# now 'values' is either a string or an array
if count == 1 and field_type != 2:
printable=str(values[0])
elif count > 50 and len(values) > 20 :
printable=str( values[0:20] )[0:-1] + ", ... ]"
else:
printable=str(values)
# compute printable version of values
if tag_entry:
if len(tag_entry) != 1:
# optional 2nd tag element is present
if callable(tag_entry[1]):
# call mapping function
printable = tag_entry[1](values)
else:
printable = ''
for i in values:
# use lookup table for this tag
printable += tag_entry[1].get(i, repr(i))
self.tags[ifd_name + ' ' + tag_name] = IFD_Tag(printable, tag,
field_type,
values, field_offset,
count * typelen)
if self.debug:
print ' debug: %s: %s' % (tag_name,
repr(self.tags[ifd_name + ' ' + tag_name]))
if tag_name == stop_tag:
break
# extract uncompressed TIFF thumbnail (like pulling teeth)
# we take advantage of the pre-existing layout in the thumbnail IFD as
# much as possible
def extract_TIFF_thumbnail(self, thumb_ifd):
entries = self.s2n(thumb_ifd, 2)
# this is header plus offset to IFD ...
if self.endian == 'M':
tiff = 'MM\x00*\x00\x00\x00\x08'
else:
tiff = 'II*\x00\x08\x00\x00\x00'
# ... plus thumbnail IFD data plus a null "next IFD" pointer
self.file.seek(self.offset+thumb_ifd)
tiff += self.file.read(entries*12+2)+'\x00\x00\x00\x00'
# fix up large value offset pointers into data area
for i in range(entries):
entry = thumb_ifd + 2 + 12 * i
tag = self.s2n(entry, 2)
field_type = self.s2n(entry+2, 2)
typelen = FIELD_TYPES[field_type][0]
count = self.s2n(entry+4, 4)
oldoff = self.s2n(entry+8, 4)
# start of the 4-byte pointer area in entry
ptr = i * 12 + 18
# remember strip offsets location
if tag == 0x0111:
strip_off = ptr
strip_len = count * typelen
# is it in the data area?
if count * typelen > 4:
# update offset pointer (nasty "strings are immutable" crap)
# should be able to say "tiff[ptr:ptr+4]=newoff"
newoff = len(tiff)
tiff = tiff[:ptr] + self.n2s(newoff, 4) + tiff[ptr+4:]
# remember strip offsets location
if tag == 0x0111:
strip_off = newoff
strip_len = 4
# get original data and store it
self.file.seek(self.offset + oldoff)
tiff += self.file.read(count * typelen)
# add pixel strips and update strip offset info
old_offsets = self.tags['Thumbnail StripOffsets'].values
old_counts = self.tags['Thumbnail StripByteCounts'].values
for i in range(len(old_offsets)):
# update offset pointer (more nasty "strings are immutable" crap)
offset = self.n2s(len(tiff), strip_len)
tiff = tiff[:strip_off] + offset + tiff[strip_off + strip_len:]
strip_off += strip_len
# add pixel strip to end
self.file.seek(self.offset + old_offsets[i])
tiff += self.file.read(old_counts[i])
self.tags['TIFFThumbnail'] = tiff
# decode all the camera-specific MakerNote formats
# Note is the data that comprises this MakerNote. The MakerNote will
# likely have pointers in it that point to other parts of the file. We'll
# use self.offset as the starting point for most of those pointers, since
# they are relative to the beginning of the file.
#
# If the MakerNote is in a newer format, it may use relative addressing
# within the MakerNote. In that case we'll use relative addresses for the
# pointers.
#
# As an aside: it's not just to be annoying that the manufacturers use
# relative offsets. It's so that if the makernote has to be moved by the
# picture software all of the offsets don't have to be adjusted. Overall,
# this is probably the right strategy for makernotes, though the spec is
# ambiguous. (The spec does not appear to imagine that makernotes would
# follow EXIF format internally. Once they did, it's ambiguous whether
# the offsets should be from the header at the start of all the EXIF info,
# or from the header at the start of the makernote.)
def decode_maker_note(self):
note = self.tags['EXIF MakerNote']
# Some apps use MakerNote tags but do not use a format for which we
# have a description, so just do a raw dump for these.
#if self.tags.has_key('Image Make'):
make = self.tags['Image Make'].printable
#else:
# make = ''
# model = self.tags['Image Model'].printable # unused
# Nikon
# The maker note usually starts with the word Nikon, followed by the
# type of the makernote (1 or 2, as a short). If the word Nikon is
# not at the start of the makernote, it's probably type 2, since some
# cameras work that way.
if 'NIKON' in make:
if note.values[0:7] == [78, 105, 107, 111, 110, 0, 1]:
if self.debug:
print "Looks like a type 1 Nikon MakerNote."
self.dump_IFD(note.field_offset+8, 'MakerNote',
dict=MAKERNOTE_NIKON_OLDER_TAGS)
elif note.values[0:7] == [78, 105, 107, 111, 110, 0, 2]:
if self.debug:
print "Looks like a labeled type 2 Nikon MakerNote"
if note.values[12:14] != [0, 42] and note.values[12:14] != [42L, 0L]:
raise ValueError("Missing marker tag '42' in MakerNote.")
# skip the Makernote label and the TIFF header
self.dump_IFD(note.field_offset+10+8, 'MakerNote',
dict=MAKERNOTE_NIKON_NEWER_TAGS, relative=1)
else:
# E99x or D1
if self.debug:
print "Looks like an unlabeled type 2 Nikon MakerNote"
self.dump_IFD(note.field_offset, 'MakerNote',
dict=MAKERNOTE_NIKON_NEWER_TAGS)
return
# Olympus
if make.startswith('OLYMPUS'):
self.dump_IFD(note.field_offset+8, 'MakerNote',
dict=MAKERNOTE_OLYMPUS_TAGS)
# XXX TODO
#for i in (('MakerNote Tag 0x2020', MAKERNOTE_OLYMPUS_TAG_0x2020),):
# self.decode_olympus_tag(self.tags[i[0]].values, i[1])
#return
# Casio
if 'CASIO' in make or 'Casio' in make:
self.dump_IFD(note.field_offset, 'MakerNote',
dict=MAKERNOTE_CASIO_TAGS)
return
# Fujifilm
if make == 'FUJIFILM':
# bug: everything else is "Motorola" endian, but the MakerNote
# is "Intel" endian
endian = self.endian
self.endian = 'I'
# bug: IFD offsets are from beginning of MakerNote, not
# beginning of file header
offset = self.offset
self.offset += note.field_offset
# process note with bogus values (note is actually at offset 12)
self.dump_IFD(12, 'MakerNote', dict=MAKERNOTE_FUJIFILM_TAGS)
# reset to correct values
self.endian = endian
self.offset = offset
return
# Canon
if make == 'Canon':
self.dump_IFD(note.field_offset, 'MakerNote',
dict=MAKERNOTE_CANON_TAGS)
for i in (('MakerNote Tag 0x0001', MAKERNOTE_CANON_TAG_0x001),
('MakerNote Tag 0x0004', MAKERNOTE_CANON_TAG_0x004)):
self.canon_decode_tag(self.tags[i[0]].values, i[1])
return
# XXX TODO decode Olympus MakerNote tag based on offset within tag
def olympus_decode_tag(self, value, dict):
pass
# decode Canon MakerNote tag based on offset within tag
# see http://www.burren.cx/david/canon.html by David Burren
def canon_decode_tag(self, value, dict):
for i in range(1, len(value)):
x=dict.get(i, ('Unknown', ))
if self.debug:
print i, x
name=x[0]
if len(x) > 1:
val=x[1].get(value[i], 'Unknown')
else:
val=value[i]
# it's not a real IFD Tag but we fake one to make everybody
# happy. this will have a "proprietary" type
self.tags['MakerNote '+name]=IFD_Tag(str(val), None, 0, None,
None, None)
def process_file(f, stop_tag='UNDEF', details=True, strict=False, debug=False):
# yah it's cheesy...
global detailed
detailed = details
# by default do not fake an EXIF beginning
fake_exif = 0
# determine whether it's a JPEG or TIFF
data = f.read(12)
if data[0:4] in ['II*\x00', 'MM\x00*']:
# it's a TIFF file
f.seek(0)
endian = f.read(1)
f.read(1)
offset = 0
elif data[0:2] == '\xFF\xD8':
# it's a JPEG file
while data[2] == '\xFF' and data[6:10] in ('JFIF', 'JFXX', 'OLYM', 'Phot'):
length = ord(data[4])*256+ord(data[5])
f.read(length-8)
# fake an EXIF beginning of file
data = '\xFF\x00'+f.read(10)
fake_exif = 1
if data[2] == '\xFF' and data[6:10] == 'Exif':
# detected EXIF header
offset = f.tell()
endian = f.read(1)
else:
# no EXIF information
return {}
else:
# file format not recognized
return {}
# deal with the EXIF info we found
if debug:
print {'I': 'Intel', 'M': 'Motorola'}[endian], 'format'
hdr = EXIF_header(f, endian, offset, fake_exif, strict, debug)
ifd_list = hdr.list_IFDs()
ctr = 0
for i in ifd_list:
if ctr == 0:
IFD_name = 'Image'
elif ctr == 1:
IFD_name = 'Thumbnail'
thumb_ifd = i
else:
IFD_name = 'IFD %d' % ctr
if debug:
print ' IFD %d (%s) at offset %d:' % (ctr, IFD_name, i)
hdr.dump_IFD(i, IFD_name, stop_tag=stop_tag)
# EXIF IFD
exif_off = hdr.tags.get(IFD_name+' ExifOffset')
if exif_off:
if debug:
print ' EXIF SubIFD at offset %d:' % exif_off.values[0]
hdr.dump_IFD(exif_off.values[0], 'EXIF', stop_tag=stop_tag)
# Interoperability IFD contained in EXIF IFD
intr_off = hdr.tags.get('EXIF SubIFD InteroperabilityOffset')
if intr_off:
if debug:
print ' EXIF Interoperability SubSubIFD at offset %d:' \
% intr_off.values[0]
hdr.dump_IFD(intr_off.values[0], 'EXIF Interoperability',
dict=INTR_TAGS, stop_tag=stop_tag)
# GPS IFD
gps_off = hdr.tags.get(IFD_name+' GPSInfo')
if gps_off:
if debug:
print ' GPS SubIFD at offset %d:' % gps_off.values[0]
hdr.dump_IFD(gps_off.values[0], 'GPS', dict=GPS_TAGS, stop_tag=stop_tag)
ctr += 1
# extract uncompressed TIFF thumbnail
thumb = hdr.tags.get('Thumbnail Compression')
if thumb and thumb.printable == 'Uncompressed TIFF':
hdr.extract_TIFF_thumbnail(thumb_ifd)
# JPEG thumbnail (thankfully the JPEG data is stored as a unit)
thumb_off = hdr.tags.get('Thumbnail JPEGInterchangeFormat')
if thumb_off:
f.seek(offset+thumb_off.values[0])
size = hdr.tags['Thumbnail JPEGInterchangeFormatLength'].values[0]
hdr.tags['JPEGThumbnail'] = f.read(size)
# deal with MakerNote contained in EXIF IFD
# (Some apps use MakerNote tags but do not use a format for which we
# have a description, do not process these).
if 'EXIF MakerNote' in hdr.tags and 'Image Make' in hdr.tags and detailed:
hdr.decode_maker_note()
# Sometimes in a TIFF file, a JPEG thumbnail is hidden in the MakerNote
# since it's not allowed in a uncompressed TIFF IFD
if 'JPEGThumbnail' not in hdr.tags:
thumb_off=hdr.tags.get('MakerNote JPEGThumbnail')
if thumb_off:
f.seek(offset+thumb_off.values[0])
hdr.tags['JPEGThumbnail']=file.read(thumb_off.field_length)
return hdr.tags
def usage(exit_status):
msg = 'Usage: EXIF.py [OPTIONS] file1 [file2 ...]\n'
msg += 'Extract EXIF information from digital camera image files.\n\nOptions:\n'
msg += '-q --quick Do not process MakerNotes.\n'
msg += '-t TAG --stop-tag TAG Stop processing when this tag is retrieved.\n'
msg += '-s --strict Run in strict mode (stop on errors).\n'
msg += '-d --debug Run in debug mode (display extra info).\n'
print msg
sys.exit(exit_status)
# Command-line interface: parse options, then print the EXIF tags of
# each file named on the command line.
if __name__ == '__main__':
    import sys
    import getopt
    # parse command line options/arguments
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hqsdt:v", ["help", "quick", "strict", "debug", "stop-tag="])
    except getopt.GetoptError:
        usage(2)
    if args == []:
        # no files given: show help and exit with an error status
        usage(2)
    detailed = True
    stop_tag = 'UNDEF'
    debug = False
    strict = False
    for o, a in opts:
        if o in ("-h", "--help"):
            usage(0)
        if o in ("-q", "--quick"):
            detailed = False
        if o in ("-t", "--stop-tag"):
            stop_tag = a
        if o in ("-s", "--strict"):
            strict = True
        if o in ("-d", "--debug"):
            debug = True
    # output info for each file
    for filename in args:
        try:
            file=open(filename, 'rb')
        except:
            print "'%s' is unreadable\n"%filename
            continue
        print filename + ':'
        # get the tags
        data = process_file(file, stop_tag=stop_tag, details=detailed, strict=strict, debug=debug)
        if not data:
            print 'No EXIF information found'
            continue
        # print tags sorted by name, skipping the binary thumbnail blobs
        x=data.keys()
        x.sort()
        for i in x:
            if i in ('JPEGThumbnail', 'TIFFThumbnail'):
                continue
            try:
                print ' %s (%s): %s' % \
                    (i, FIELD_TYPES[data[i].field_type][2], data[i].printable)
            except:
                print 'error', i, '"', data[i], '"'
        if 'JPEGThumbnail' in data:
            print 'File has JPEG thumbnail'
        print
|
import distutils.sysconfig
import sys
import os
import traceback
from threading import Thread
import time
class reboot(Thread):
    """Background thread that reboots *target* through the cobbler API.

    The reboot is delayed so the freshly installed system has time to
    finish up before being power-cycled.
    """

    def __init__(self, api, target):
        super(reboot, self).__init__()
        self.api = api        # cobbler API handle used to issue the reboot
        self.target = target  # system object to reboot

    def run(self):
        # Wait before rebooting; the API call happens in this thread.
        time.sleep(30)
        self.api.reboot(self.target)
# Make the cobbler python package importable no matter how this trigger
# is invoked: prepend <site-packages>/cobbler to the module search path.
plib = distutils.sysconfig.get_python_lib()
mod_path="%s/cobbler" % plib
sys.path.insert(0, mod_path)
def register():
    """Identify this trigger to cobbler.

    This pure-python trigger acts as if it were a legacy shell trigger,
    but is much faster; the returned glob tells cobbler which trigger
    slot it fills.
    """
    return "/var/lib/cobbler/triggers/install/post/*"
def run(api, args, logger):
    """Post-install trigger entry point.

    args[0] -- object type ("system" or "profile")
    args[1] -- name of the system/profile
    args[2] -- boot ip, or "?"

    Always returns 0. When a system's ks_meta contains 'postreboot',
    a delayed reboot is started in a background thread.
    """
    # FIXME: make everything use the logger
    settings = api.settings()
    objtype, name, boot_ip = args[0], args[1], args[2]
    # Only installed *systems* get the post-reboot treatment.
    if objtype != "system":
        return 0
    target = api.find_system(name)
    if target and 'postreboot' in target.ks_meta:
        # Run this in a thread so the system has a chance to finish and
        # umount the filesystem before being rebooted.
        worker = reboot(api, target)
        worker.start()
    return 0
|
import os
import multiprocessing
import random
import signal
import time
from helpers import unittest, with_config
import luigi.rpc
import luigi.server
from luigi.scheduler import CentralPlannerScheduler
from tornado.testing import AsyncHTTPTestCase
class ServerTestBase(AsyncHTTPTestCase):
    """Base class that routes RemoteScheduler RPC calls into the test app.

    setUp monkey-patches luigi.rpc.RemoteScheduler._fetch so RPC requests
    are served by the in-process tornado test application instead of a
    real network socket; tearDown restores the original method.
    """

    def get_app(self):
        # A fresh scheduler for every test.
        return luigi.server.app(CentralPlannerScheduler())

    def setUp(self):
        super(ServerTestBase, self).setUp()
        self._old_fetch = luigi.rpc.RemoteScheduler._fetch

        def _fetch(obj, url, body, *args, **kwargs):
            response = self.fetch(url, body=body, method='POST')
            if response.code >= 400:
                # Bug fix: corrected the misspelled "Errror" in the message.
                raise luigi.rpc.RPCError(
                    'Error when connecting to remote scheduler'
                )
            return response.body.decode('utf-8')

        luigi.rpc.RemoteScheduler._fetch = _fetch

    def tearDown(self):
        super(ServerTestBase, self).tearDown()
        luigi.rpc.RemoteScheduler._fetch = self._old_fetch
class ServerTest(ServerTestBase):
    """Smoke tests against the scheduler's HTTP interface."""

    def test_visualizer(self):
        # The index page should serve the visualiser HTML.
        page = self.fetch('/').body
        self.assertTrue(page.find(b'<title>') != -1)

    def _test_404(self, path):
        self.assertEqual(self.fetch(path).code, 404)

    def test_404(self):
        self._test_404('/foo')

    def test_api_404(self):
        self._test_404('/api/foo')
class ServerTestRun(unittest.TestCase):
    """Test to start and stop the server in a more "standard" way
    """

    def remove_state(self):
        # Drop the scheduler's persisted state so each test starts clean.
        if os.path.exists('/tmp/luigi-test-server-state'):
            os.remove('/tmp/luigi-test-server-state')

    @with_config({'scheduler': {'state_path': '/tmp/luigi-test-server-state'}})
    def run_server(self):
        # Executed in the child process; blocks serving until terminated.
        luigi.server.run(api_port=self._api_port, address='127.0.0.1')

    def start_server(self):
        # Random port to avoid clashes with other test runs on the host.
        self._api_port = random.randint(1024, 9999)
        self._process = multiprocessing.Process(target=self.run_server)
        self._process.start()
        time.sleep(0.1)  # wait for server to start
        self.sch = luigi.rpc.RemoteScheduler(host='localhost', port=self._api_port)
        self.sch._wait = lambda: None

    def stop_server(self):
        self._process.terminate()
        self._process.join(1)
        if self._process.is_alive():
            # terminate() was ignored within the grace period; force-kill
            # so teardown never hangs.
            os.kill(self._process.pid, signal.SIGKILL)

    def setUp(self):
        self.remove_state()
        self.start_server()

    def tearDown(self):
        self.remove_state()
        self.stop_server()

    def test_ping(self):
        self.sch.ping(worker='xyz')

    def test_raw_ping(self):
        self.sch._request('/api/ping', {'worker': 'xyz'})

    def test_raw_ping_extended(self):
        # Extra unknown arguments must be tolerated by the API.
        self.sch._request('/api/ping', {'worker': 'xyz', 'foo': 'bar'})

    def test_404(self):
        with self.assertRaises(luigi.rpc.RPCError):
            self.sch._request('/api/fdsfds', {'dummy': 1})

    def test_save_state(self):
        # Scheduled work must survive a server restart via the state file.
        self.sch.add_task('X', 'B', deps=('A',))
        self.sch.add_task('X', 'A')
        self.assertEqual(self.sch.get_work('X')['task_id'], 'A')
        self.stop_server()
        self.start_server()
        work = self.sch.get_work('X')['running_tasks'][0]
        self.assertEqual(work['task_id'], 'A')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from ..data import BoletoData, custom_property
class BoletoHsbc(BoletoData):
    """Billing-slip ("boleto") data generator for bank HSBC (code 399),
    unregistered billing (carteira CNR).
    """

    # "numero documento" is stored as a 13-digit field.
    numero_documento = custom_property('numero_documento', 13)

    def __init__(self):
        super(BoletoHsbc, self).__init__()
        self.codigo_banco = "399"
        self.logo_image = "logo_bancohsbc.jpg"
        self.carteira = 'CNR'

    def format_nosso_numero(self):
        """Return "nosso numero" with type marker and both check digits."""
        numero = self.nosso_numero
        # First check digit (modulo 11).
        numero += str(self.modulo11(numero))
        # Billing type 4 = billing with a due date.
        numero += "4"
        # Second check digit: modulo 11 over nosso numero + beneficiary
        # account + due date as ddmmyy.
        total = int(numero) + int(self.conta_cedente)
        total += int(self.data_vencimento.strftime('%d%m%y'))
        numero += str(self.modulo11(str(total)))
        return numero

    @property
    def data_vencimento_juliano(self):
        """Due date in "julian" form: day-of-year plus last digit of the year,
        zero-padded to 4 characters.
        """
        juliano = str(self.data_vencimento.timetuple().tm_yday)
        juliano += str(self.data_vencimento.year)[-1:]
        return juliano.zfill(4)

    @property
    def campo_livre(self):
        # account (7) + nosso numero (13) + julian due date (4) + literal '2'
        return "%7s%13s%4s2" % (self.conta_cedente,
                                self.nosso_numero,
                                self.data_vencimento_juliano)
class BoletoHsbcComRegistro(BoletoData):
    """Billing-slip data generator for bank HSBC (code 399) with
    registration (carteira CSB).
    """

    # "Nosso numero" is 10 digits, excluding the check digit.
    nosso_numero = custom_property('nosso_numero', 10)

    def __init__(self):
        super(BoletoHsbcComRegistro, self).__init__()
        self.codigo_banco = "399"
        self.logo_image = "logo_bancohsbc.jpg"
        self.carteira = 'CSB'
        self.especie_documento = 'PD'

    @property
    def dv_nosso_numero(self):
        """Check digit for nosso numero (modulo 11 with factors 7..1)."""
        resto = self.modulo11(self.nosso_numero, 7, 1)
        return 0 if resto in (0, 1) else 11 - resto

    @property
    def campo_livre(self):
        # nosso numero (10) + its check digit + branch (4) + account (7)
        # + fixed suffix '001'
        return "%10s%1s%4s%7s001" % (self.nosso_numero,
                                     self.dv_nosso_numero,
                                     self.agencia_cedente.split('-')[0],
                                     self.conta_cedente.split('-')[0])
|
import Gaffer
import GafferScene
Gaffer.Metadata.registerNode(
GafferScene.Plane,
"description",
"""
Produces scenes containing a plane.
""",
plugs = {
"dimensions" : [
"description",
"""
The size of the plane in the X and Y directions.
""",
],
"divisions" : [
"description",
"""
The number of subdivisions of the plane in the
X and Y directions.
""",
],
}
)
|
import unittest
from test import support
import operator
class Number:
    """Holds a value and delegates every rich comparison to it.

    __cmp__ exists only as a trap: the rich-comparison methods must
    always be preferred, so __cmp__ being called is a test failure.
    """

    def __init__(self, x):
        self.x = x

    def __lt__(self, other): return self.x < other
    def __le__(self, other): return self.x <= other
    def __eq__(self, other): return self.x == other
    def __ne__(self, other): return self.x != other
    def __gt__(self, other): return self.x > other
    def __ge__(self, other): return self.x >= other

    def __cmp__(self, other):
        raise support.TestFailed("Number.__cmp__() should not be called")

    def __repr__(self):
        return "Number(%r)" % (self.x,)
class Vector:
    """Sequence wrapper whose comparisons are element-wise and return Vectors.

    Because a comparison result is itself a Vector, truth-testing one is
    ambiguous and deliberately raises; __cmp__ is a trap that must never
    be reached.
    """

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def __setitem__(self, i, v):
        self.data[i] = v

    # Mutable and compared element-wise, so unhashable.
    __hash__ = None

    def __bool__(self):
        raise TypeError("Vectors cannot be used in Boolean contexts")

    def __cmp__(self, other):
        raise support.TestFailed("Vector.__cmp__() should not be called")

    def __repr__(self):
        return "Vector(%r)" % (self.data,)

    def __cast(self, other):
        # Accept either a Vector or a plain sequence of matching length.
        if isinstance(other, Vector):
            other = other.data
        if len(self.data) != len(other):
            raise ValueError("Cannot compare vectors of different length")
        return other

    def __lt__(self, other):
        return Vector([a < b for a, b in zip(self.data, self.__cast(other))])

    def __le__(self, other):
        return Vector([a <= b for a, b in zip(self.data, self.__cast(other))])

    def __eq__(self, other):
        return Vector([a == b for a, b in zip(self.data, self.__cast(other))])

    def __ne__(self, other):
        return Vector([a != b for a, b in zip(self.data, self.__cast(other))])

    def __gt__(self, other):
        return Vector([a > b for a, b in zip(self.data, self.__cast(other))])

    def __ge__(self, other):
        return Vector([a >= b for a, b in zip(self.data, self.__cast(other))])
# For each comparison name, the three equivalent spellings of the
# operator: a plain expression, the operator-module function, and the
# operator-module dunder alias.  Tests push each case through all three.
opmap = {
    "lt": (lambda a,b: a< b, operator.lt, operator.__lt__),
    "le": (lambda a,b: a<=b, operator.le, operator.__le__),
    "eq": (lambda a,b: a==b, operator.eq, operator.__eq__),
    "ne": (lambda a,b: a!=b, operator.ne, operator.__ne__),
    "gt": (lambda a,b: a> b, operator.gt, operator.__gt__),
    "ge": (lambda a,b: a>=b, operator.ge, operator.__ge__)
}
class VectorTest(unittest.TestCase):
    """Comparisons whose result is itself a Vector of per-element bools."""

    def checkfail(self, error, opname, *args):
        # every spelling of the operator must raise the same error
        for op in opmap[opname]:
            self.assertRaises(error, op, *args)

    def checkequal(self, opname, a, b, expres):
        for op in opmap[opname]:
            realres = op(a, b)
            # can't use assertEqual(realres, expres) here
            # (truth-testing a Vector raises TypeError)
            self.assertEqual(len(realres), len(expres))
            for i in range(len(realres)):
                # results are bool, so we can use "is" here
                self.assertTrue(realres[i] is expres[i])

    def test_mixed(self):
        # check that comparisons involving Vector objects
        # which return rich results (i.e. Vectors with itemwise
        # comparison results) work
        a = Vector(range(2))
        b = Vector(range(3))
        # all comparisons should fail for different length
        for opname in opmap:
            self.checkfail(ValueError, opname, a, b)
        a = list(range(5))
        b = 5 * [2]
        # try mixed arguments (but not (a, b) as that won't return a bool vector)
        args = [(a, Vector(b)), (Vector(a), b), (Vector(a), Vector(b))]
        for (a, b) in args:
            self.checkequal("lt", a, b, [True, True, False, False, False])
            self.checkequal("le", a, b, [True, True, True, False, False])
            self.checkequal("eq", a, b, [False, False, True, False, False])
            self.checkequal("ne", a, b, [True, True, False, True, True ])
            self.checkequal("gt", a, b, [False, False, False, True, True ])
            self.checkequal("ge", a, b, [False, False, True, True, True ])
        # NOTE: a and b intentionally keep the values from the loop's last
        # iteration (both Vectors) for the truth-testing checks below.
        for ops in opmap.values():
            for op in ops:
                # calls __bool__, which should fail
                self.assertRaises(TypeError, bool, op(a, b))
class NumberTest(unittest.TestCase):
    """Comparisons of Number wrappers must mirror the plain ints they hold."""

    def test_basic(self):
        # Every operator, every int/Number pairing (except int/int, which
        # would test nothing) must agree with the plain-int outcome.
        for a in range(3):
            for b in range(3):
                for typea in (int, Number):
                    for typeb in (int, Number):
                        if typea == typeb == int:
                            continue  # the combination int, int is useless
                        ta = typea(a)
                        tb = typeb(b)
                        for ops in opmap.values():
                            for op in ops:
                                self.assertEqual(op(a, b), op(ta, tb))

    def checkvalue(self, opname, a, b, expres):
        """Each spelling of *opname* must yield exactly *expres* for every
        int/Number combination of the operands."""
        for typea in (int, Number):
            for typeb in (int, Number):
                ta = typea(a)
                tb = typeb(b)
                for op in opmap[opname]:
                    realres = op(ta, tb)
                    # unwrap a Number result before the identity check
                    realres = getattr(realres, "x", realres)
                    self.assertTrue(realres is expres)

    def test_values(self):
        # every operator against all orderings of 0 and 1
        cases = [
            ("lt", 0, 0, False), ("le", 0, 0, True), ("eq", 0, 0, True),
            ("ne", 0, 0, False), ("gt", 0, 0, False), ("ge", 0, 0, True),
            ("lt", 0, 1, True), ("le", 0, 1, True), ("eq", 0, 1, False),
            ("ne", 0, 1, True), ("gt", 0, 1, False), ("ge", 0, 1, False),
            ("lt", 1, 0, False), ("le", 1, 0, False), ("eq", 1, 0, False),
            ("ne", 1, 0, True), ("gt", 1, 0, True), ("ge", 1, 0, True),
        ]
        for opname, a, b, expres in cases:
            self.checkvalue(opname, a, b, expres)
class MiscTest(unittest.TestCase):
    """Assorted rich-comparison corner cases."""

    def test_misbehavin(self):
        # <, == and > must consult only __lt__/__eq__/__gt__ here; the
        # other three methods fail the test outright if they are invoked.
        class Misb:
            def __lt__(self_, other): return 0
            def __gt__(self_, other): return 0
            def __eq__(self_, other): return 0
            def __le__(self_, other): self.fail("This shouldn't happen")
            def __ge__(self_, other): self.fail("This shouldn't happen")
            def __ne__(self_, other): self.fail("This shouldn't happen")
        a = Misb()
        b = Misb()
        self.assertEqual(a<b, 0)
        self.assertEqual(a==b, 0)
        self.assertEqual(a>b, 0)

    def test_not(self):
        # Check that exceptions in __bool__ are properly
        # propagated by the not operator
        import operator
        class Exc(Exception):
            pass
        class Bad:
            def __bool__(self):
                raise Exc
        def do(bad):
            not bad
        for func in (do, operator.not_):
            self.assertRaises(Exc, func, Bad())

    @support.no_tracing
    def test_recursion(self):
        # Check that comparison for recursive objects fails gracefully
        from collections import UserList
        a = UserList()
        b = UserList()
        a.append(b)
        b.append(a)
        # a and b are mutually recursive, so comparisons recurse forever
        # and must surface as RecursionError rather than crashing
        self.assertRaises(RecursionError, operator.eq, a, b)
        self.assertRaises(RecursionError, operator.ne, a, b)
        self.assertRaises(RecursionError, operator.lt, a, b)
        self.assertRaises(RecursionError, operator.le, a, b)
        self.assertRaises(RecursionError, operator.gt, a, b)
        self.assertRaises(RecursionError, operator.ge, a, b)
        b.append(17)
        # Even recursive lists of different lengths are different,
        # but they cannot be ordered
        self.assertTrue(not (a == b))
        self.assertTrue(a != b)
        self.assertRaises(RecursionError, operator.lt, a, b)
        self.assertRaises(RecursionError, operator.le, a, b)
        self.assertRaises(RecursionError, operator.gt, a, b)
        self.assertRaises(RecursionError, operator.ge, a, b)
        a.append(17)
        # equal lengths again: equality checks recurse once more
        self.assertRaises(RecursionError, operator.eq, a, b)
        self.assertRaises(RecursionError, operator.ne, a, b)
        a.insert(0, 11)
        b.insert(0, 12)
        # differing first elements decide the comparison before recursion
        self.assertTrue(not (a == b))
        self.assertTrue(a != b)
        self.assertTrue(a < b)

    def test_exception_message(self):
        class Spam:
            pass
        # each lambda must raise TypeError with a message matched by the
        # accompanying regex (naming the operator and both operand types)
        tests = [
            (lambda: 42 < None, r"'<' .* of 'int' and 'NoneType'"),
            (lambda: None < 42, r"'<' .* of 'NoneType' and 'int'"),
            (lambda: 42 > None, r"'>' .* of 'int' and 'NoneType'"),
            (lambda: "foo" < None, r"'<' .* of 'str' and 'NoneType'"),
            (lambda: "foo" >= 666, r"'>=' .* of 'str' and 'int'"),
            (lambda: 42 <= None, r"'<=' .* of 'int' and 'NoneType'"),
            (lambda: 42 >= None, r"'>=' .* of 'int' and 'NoneType'"),
            (lambda: 42 < [], r"'<' .* of 'int' and 'list'"),
            (lambda: () > [], r"'>' .* of 'tuple' and 'list'"),
            (lambda: None >= None, r"'>=' .* of 'NoneType' and 'NoneType'"),
            (lambda: Spam() < 42, r"'<' .* of 'Spam' and 'int'"),
            (lambda: 42 < Spam(), r"'<' .* of 'int' and 'Spam'"),
            (lambda: Spam() <= Spam(), r"'<=' .* of 'Spam' and 'Spam'"),
        ]
        for i, test in enumerate(tests):
            with self.subTest(test=i):
                with self.assertRaisesRegex(TypeError, test[1]):
                    test[0]()
class DictTest(unittest.TestCase):
    """dict equality with keys/values that only support == and !=."""

    def test_dicts(self):
        # Verify that __eq__ and __ne__ work for dicts even if the keys and
        # values don't support anything other than __eq__ and __ne__ (and
        # __hash__). Complex numbers are a fine example of that.
        import random
        imag1a = {}
        for i in range(50):
            imag1a[random.randrange(100)*1j] = random.randrange(100)*1j
        items = list(imag1a.items())
        random.shuffle(items)
        # imag1b: same entries as imag1a, inserted in shuffled order
        imag1b = {}
        for k, v in items:
            imag1b[k] = v
        # imag2: differs from imag1b in the entry whose key is left in k
        # by the loop above
        imag2 = imag1b.copy()
        imag2[k] = v + 1.0
        self.assertEqual(imag1a, imag1a)
        self.assertEqual(imag1a, imag1b)
        self.assertEqual(imag2, imag2)
        self.assertTrue(imag1a != imag2)
        # complex values are unordered, so <, <=, >, >= must raise
        for opname in ("lt", "le", "gt", "ge"):
            for op in opmap[opname]:
                self.assertRaises(TypeError, op, imag1a, imag2)
class ListTest(unittest.TestCase):
    """Built-in list comparison behavior."""

    def test_coverage(self):
        # exercise every comparison operator against an equal list and a
        # longer prefix-equal list
        single = [42]
        self.assertIs(single < single, False)
        self.assertIs(single <= single, True)
        self.assertIs(single == single, True)
        self.assertIs(single != single, False)
        self.assertIs(single > single, False)
        self.assertIs(single >= single, True)
        longer = [42, 42]
        self.assertIs(single < longer, True)
        self.assertIs(single <= longer, True)
        self.assertIs(single == longer, False)
        self.assertIs(single != longer, True)
        self.assertIs(single > longer, False)
        self.assertIs(single >= longer, False)

    def test_badentry(self):
        # exceptions raised while comparing items must propagate out of
        # the list comparison itself
        class Exc(Exception):
            pass

        class Bad:
            def __eq__(self, other):
                raise Exc

        lhs = [Bad()]
        rhs = [Bad()]
        for op in opmap["eq"]:
            self.assertRaises(Exc, op, lhs, rhs)

    def test_goodentry(self):
        # This test exercises the final call to PyObject_RichCompare()
        # in Objects/listobject.c::list_richcompare()
        class Good:
            def __lt__(self, other):
                return True

        lhs = [Good()]
        rhs = [Good()]
        for op in opmap["lt"]:
            self.assertIs(op(lhs, rhs), True)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
"""
Test connections without the builtin ssl module
Note: Import urllib3 inside the test functions to get the importblocker to work
"""
from ..test_no_ssl import TestWithoutSSL
from dummyserver.testcase import (
HTTPDummyServerTestCase, HTTPSDummyServerTestCase)
class TestHTTPWithoutSSL(HTTPDummyServerTestCase, TestWithoutSSL):
    """Plain HTTP must keep working when the ssl module is unavailable."""

    def test_simple(self):
        import urllib3

        http = urllib3.HTTPConnectionPool(self.host, self.port)
        response = http.request('GET', '/')
        self.assertEqual(response.status, 200, response.data)
class TestHTTPSWithoutSSL(HTTPSDummyServerTestCase, TestWithoutSSL):
    """HTTPS must fail with a clear SSLError when the ssl module is missing."""

    def test_simple(self):
        import urllib3

        pool = urllib3.HTTPSConnectionPool(self.host, self.port)
        try:
            pool.request('GET', '/')
        except urllib3.exceptions.SSLError as e:
            self.assertTrue('SSL module is not available' in str(e))
        else:
            # Bug fix: previously a request that unexpectedly succeeded
            # (raised nothing) made this test pass silently.
            self.fail('HTTPS request did not raise SSLError without ssl')
|
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from . import base
class TargetProgrammerInfo(object):
    """Tracks which programmer is currently selected among those available
    under the given root directories, persisting the choice in settings.
    """

    def __init__(self, root_dirs):
        self.target_programmer = None
        self.settings = base.settings.get_arduino_settings()
        self.update(root_dirs)

    def update(self, root_dirs):
        """Re-scan *root_dirs* and refresh the selected programmer."""
        self.root_dirs = root_dirs
        self.check_target_programmer()

    def check_target_programmer(self):
        """Validate the saved programmer id against the available ones,
        falling back to the first programmer when the saved id is unknown.
        """
        programmers = load_programmers(self.root_dirs)
        if programmers:
            programmer_ids = [
                programmer.get_id() for programmer in programmers]
            target_programmer_id = self.settings.get(
                'target_programmer_id', '')
            # Idiom fix: 'not in' instead of 'not ... in'.
            if target_programmer_id not in programmer_ids:
                target_programmer_id = programmer_ids[0]
                self.settings.set('target_programmer_id', target_programmer_id)
            index = programmer_ids.index(target_programmer_id)
            self.target_programmer = programmers[index]

    def change_target_programmer(self, programmer_id):
        """Persist *programmer_id* as the selection and re-validate it."""
        self.settings.set('target_programmer_id', programmer_id)
        self.check_target_programmer()

    def get_target_programmer(self):
        return self.target_programmer

    def get_params(self):
        """Return the selected programmer's parameters ({} if none selected)."""
        params = {}
        if self.target_programmer:
            params.update(self.target_programmer.get_params())
        return params
def load_programmers(root_dirs):
    """Gather every programmer defined by any platform below *root_dirs*."""
    found = []
    for root_dir in root_dirs:
        for pkg in root_dir.get_packages():
            for plat in pkg.get_platforms():
                found.extend(plat.get_programmers())
    return found
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
ExtractorError,
int_or_none,
parse_age_limit,
parse_duration,
)
class NRKBaseIE(InfoExtractor):
    """Shared extraction logic for NRK's "mediaelement" PS API.

    Subclasses provide ``_VALID_URL`` (whose ``id`` group is the
    mediaelement id) and ``_API_HOST``.
    """
    _GEO_COUNTRIES = ['NO']
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # One JSON document describes the whole programme: media assets,
        # rights messages, series/episode metadata and images.
        data = self._download_json(
            'http://%s/mediaelement/%s' % (self._API_HOST, video_id),
            video_id, 'Downloading mediaelement JSON')
        title = data.get('fullTitle') or data.get('mainTitle') or data['title']
        # Prefer the canonical id reported by the API over the URL one.
        video_id = data.get('id') or video_id
        entries = []
        conviva = data.get('convivaStatistics') or {}
        live = (data.get('mediaElementType') == 'Live' or
                data.get('isLive') is True or conviva.get('isLive'))
        def make_title(t):
            # Live streams get the conventional live-title decoration.
            return self._live_title(t) if live else t
        media_assets = data.get('mediaAssets')
        if media_assets and isinstance(media_assets, list):
            def video_id_and_title(idx):
                # Multi-part programmes become "<id>-<n>" / "<title> (Part n)".
                return ((video_id, title) if len(media_assets) == 1
                        else ('%s-%d' % (video_id, idx), '%s (Part %d)' % (title, idx)))
            for num, asset in enumerate(media_assets, 1):
                asset_url = asset.get('url')
                if not asset_url:
                    continue
                formats = self._extract_akamai_formats(asset_url, video_id)
                if not formats:
                    continue
                self._sort_formats(formats)
                # Some f4m streams may not work with hdcore in fragments' URLs
                for f in formats:
                    extra_param = f.get('extra_param_to_segment_url')
                    if extra_param and 'hdcore' in extra_param:
                        del f['extra_param_to_segment_url']
                entry_id, entry_title = video_id_and_title(num)
                duration = parse_duration(asset.get('duration'))
                subtitles = {}
                # Both subtitle flavours are collected under the single
                # 'no' (Norwegian) language key.
                for subtitle in ('webVtt', 'timedText'):
                    subtitle_url = asset.get('%sSubtitlesUrl' % subtitle)
                    if subtitle_url:
                        subtitles.setdefault('no', []).append({
                            'url': compat_urllib_parse_unquote(subtitle_url)
                        })
                entries.append({
                    'id': asset.get('carrierId') or entry_id,
                    'title': make_title(entry_title),
                    'duration': duration,
                    'subtitles': subtitles,
                    'formats': formats,
                })
        if not entries:
            # Fallback: some elements expose a single top-level mediaUrl
            # instead of a mediaAssets list.
            media_url = data.get('mediaUrl')
            if media_url:
                formats = self._extract_akamai_formats(media_url, video_id)
                self._sort_formats(formats)
                duration = parse_duration(data.get('duration'))
                entries = [{
                    'id': video_id,
                    'title': make_title(title),
                    'duration': duration,
                    'formats': formats,
                }]
        if not entries:
            # No playable asset at all: surface the API's rights message.
            MESSAGES = {
                'ProgramRightsAreNotReady': 'Du kan dessverre ikke se eller høre programmet',
                'ProgramRightsHasExpired': 'Programmet har gått ut',
                'ProgramIsGeoBlocked': 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
            }
            message_type = data.get('messageType', '')
            # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
            if 'IsGeoBlocked' in message_type:
                self.raise_geo_restricted(
                    msg=MESSAGES.get('ProgramIsGeoBlocked'),
                    countries=self._GEO_COUNTRIES)
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, MESSAGES.get(
                    message_type, message_type)),
                expected=True)
        series = conviva.get('seriesName') or data.get('seriesTitle')
        episode = conviva.get('episodeName') or data.get('episodeNumberOrDate')
        season_number = None
        episode_number = None
        if data.get('mediaElementType') == 'Episode':
            # Season/episode numbers are only encoded inside tracking URLs.
            _season_episode = data.get('scoresStatistics', {}).get('springStreamStream') or \
                data.get('relativeOriginUrl', '')
            EPISODENUM_RE = [
                r'/s(?P<season>\d{,2})e(?P<episode>\d{,2})\.',
                r'/sesong-(?P<season>\d{,2})/episode-(?P<episode>\d{,2})',
            ]
            season_number = int_or_none(self._search_regex(
                EPISODENUM_RE, _season_episode, 'season number',
                default=None, group='season'))
            episode_number = int_or_none(self._search_regex(
                EPISODENUM_RE, _season_episode, 'episode number',
                default=None, group='episode'))
        thumbnails = None
        images = data.get('images')
        if images and isinstance(images, dict):
            web_images = images.get('webImages')
            if isinstance(web_images, list):
                thumbnails = [{
                    'url': image['imageUrl'],
                    'width': int_or_none(image.get('width')),
                    'height': int_or_none(image.get('height')),
                } for image in web_images if image.get('imageUrl')]
        description = data.get('description')
        category = data.get('mediaAnalytics', {}).get('category')
        common_info = {
            'description': description,
            'series': series,
            'episode': episode,
            'season_number': season_number,
            'episode_number': episode_number,
            'categories': [category] if category else None,
            'age_limit': parse_age_limit(data.get('legalAge')),
            'thumbnails': thumbnails,
        }
        # Audio-only elements carry no video codec in any format.
        vcodec = 'none' if data.get('mediaType') == 'Audio' else None
        for entry in entries:
            entry.update(common_info)
            for f in entry['formats']:
                f['vcodec'] = vcodec
        points = data.get('shortIndexPoints')
        if isinstance(points, list):
            chapters = []
            for next_num, point in enumerate(points, start=1):
                if not isinstance(point, dict):
                    continue
                start_time = parse_duration(point.get('startPoint'))
                if start_time is None:
                    continue
                # A chapter ends where the next one starts; the last one
                # ends at the programme duration.
                end_time = parse_duration(
                    data.get('duration')
                    if next_num == len(points)
                    else points[next_num].get('startPoint'))
                if end_time is None:
                    continue
                chapters.append({
                    'start_time': start_time,
                    'end_time': end_time,
                    'title': point.get('title'),
                })
            # Chapters only make sense on a single-part programme.
            if chapters and len(entries) == 1:
                entries[0]['chapters'] = chapters
        return self.playlist_result(entries, video_id, title, description)
class NRKIE(NRKBaseIE):
    """Extractor for direct mediaelement ids: ``nrk:`` pseudo-URLs,
    ``nrk.no/video/PS*`` pages and raw v8 PS API URLs."""
    _VALID_URL = r'''(?x)
                        (?:
                            nrk:|
                            https?://
                                (?:
                                    (?:www\.)?nrk\.no/video/PS\*|
                                    v8[-.]psapi\.nrk\.no/mediaelement/
                                )
                        )
                        (?P<id>[^?#&]+)
                        '''
    _API_HOST = 'v8-psapi.nrk.no'
    _TESTS = [{
        # video
        'url': 'http://www.nrk.no/video/PS*150533',
        'md5': '2f7f6eeb2aacdd99885f355428715cfa',
        'info_dict': {
            'id': '150533',
            'ext': 'mp4',
            'title': 'Dompap og andre fugler i Piip-Show',
            'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f',
            'duration': 263,
        }
    }, {
        # audio
        'url': 'http://www.nrk.no/video/PS*154915',
        # MD5 is unstable
        'info_dict': {
            'id': '154915',
            'ext': 'flv',
            'title': 'Slik høres internett ut når du er blind',
            'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
            'duration': 20,
        }
    }, {
        'url': 'nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
        'only_matching': True,
    }, {
        'url': 'nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70',
        'only_matching': True,
    }, {
        'url': 'https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
        'only_matching': True,
    }]
class NRKTVIE(NRKBaseIE):
    """Extractor for programme pages on tv.nrk.no / radio.nrk.no."""
    IE_DESC = 'NRK TV and NRK Radio'
    # Programme ids look like four letters followed by eight digits.
    _EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})'
    _VALID_URL = r'''(?x)
                        https?://
                            (?:tv|radio)\.nrk(?:super)?\.no/
                            (?:serie/[^/]+|program)/
                            (?![Ee]pisodes)%s
                            (?:/\d{2}-\d{2}-\d{4})?
                            (?:\#del=(?P<part_id>\d+))?
                    ''' % _EPISODE_RE
    _API_HOST = 'psapi-ne.nrk.no'
    _TESTS = [{
        'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
        'md5': '4e9ca6629f09e588ed240fb11619922a',
        'info_dict': {
            'id': 'MUHH48000314AA',
            'ext': 'mp4',
            'title': '20 spørsmål 23.05.2014',
            'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
            'duration': 1741,
            'series': '20 spørsmål - TV',
            'episode': '23.05.2014',
        },
    }, {
        'url': 'https://tv.nrk.no/program/mdfp15000514',
        'info_dict': {
            'id': 'MDFP15000514CA',
            'ext': 'mp4',
            'title': 'Grunnlovsjubiléet - Stor ståhei for ingenting 24.05.2014',
            'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db',
            'duration': 4605,
            'series': 'Kunnskapskanalen',
            'episode': '24.05.2014',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # single playlist video
        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
        'info_dict': {
            'id': 'MSPO40010515-part2',
            'ext': 'flv',
            'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
            'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Video is geo restricted'],
        'skip': 'particular part is not supported currently',
    }, {
        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
        'playlist': [{
            'info_dict': {
                'id': 'MSPO40010515AH',
                'ext': 'mp4',
                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 1)',
                'description': 'md5:c03aba1e917561eface5214020551b7a',
                'duration': 772,
                'series': 'Tour de Ski',
                'episode': '06.01.2015',
            },
            'params': {
                'skip_download': True,
            },
        }, {
            'info_dict': {
                'id': 'MSPO40010515BH',
                'ext': 'mp4',
                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 2)',
                'description': 'md5:c03aba1e917561eface5214020551b7a',
                'duration': 6175,
                'series': 'Tour de Ski',
                'episode': '06.01.2015',
            },
            'params': {
                'skip_download': True,
            },
        }],
        'info_dict': {
            'id': 'MSPO40010515',
            'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
            'description': 'md5:c03aba1e917561eface5214020551b7a',
        },
        'expected_warnings': ['Video is geo restricted'],
    }, {
        'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13',
        'info_dict': {
            'id': 'KMTE50001317AA',
            'ext': 'mp4',
            'title': 'Anno 13:30',
            'description': 'md5:11d9613661a8dbe6f9bef54e3a4cbbfa',
            'duration': 2340,
            'series': 'Anno',
            'episode': '13:30',
            'season_number': 3,
            'episode_number': 13,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017',
        'info_dict': {
            'id': 'MUHH46000317AA',
            'ext': 'mp4',
            'title': 'Nytt på Nytt 27.01.2017',
            'description': 'md5:5358d6388fba0ea6f0b6d11c48b9eb4b',
            'duration': 1796,
            'series': 'Nytt på nytt',
            'episode': '27.01.2017',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#',
        'only_matching': True,
    }]
class NRKTVDirekteIE(NRKTVIE):
    """Live channels; reuses NRKTVIE's extraction with a channel-slug URL."""
    IE_DESC = 'NRK TV Direkte and NRK Radio Direkte'
    _VALID_URL = r'https?://(?:tv|radio)\.nrk\.no/direkte/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://tv.nrk.no/direkte/nrk1',
        'only_matching': True,
    }, {
        'url': 'https://radio.nrk.no/direkte/p1_oslo_akershus',
        'only_matching': True,
    }]
class NRKPlaylistBaseIE(InfoExtractor):
    """Base for NRK pages embedding several videos.

    Subclasses define ``_ITEM_RE`` (pattern yielding embedded video ids)
    and ``_extract_title``; ``_extract_description`` may be overridden.
    """
    def _extract_description(self, webpage):
        # Default: no description; subclasses may override.
        pass

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        entries = [
            self.url_result('nrk:%s' % video_id, NRKIE.ie_key())
            for video_id in re.findall(self._ITEM_RE, webpage)
        ]
        # Fixed stray space in the original "self. _extract_title" call.
        playlist_title = self._extract_title(webpage)
        playlist_description = self._extract_description(webpage)
        return self.playlist_result(
            entries, playlist_id, playlist_title, playlist_description)
class NRKPlaylistIE(NRKPlaylistBaseIE):
    """Article pages on nrk.no embedding multiple videos."""
    _VALID_URL = r'https?://(?:www\.)?nrk\.no/(?!video|skole)(?:[^/]+/)+(?P<id>[^/]+)'
    _ITEM_RE = r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"'
    _TESTS = [{
        'url': 'http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763',
        'info_dict': {
            'id': 'gjenopplev-den-historiske-solformorkelsen-1.12270763',
            'title': 'Gjenopplev den historiske solformørkelsen',
            'description': 'md5:c2df8ea3bac5654a26fc2834a542feed',
        },
        'playlist_count': 2,
    }, {
        'url': 'http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449',
        'info_dict': {
            'id': 'rivertonprisen-til-karin-fossum-1.12266449',
            'title': 'Rivertonprisen til Karin Fossum',
            'description': 'Første kvinne på 15 år til å vinne krimlitteraturprisen.',
        },
        'playlist_count': 5,
    }]
    def _extract_title(self, webpage):
        # Article title comes from OpenGraph metadata.
        return self._og_search_title(webpage, fatal=False)
    def _extract_description(self, webpage):
        return self._og_search_description(webpage)
class NRKTVEpisodesIE(NRKPlaylistBaseIE):
    """Season pages listing a series' episodes on tv.nrk.no."""
    _VALID_URL = r'https?://tv\.nrk\.no/program/[Ee]pisodes/[^/]+/(?P<id>\d+)'
    # Episode ids reuse NRKTVIE's four-letters-eight-digits pattern.
    _ITEM_RE = r'data-episode=["\']%s' % NRKTVIE._EPISODE_RE
    _TESTS = [{
        'url': 'https://tv.nrk.no/program/episodes/nytt-paa-nytt/69031',
        'info_dict': {
            'id': '69031',
            'title': 'Nytt på nytt, sesong: 201210',
        },
        'playlist_count': 4,
    }]
    def _extract_title(self, webpage):
        return self._html_search_regex(
            r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
class NRKTVSeriesIE(InfoExtractor):
    """Series landing pages: expands each season into an Episodes page."""
    _VALID_URL = r'https?://(?:tv|radio)\.nrk(?:super)?\.no/serie/(?P<id>[^/]+)'
    _ITEM_RE = r'(?:data-season=["\']|id=["\']season-)(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://tv.nrk.no/serie/groenn-glede',
        'info_dict': {
            'id': 'groenn-glede',
            'title': 'Grønn glede',
            'description': 'md5:7576e92ae7f65da6993cf90ee29e4608',
        },
        'playlist_mincount': 9,
    }, {
        'url': 'http://tv.nrksuper.no/serie/labyrint',
        'info_dict': {
            'id': 'labyrint',
            'title': 'Labyrint',
            'description': 'md5:58afd450974c89e27d5a19212eee7115',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'https://tv.nrk.no/serie/broedrene-dal-og-spektralsteinene',
        'only_matching': True,
    }, {
        'url': 'https://tv.nrk.no/serie/saving-the-human-race',
        'only_matching': True,
    }, {
        'url': 'https://tv.nrk.no/serie/postmann-pat',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Episode URLs also live under /serie/; defer those to NRKTVIE.
        return False if NRKTVIE.suitable(url) else super(NRKTVSeriesIE, cls).suitable(url)
    def _real_extract(self, url):
        series_id = self._match_id(url)
        webpage = self._download_webpage(url, series_id)
        # One playlist entry per season found on the landing page.
        entries = [
            self.url_result(
                'https://tv.nrk.no/program/Episodes/{series}/{season}'.format(
                    series=series_id, season=season_id))
            for season_id in re.findall(self._ITEM_RE, webpage)
        ]
        title = self._html_search_meta(
            'seriestitle', webpage,
            'title', default=None) or self._og_search_title(
            webpage, fatal=False)
        description = self._html_search_meta(
            'series_description', webpage,
            'description', default=None) or self._og_search_description(webpage)
        return self.playlist_result(entries, series_id, title, description)
class NRKSkoleIE(InfoExtractor):
    """NRK Skole pages: resolves a mediaId into a PS API id via mimir."""
    IE_DESC = 'NRK Skole'
    _VALID_URL = r'https?://(?:www\.)?nrk\.no/skole/?\?.*\bmediaId=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.nrk.no/skole/?page=search&q=&mediaId=14099',
        'md5': '6bc936b01f9dd8ed45bc58b252b2d9b6',
        'info_dict': {
            'id': '6021',
            'ext': 'mp4',
            'title': 'Genetikk og eneggede tvillinger',
            'description': 'md5:3aca25dcf38ec30f0363428d2b265f8d',
            'duration': 399,
        },
    }, {
        'url': 'https://www.nrk.no/skole/?page=objectives&subject=naturfag&objective=K15114&mediaId=19355',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'https://mimir.nrk.no/plugin/1.0/static?mediaId=%s' % video_id,
            video_id)
        # The plugin page embeds a JSON blob holding the PS API id.
        nrk_id = self._parse_json(
            self._search_regex(
                r'<script[^>]+type=["\']application/json["\'][^>]*>({.+?})</script>',
                webpage, 'application json'),
            video_id)['activeMedia']['psId']
        # Hand off to NRKIE via the nrk: pseudo-URL.
        return self.url_result('nrk:%s' % nrk_id)
|
"""Test configs for conv."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_conv_to_depthwiseconv_with_shared_weights_tests(options):
  """Make a test where 2 Conv ops shared the same constant weight tensor."""
  test_parameters = [{
      "input_shape": [[1, 10, 10, 1]],
      "filter_shape": [[3, 3]],
      "strides": [[1, 1, 1, 1]],
      "dilations": [[1, 1, 1, 1]],
      "padding": ["SAME"],
      "data_format": ["NHWC"],
      "channel_multiplier": [3],
  }]
  def get_tensor_shapes(parameters):
    """Return [input_shape, filter_shape] for the given parameter combo."""
    input_shape = parameters["input_shape"]
    filter_size = parameters["filter_shape"]
    # Filter layout is HWIO: [h, w, in_channels, channel_multiplier].
    filter_shape = filter_size + [
        input_shape[3], parameters["channel_multiplier"]
    ]
    return [input_shape, filter_shape]
  def build_graph(parameters):
    """Build a conv graph given `parameters`."""
    input_shape, filter_shape = get_tensor_shapes(parameters)
    input_tensor = tf.compat.v1.placeholder(
        dtype=tf.float32, name="input", shape=input_shape)
    # Construct a constant weights tensor which will be used by both Conv2D.
    filter_tensor = tf.constant(
        create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
    input_tensors = [input_tensor]
    # Construct 2 Conv2D operations which use exactly the same input and
    # weights.
    result1 = tf.nn.conv2d(
        input_tensor,
        filter_tensor,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    result2 = tf.nn.conv2d(
        input_tensor,
        filter_tensor,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    # Add the 2 results up.
    out = result1 + result2
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    # Build list of input values either containing 1 tensor (input) or 2 tensors
    # (input, filter) based on whether filter is constant or variable input.
    input_shape, unused_filter_shape = get_tensor_shapes(parameters)
    values = [create_tensor_data(np.float32, input_shape)]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
import urllib2
from cStringIO import StringIO
import _response
class GzipConsumer:
    """Incremental gzip decoder (Python 2: chunks are byte strings).

    Buffers input until a complete gzip header has arrived, then streams
    the decompressed payload to the wrapped consumer's ``feed``.
    """
    def __init__(self, consumer):
        self.__consumer = consumer
        self.__decoder = None
        self.__data = ""
    def __getattr__(self, key):
        # Delegate everything else to the wrapped consumer.
        return getattr(self.__consumer, key)
    def feed(self, data):
        if self.__decoder is None:
            # check if we have a full gzip header
            data = self.__data + data
            try:
                # Fixed header is 10 bytes; optional fields follow
                # depending on the FLG byte (RFC 1952 bit values below).
                i = 10
                flag = ord(data[3])
                if flag & 4: # extra
                    x = ord(data[i]) + 256*ord(data[i+1])
                    i = i + 2 + x
                if flag & 8: # filename
                    # NUL-terminated string.
                    while ord(data[i]):
                        i = i + 1
                    i = i + 1
                if flag & 16: # comment
                    while ord(data[i]):
                        i = i + 1
                    i = i + 1
                if flag & 2: # crc
                    i = i + 2
                if len(data) < i:
                    raise IndexError("not enough data")
                if data[:3] != "\x1f\x8b\x08":
                    raise IOError("invalid gzip data")
                data = data[i:]
            except IndexError:
                self.__data = data
                return # need more data
            import zlib
            self.__data = ""
            # Negative wbits: raw deflate stream, no zlib container.
            self.__decoder = zlib.decompressobj(-zlib.MAX_WBITS)
        data = self.__decoder.decompress(data)
        if data:
            self.__consumer.feed(data)
    def close(self):
        # Flush whatever the decompressor still buffers before closing.
        if self.__decoder:
            data = self.__decoder.flush()
            if data:
                self.__consumer.feed(data)
        self.__consumer.close()
class stupid_gzip_consumer:
    """Trivial consumer that simply accumulates every fed chunk."""
    def __init__(self):
        self.data = []
    def feed(self, data):
        self.data.append(data)
class stupid_gzip_wrapper(_response.closeable_response):
    """Response wrapper that eagerly decompresses the whole gzipped body
    into an in-memory buffer and serves all reads from it."""
    def __init__(self, response):
        self._response = response
        c = stupid_gzip_consumer()
        gzc = GzipConsumer(c)
        # Read and decode the entire body up front.
        gzc.feed(response.read())
        self.__data = StringIO("".join(c.data))
    def read(self, size=-1):
        return self.__data.read(size)
    def readline(self, size=-1):
        return self.__data.readline(size)
    def readlines(self, sizehint=-1):
        return self.__data.readlines(sizehint)
    def __getattr__(self, name):
        # delegate unknown methods/attributes
        return getattr(self._response, name)
class HTTPGzipProcessor(urllib2.BaseHandler):
    """urllib2 handler advertising gzip support and transparently
    unwrapping gzip/compress-encoded responses."""
    handler_order = 200 # response processing before HTTPEquivProcessor
    def http_request(self, request):
        request.add_header("Accept-Encoding", "gzip")
        return request
    def http_response(self, request, response):
        # post-process response
        enc_hdrs = response.info().getheaders("Content-encoding")
        for enc_hdr in enc_hdrs:
            if ("gzip" in enc_hdr) or ("compress" in enc_hdr):
                return stupid_gzip_wrapper(response)
        return response
    https_response = http_response
|
import os
from scrapy.command import ScrapyCommand
from scrapy.utils.conf import arglist_to_dict
from scrapy.exceptions import UsageError
class Command(ScrapyCommand):
    """``scrapy crawl``: run a single spider from the current project."""
    requires_project = True
    def syntax(self):
        return "[options] <spider>"
    def short_desc(self):
        return "Run a spider"
    def add_options(self, parser):
        # Extend the shared Scrapy options with crawl-specific ones.
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
                          help="set spider argument (may be repeated)")
        parser.add_option("-o", "--output", metavar="FILE",
                          help="dump scraped items into FILE (use - for stdout)")
        parser.add_option("-t", "--output-format", metavar="FORMAT",
                          help="format to use for dumping items with -o")
    def process_options(self, args, opts):
        """Translate parsed options into spider args and feed settings."""
        ScrapyCommand.process_options(self, args, opts)
        try:
            opts.spargs = arglist_to_dict(opts.spargs)
        except ValueError:
            raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)
        if opts.output:
            if opts.output == '-':
                self.settings.set('FEED_URI', 'stdout:', priority='cmdline')
            else:
                self.settings.set('FEED_URI', opts.output, priority='cmdline')
            valid_output_formats = (
                list(self.settings.getdict('FEED_EXPORTERS').keys()) +
                list(self.settings.getdict('FEED_EXPORTERS_BASE').keys())
            )
            if not opts.output_format:
                # Infer the export format from the file extension.
                # NOTE(review): with '-o -' this yields '' and always raises
                # below unless -t is also given -- confirm that is intended.
                opts.output_format = os.path.splitext(opts.output)[1].replace(".", "")
            if opts.output_format not in valid_output_formats:
                raise UsageError("Unrecognized output format '%s', set one"
                                 " using the '-t' switch or as a file extension"
                                 " from the supported list %s" % (opts.output_format,
                                                                  tuple(valid_output_formats)))
            self.settings.set('FEED_FORMAT', opts.output_format, priority='cmdline')
    def run(self, args, opts):
        if len(args) < 1:
            raise UsageError()
        elif len(args) > 1:
            raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
        spname = args[0]
        self.crawler_process.crawl(spname, **opts.spargs)
        self.crawler_process.start()
|
from __future__ import print_function, division
from sympy.core import Basic, Integer, Tuple, Dict, S, sympify
from sympy.core.sympify import converter as sympify_converter
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.dense import DenseMatrix
from sympy.matrices.sparse import SparseMatrix, MutableSparseMatrix
from sympy.matrices.expressions import MatrixExpr
def sympify_matrix(arg):
    """Return the immutable counterpart of a (possibly mutable) matrix."""
    return arg.as_immutable()
# Register the converter so that sympify() turns any MatrixBase instance
# into its immutable form.
sympify_converter[MatrixBase] = sympify_matrix
class ImmutableMatrix(MatrixExpr, DenseMatrix):
    """Create an immutable version of a matrix.
    Examples
    ========
    >>> from sympy import eye
    >>> from sympy.matrices import ImmutableMatrix
    >>> ImmutableMatrix(eye(3))
    Matrix([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1]])
    >>> _[0, 0] = 42
    Traceback (most recent call last):
    ...
    TypeError: Cannot set values of ImmutableDenseMatrix
    """
    # MatrixExpr is set as NotIterable, but we want explicit matrices to be
    # iterable
    _iterable = True
    _class_priority = 8
    @classmethod
    def _new(cls, *args, **kwargs):
        """Construct from creation inputs; args stored as (rows, cols, Tuple)."""
        # Already-immutable input is returned unchanged.
        if len(args) == 1 and isinstance(args[0], ImmutableMatrix):
            return args[0]
        rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
        rows = Integer(rows)
        cols = Integer(cols)
        mat = Tuple(*flat_list)
        return Basic.__new__(cls, rows, cols, mat)
    def __new__(cls, *args, **kwargs):
        return cls._new(*args, **kwargs)
    @property
    def shape(self):
        # args[0], args[1] hold rows and cols as Integers.
        return tuple([int(i) for i in self.args[:2]])
    @property
    def _mat(self):
        # Flat (row-major) element list stored in args[2].
        return list(self.args[2])
    def _entry(self, i, j):
        return DenseMatrix.__getitem__(self, (i, j))
    __getitem__ = DenseMatrix.__getitem__
    def __setitem__(self, *args):
        # Immutability: any item assignment is rejected.
        raise TypeError("Cannot set values of ImmutableMatrix")
    def _eval_Eq(self, other):
        """Helper method for Equality with matrices.
        Relational automatically converts matrices to ImmutableMatrix
        instances, so this method only applies here. Returns True if the
        matrices are definitively the same, False if they are definitively
        different, and None if undetermined (e.g. if they contain Symbols).
        Returning None triggers default handling of Equalities.
        """
        if not hasattr(other, 'shape') or self.shape != other.shape:
            return S.false
        if isinstance(other, MatrixExpr) and not isinstance(
                other, ImmutableMatrix):
            return None
        diff = self - other
        return sympify(diff.is_zero)
    # Re-point selected operations at the explicit-matrix implementations
    # rather than the symbolic MatrixExpr ones.
    adjoint = MatrixBase.adjoint
    conjugate = MatrixBase.conjugate
    # C and T are defined in MatrixExpr...I don't know why C alone
    # needs to be defined here
    C = MatrixBase.C
    as_mutable = DenseMatrix.as_mutable
    _eval_trace = DenseMatrix._eval_trace
    _eval_transpose = DenseMatrix._eval_transpose
    _eval_conjugate = DenseMatrix._eval_conjugate
    _eval_adjoint = DenseMatrix._eval_adjoint
    _eval_inverse = DenseMatrix._eval_inverse
    _eval_simplify = DenseMatrix._eval_simplify
    _eval_diff = DenseMatrix._eval_diff
    equals = DenseMatrix.equals
    is_Identity = DenseMatrix.is_Identity
    # Arithmetic uses the concrete MatrixBase operators.
    __add__ = MatrixBase.__add__
    __radd__ = MatrixBase.__radd__
    __mul__ = MatrixBase.__mul__
    __matmul__ = MatrixBase.__matmul__
    __rmul__ = MatrixBase.__rmul__
    __rmatmul__ = MatrixBase.__rmatmul__
    __pow__ = MatrixBase.__pow__
    __sub__ = MatrixBase.__sub__
    __rsub__ = MatrixBase.__rsub__
    __neg__ = MatrixBase.__neg__
    __div__ = MatrixBase.__div__
    __truediv__ = MatrixBase.__truediv__
# Assigned after the class body -- presumably so the DenseMatrix property
# wins over whatever MatrixExpr would otherwise provide; TODO confirm.
ImmutableMatrix.is_zero = DenseMatrix.is_zero
class ImmutableSparseMatrix(Basic, SparseMatrix):
    """Create an immutable version of a sparse matrix.
    Examples
    ========
    >>> from sympy import eye
    >>> from sympy.matrices.immutable import ImmutableSparseMatrix
    >>> ImmutableSparseMatrix(1, 1, {})
    Matrix([[0]])
    >>> ImmutableSparseMatrix(eye(3))
    Matrix([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1]])
    >>> _[0, 0] = 42
    Traceback (most recent call last):
    ...
    TypeError: Cannot set values of ImmutableSparseMatrix
    >>> _.shape
    (3, 3)
    """
    _class_priority = 9
    @classmethod
    def _new(cls, *args, **kwargs):
        # Build via the mutable sparse type, then freeze its storage.
        s = MutableSparseMatrix(*args)
        rows = Integer(s.rows)
        cols = Integer(s.cols)
        mat = Dict(s._smat)
        obj = Basic.__new__(cls, rows, cols, mat)
        obj.rows = s.rows
        obj.cols = s.cols
        obj._smat = s._smat
        return obj
    def __new__(cls, *args, **kwargs):
        return cls._new(*args, **kwargs)
    def __setitem__(self, *args):
        # Immutability: any item assignment is rejected.
        raise TypeError("Cannot set values of ImmutableSparseMatrix")
    subs = MatrixBase.subs
    xreplace = MatrixBase.xreplace
    def __hash__(self):
        # Hash over the shape and the keys (positions) of the sparse
        # storage; tuple(self._smat) iterates dict keys only.
        return hash((type(self).__name__,) + (self.shape, tuple(self._smat)))
    _eval_Eq = ImmutableMatrix._eval_Eq
|
"""
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '{{ secret_key }}'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
|
# Standard Ansible module metadata: interface still in 'preview',
# maintained by the network team.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrrp
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VRRP configuration on NX-OS switches.
description:
- Manages VRRP configuration on NX-OS switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- VRRP feature needs to be enabled first on the system.
- SVIs must exist before using this module.
- Interface must be a L3 port before using this module.
- C(state=absent) removes the VRRP group if it exists on the device.
- VRRP cannot be configured on loopback interfaces.
options:
group:
description:
- VRRP group number.
required: true
interface:
description:
- Full name of interface that is being managed for VRRP.
required: true
interval:
description:
- Time interval between advertisement or 'default' keyword
required: false
default: 1
version_added: 2.6
priority:
description:
- VRRP priority or 'default' keyword
default: 100
preempt:
description:
- Enable/Disable preempt.
type: bool
default: 'yes'
vip:
description:
- VRRP virtual IP address or 'default' keyword
authentication:
description:
- Clear text authentication string or 'default' keyword
admin_state:
description:
- Used to enable or disable the VRRP process.
choices: ['shutdown', 'no shutdown', 'default']
default: shutdown
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure vrrp group 100 and vip 10.1.100.1 is on vlan10
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
- name: Ensure removal of the vrrp group config
# vip is required to ensure the user knows what they are removing
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
state: absent
- name: Re-config with more params
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
preempt: false
priority: 130
authentication: AUTHKEY
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface vlan10", "vrrp 150", "address 10.1.15.1",
"authentication text testing", "no shutdown"]
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
# Device-side default values, substituted when the user passes the literal
# string 'default' for one of these parameters.
PARAM_TO_DEFAULT_KEYMAP = {
    'priority': '100',
    'interval': '1',
    'vip': '0.0.0.0',
    'admin_state': 'shutdown',
}
def execute_show_command(command, module):
    """Run *command* on the device; request JSON output except for
    'show run' commands, which only support plain text."""
    fmt = 'text' if 'show run' in command else 'json'
    command_spec = [{
        'command': command,
        'output': fmt,
    }]
    return run_commands(module, command_spec)[0]
def apply_key_map(key_map, table):
    """Rename keys of *table* according to *key_map*; truthy values are
    stringified, falsy ones kept as-is. Unmapped keys are dropped."""
    renamed = {}
    for old_key, value in table.items():
        mapped = key_map.get(old_key)
        if not mapped:
            continue
        renamed[mapped] = str(value) if value else value
    return renamed
def get_interface_type(interface):
    """Classify an interface name by its (case-insensitive) prefix."""
    # Checked in the same order as the original if/elif chain.
    prefix_to_type = (
        ('ET', 'ethernet'),
        ('VL', 'svi'),
        ('LO', 'loopback'),
        ('MG', 'management'),
        ('MA', 'management'),
        ('PO', 'portchannel'),
    )
    upper = interface.upper()
    for prefix, intf_type in prefix_to_type:
        if upper.startswith(prefix):
            return intf_type
    return 'unknown'
def is_default(interface, module):
    """Return True when *interface* carries no extra config, False when it
    does, or the string 'DNE' when it does not exist on the device."""
    cmd = 'show run interface {0}'.format(interface)
    try:
        body = execute_show_command(cmd, module)
    except KeyError:
        return 'DNE'
    if 'invalid' in body.lower():
        return 'DNE'
    # An unconfigured interface's running config ends right at the
    # 'interface ...' line.
    return body.split('\n')[-1].startswith('interface')
def get_interface_mode(interface, intf_type, module):
    """Return (mode, name) for *interface*: 'layer2', 'layer3' or 'unknown'.

    *name* is the interface name as reported by the device.
    """
    command = 'show interface {0}'.format(interface)
    mode = 'unknown'
    # Bug fix: removed the dead rebinding of the 'interface' parameter to
    # an empty dict that the original performed here (the value was never
    # read afterwards).
    body = execute_show_command(command, module)
    interface_table = body['TABLE_interface']['ROW_interface']
    name = interface_table.get('interface')
    if intf_type in ['ethernet', 'portchannel']:
        # Physical/aggregate ports report their L2 mode via 'eth_mode';
        # anything other than access/trunk is treated as a routed port.
        mode = str(interface_table.get('eth_mode', 'layer3'))
        if mode == 'access' or mode == 'trunk':
            mode = 'layer2'
    elif intf_type == 'svi':
        mode = 'layer3'
    return mode, name
def get_vrr_status(group, module, interface):
    """Return the admin state ('shutdown' / 'no shutdown') of VRRP *group*
    on *interface*, read from the full running config."""
    command = 'show run all | section interface.{0}$'.format(interface)
    body = execute_show_command(command, module)
    vrf_index = None
    admin_state = 'shutdown'
    if body:
        splitted_body = body.splitlines()
        # Locate the 'vrrp <group>' line (last line intentionally skipped).
        for index in range(0, len(splitted_body) - 1):
            if splitted_body[index].strip() == 'vrrp {0}'.format(group):
                vrf_index = index
        # NOTE(review): if the group was not found vrf_index stays None and
        # the slice below silently scans the whole section -- confirm that
        # callers only reach here for existing groups.
        vrf_section = splitted_body[vrf_index::]
        for line in vrf_section:
            if line.strip() == 'no shutdown':
                admin_state = 'no shutdown'
                break
    return admin_state
def get_existing_vrrp(interface, group, module, name):
    """Return the current settings of VRRP *group* on *interface* as a dict
    (normalized key names), or {} when the group is not configured."""
    command = 'show vrrp detail interface {0}'.format(interface)
    body = execute_show_command(command, module)
    vrrp = {}
    # Map device JSON keys to module parameter names.
    vrrp_key = {
        'sh_group_id': 'group',
        'sh_vip_addr': 'vip',
        'sh_priority': 'priority',
        'sh_group_preempt': 'preempt',
        'sh_auth_text': 'authentication',
        'sh_adv_interval': 'interval'
    }
    try:
        vrrp_table = body['TABLE_vrrp_group']
    except (AttributeError, IndexError, TypeError):
        # No VRRP configured on this interface at all.
        return {}
    # A single group comes back as a dict; normalize to a list.
    if isinstance(vrrp_table, dict):
        vrrp_table = [vrrp_table]
    for each_vrrp in vrrp_table:
        vrrp_row = each_vrrp['ROW_vrrp_group']
        parsed_vrrp = apply_key_map(vrrp_key, vrrp_row)
        # Device reports preempt as 'Enable'/'Disable'; convert to bool.
        if parsed_vrrp['preempt'] == 'Disable':
            parsed_vrrp['preempt'] = False
        elif parsed_vrrp['preempt'] == 'Enable':
            parsed_vrrp['preempt'] = True
        # NOTE(review): comparison relies on *group* being a string, since
        # apply_key_map stringifies the device value -- confirm at callers.
        if parsed_vrrp['group'] == group:
            parsed_vrrp['admin_state'] = get_vrr_status(group, module, name)
            return parsed_vrrp
    return vrrp
def get_commands_config_vrrp(delta, existing, group):
    """Build the CLI commands that realize *delta* for VRRP *group*.

    Only parameters present in *delta* produce commands; a 'default'
    value is first translated through PARAM_TO_DEFAULT_KEYMAP and only
    emitted when it differs from *existing*.  When any command is
    produced, the 'vrrp <group>' context line is prepended.
    """
    templates = {
        'priority': 'priority {0}',
        'preempt': 'preempt',
        'vip': 'address {0}',
        'interval': 'advertisement-interval {0}',
        'auth': 'authentication text {0}',
        'admin_state': '{0}',
    }
    commands = []
    for param in ('vip', 'priority', 'interval', 'admin_state'):
        value = delta.get(param)
        if value == 'default':
            value = PARAM_TO_DEFAULT_KEYMAP.get(param)
            if value != existing.get(param):
                commands.append(templates[param].format(value))
        elif value:
            commands.append(templates[param].format(value))
    preempt = delta.get('preempt')
    if preempt:
        commands.append(templates['preempt'])
    elif preempt is False:
        commands.append('no ' + templates['preempt'])
    auth = delta.get('authentication')
    if auth:
        if auth != 'default':
            commands.append(templates['auth'].format(auth))
        elif existing.get('authentication'):
            commands.append('no authentication')
    if commands:
        commands.insert(0, 'vrrp {0}'.format(group))
    return commands
def flatten_list(command_lists):
    """Flatten one level of nesting: list items are spliced in, scalars kept."""
    flattened = []
    for item in command_lists:
        flattened.extend(item if isinstance(item, list) else [item])
    return flattened
def validate_params(param, module):
    """Range-check integer-like module parameters.

    'group' must be 1..255 and 'priority' 1..254; any other param is
    accepted as-is.  Violations (including non-integer input) are
    reported via module.fail_json.
    """
    value = module.params[param]
    if param == 'group':
        try:
            if not 1 <= int(value) <= 255:
                raise ValueError
        except ValueError:
            # Non-numeric input raises the same ValueError as out-of-range.
            module.fail_json(msg="Warning! 'group' must be an integer between"
                             " 1 and 255", group=value)
    elif param == 'priority':
        try:
            if not 1 <= int(value) <= 254:
                raise ValueError
        except ValueError:
            module.fail_json(msg="Warning! 'priority' must be an integer "
                             "between 1 and 254", priority=value)
def main():
    """Entry point of the nxos_vrrp module: validate input, compute the
    config delta for the requested VRRP group and push it to the device."""
    argument_spec = dict(
        group=dict(required=True, type='str'),
        interface=dict(required=True),
        interval=dict(required=False, type='str'),
        priority=dict(required=False, type='str'),
        preempt=dict(required=False, type='bool'),
        vip=dict(required=False, type='str'),
        admin_state=dict(required=False, type='str',
                         choices=['shutdown', 'no shutdown', 'default'],
                         default='shutdown'),
        authentication=dict(required=False, type='str'),
        state=dict(choices=['absent', 'present'], required=False, default='present')
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    warnings = list()
    results = {'changed': False, 'commands': [], 'warnings': warnings}
    state = module.params['state']
    interface = module.params['interface'].lower()
    group = module.params['group']
    priority = module.params['priority']
    interval = module.params['interval']
    preempt = module.params['preempt']
    vip = module.params['vip']
    authentication = module.params['authentication']
    admin_state = module.params['admin_state']
    device_info = get_capabilities(module)
    network_api = device_info.get('network_api', 'nxapi')
    if state == 'present' and not vip:
        module.fail_json(msg='the "vip" param is required when state=present')
    intf_type = get_interface_type(interface)
    # Non-ethernet interfaces (SVI, port-channel, ...) must already exist;
    # only checked over CLI transport.
    if (intf_type != 'ethernet' and network_api == 'cliconf'):
        if is_default(interface, module) == 'DNE':
            module.fail_json(msg='That interface does not exist yet. Create '
                                 'it first.', interface=interface)
        if intf_type == 'loopback':
            module.fail_json(msg="Loopback interfaces don't support VRRP.",
                             interface=interface)
    mode, name = get_interface_mode(interface, intf_type, module)
    # VRRP requires a routed (layer 3) port.
    if mode == 'layer2':
        module.fail_json(msg='That interface is a layer2 port.\nMake it '
                             'a layer 3 port first.', interface=interface)
    args = dict(group=group, priority=priority, preempt=preempt,
                vip=vip, authentication=authentication, interval=interval,
                admin_state=admin_state)
    # Only user-supplied values participate in the diff.
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    existing = get_existing_vrrp(interface, group, module, name)
    changed = False
    end_state = existing
    commands = []
    if state == 'present':
        # Keys in proposed whose (key, value) pair differs from existing.
        delta = dict(
            set(proposed.items()).difference(existing.items()))
        if delta:
            command = get_commands_config_vrrp(delta, existing, group)
            if command:
                commands.append(command)
    elif state == 'absent':
        if existing:
            commands.append(['no vrrp {0}'.format(group)])
    if commands:
        # All group commands run under the interface context.
        commands.insert(0, ['interface {0}'.format(interface)])
        commands = flatten_list(commands)
        results['commands'] = commands
        results['changed'] = True
        if not module.check_mode:
            load_config(module, commands)
            # load_config may prepend 'configure'; drop it from the report.
            if 'configure' in commands:
                commands.pop(0)
    module.exit_json(**results)
|
from . import purchase
from . import sale
|
# Wire-level constants for a serial framing protocol using ASCII control
# bytes and the separators below (ASTM-style framing, presumably -- the
# latin-1 encoding and CR record separator match that family; confirm
# against the protocol spec this file implements).
ENCODING = 'latin-1'
STX = b'\x02'  # start of text (frame start)
ETX = b'\x03'  # end of text (final frame end)
EOT = b'\x04'  # end of transmission
ENQ = b'\x05'  # enquiry (request to send)
ACK = b'\x06'  # acknowledge
NAK = b'\x15'  # negative acknowledge
ETB = b'\x17'  # end of transmission block (intermediate frame end)
LF = b'\x0A'
CR = b'\x0D'
CRLF = CR + LF
RECORD_SEP = b'\x0D'  # \r #
FIELD_SEP = b'\x7C'  # | #
REPEAT_SEP = b'\x5C'  # \ #
COMPONENT_SEP = b'\x5E'  # ^ #
ESCAPE_SEP = b'\x26'  # & #
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
from ansible.module_utils._text import to_native
try:
import ldap
import ldap.sasl
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
def gen_specs(**specs):
    """Merge the shared LDAP connection argument specs into *specs*.

    Note: the shared keys are applied last, so they take precedence over
    any identically-named keys passed by the caller.
    """
    common = {
        'bind_dn': dict(),
        'bind_pw': dict(default='', no_log=True),
        'dn': dict(required=True),
        'server_uri': dict(default='ldapi:///'),
        'start_tls': dict(default=False, type='bool'),
        'validate_certs': dict(default=True, type='bool'),
    }
    specs.update(common)
    return specs
class LdapGeneric(object):
    """Common base for LDAP modules: reads params and binds a connection."""

    def __init__(self, module):
        # Shortcuts
        self.module = module
        self.bind_dn = self.module.params['bind_dn']
        self.bind_pw = self.module.params['bind_pw']
        self.dn = self.module.params['dn']
        self.server_uri = self.module.params['server_uri']
        self.start_tls = self.module.params['start_tls']
        self.verify_cert = self.module.params['validate_certs']
        # Establish connection
        self.connection = self._connect_to_ldap()

    def fail(self, msg, exn):
        """Fail the module, attaching the exception text and traceback."""
        self.module.fail_json(
            msg=msg,
            details=to_native(exn),
            exception=traceback.format_exc()
        )

    def _connect_to_ldap(self):
        """Return a bound connection (simple bind, or SASL EXTERNAL when
        no bind_dn is given -- e.g. ldapi:// peer authentication)."""
        if not self.verify_cert:
            # NOTE(review): set_option here is a process-wide libldap
            # setting, not per-connection -- confirm that is intended.
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
        connection = ldap.initialize(self.server_uri)
        if self.start_tls:
            try:
                connection.start_tls_s()
            except ldap.LDAPError as e:
                self.fail("Cannot start TLS.", e)
        try:
            if self.bind_dn is not None:
                connection.simple_bind_s(self.bind_dn, self.bind_pw)
            else:
                connection.sasl_interactive_bind_s('', ldap.sasl.external())
        except ldap.LDAPError as e:
            self.fail("Cannot bind to the server.", e)
        return connection
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigiq_regkey_license_assignment
short_description: Manage regkey license assignment on BIG-IPs from a BIG-IQ
description:
- Manages the assignment of regkey licenses on a BIG-IQ. Assignment means that
the license is assigned to a BIG-IP, or, it needs to be assigned to a BIG-IP.
    Additionally, this module supports revoking the assignments from BIG-IP devices.
version_added: 2.6
options:
pool:
description:
- The registration key pool to use.
required: True
key:
description:
- The registration key that you want to assign from the pool.
required: True
device:
description:
- When C(managed) is C(no), specifies the address, or hostname, where the BIG-IQ
can reach the remote device to register.
- When C(managed) is C(yes), specifies the managed device, or device UUID, that
you want to register.
- If C(managed) is C(yes), it is very important that you do not have more than
one device with the same name. BIG-IQ internally recognizes devices by their ID,
      and therefore, this module cannot guarantee that the correct device will be
registered. The device returned is the device that will be used.
managed:
description:
- Whether the specified device is a managed or un-managed device.
- When C(state) is C(present), this parameter is required.
type: bool
device_port:
description:
- Specifies the port of the remote device to connect to.
- If this parameter is not specified, the default of C(443) will be used.
default: 443
device_username:
description:
- The username used to connect to the remote device.
- This username should be one that has sufficient privileges on the remote device
to do licensing. Usually this is the C(Administrator) role.
- When C(managed) is C(no), this parameter is required.
device_password:
description:
- The password of the C(device_username).
- When C(managed) is C(no), this parameter is required.
state:
description:
- When C(present), ensures that the device is assigned the specified license.
    - When C(absent), ensures the license is revoked from the remote device and freed
on the BIG-IQ.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Register an unmanaged device
bigiq_regkey_license_assignment:
pool: my-regkey-pool
key: XXXX-XXXX-XXXX-XXXX-XXXX
device: 1.1.1.1
managed: no
device_username: admin
device_password: secret
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Register a managed device, by name
bigiq_regkey_license_assignment:
pool: my-regkey-pool
key: XXXX-XXXX-XXXX-XXXX-XXXX
device: bigi1.foo.com
managed: yes
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Register a managed device, by UUID
bigiq_regkey_license_assignment:
pool: my-regkey-pool
key: XXXX-XXXX-XXXX-XXXX-XXXX
device: 7141a063-7cf8-423f-9829-9d40599fa3e0
managed: yes
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
'''
import re
import time
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigiq import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigiq import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
    """Base parameter container mapping module args <-> REST API attributes."""

    # REST API attribute name -> module parameter name.
    api_map = {
        'deviceReference': 'device_reference',
        'deviceAddress': 'device_address',
        'httpsPort': 'device_port'
    }
    # Attributes sent to the API when creating the assignment.
    api_attributes = [
        'deviceReference', 'deviceAddress', 'httpsPort', 'managed'
    ]
    # Attributes reported back to the user.
    returnables = [
        'device_address', 'device_reference', 'device_username', 'device_password',
        'device_port', 'managed'
    ]
    # Attributes considered when diffing want vs have.
    updatables = [
        'device_reference', 'device_address', 'device_username', 'device_password',
        'device_port', 'managed'
    ]

    def to_return(self):
        """Return the returnable parameters as a filtered dict; never raises."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Best-effort: fall through with whatever was collected.
            pass
        return result
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IQ API; no extra massaging."""
    pass
class ModuleParameters(Parameters):
    """Parameters derived from the user's module arguments, including
    REST lookups that resolve names to BIG-IQ identifiers."""

    @property
    def device_password(self):
        # Plain passthrough; kept for symmetry with the other properties.
        if self._values['device_password'] is None:
            return None
        return self._values['device_password']

    @property
    def device_username(self):
        if self._values['device_username'] is None:
            return None
        return self._values['device_username']

    @property
    def device_address(self):
        # Only meaningful when 'device' was given as an IP address;
        # implicitly returns None otherwise.
        if self.device_is_address:
            return self._values['device']

    @property
    def device_port(self):
        if self._values['device_port'] is None:
            return None
        return int(self._values['device_port'])

    @property
    def device_is_address(self):
        if is_valid_ip(self.device):
            return True
        return False

    @property
    def device_is_id(self):
        # UUID-shaped device strings are treated as machine IDs.
        pattern = r'[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}'
        if re.match(pattern, self.device):
            return True
        return False

    @property
    def device_is_name(self):
        # Anything that is neither an address nor a UUID is a hostname.
        if not self.device_is_address and not self.device_is_id:
            return True
        return False

    @property
    def device_reference(self):
        """Resolve a managed device to a BIG-IQ device reference link.

        Returns None for unmanaged devices; raises F5ModuleError when
        the lookup fails or no device matches.
        """
        if not self.managed:
            return None
        if self.device_is_address:
            # This range lookup is how you do lookups for single IP addresses. Weird.
            filter = "address+eq+'{0}...{0}'".format(self.device)
        elif self.device_is_name:
            filter = "hostname+eq+'{0}'".format(self.device)
        elif self.device_is_id:
            filter = "uuid+eq+'{0}'".format(self.device)
        else:
            raise F5ModuleError(
                "Unknown device format '{0}'".format(self.device)
            )
        uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/?$filter={2}&$top=1".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            raise F5ModuleError(
                "No device with the specified address was found."
            )
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        # $top=1 guarantees at most one item; take the first match.
        id = response['items'][0]['uuid']
        result = dict(
            link='https://localhost/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/{0}'.format(id)
        )
        return result

    @property
    def pool_id(self):
        """Look up the regkey pool ID by name; raises when not found."""
        filter = "(name eq '{0}')".format(self.pool)
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses?$filter={2}&$top=1'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            raise F5ModuleError(
                "No pool with the specified name was found."
            )
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        return response['items'][0]['id']

    @property
    def member_id(self):
        """Look up the existing member (assignment) ID, or None if absent."""
        if self.device_is_address:
            # This range lookup is how you do lookups for single IP addresses. Weird.
            filter = "deviceAddress+eq+'{0}...{0}'".format(self.device)
        elif self.device_is_name:
            filter = "deviceName+eq+'{0}'".format(self.device)
        elif self.device_is_id:
            filter = "deviceMachineId+eq+'{0}'".format(self.device)
        else:
            raise F5ModuleError(
                "Unknown device format '{0}'".format(self.device)
            )
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/?$filter={4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.pool_id,
            self.key,
            filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            return None
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        result = response['items'][0]['id']
        return result
class Changes(Parameters):
    """Base class for the change-set containers below."""
    pass
class UsableChanges(Changes):
    """Changes shaped for the licensing POST body.

    Managed devices are referenced by device_reference only; unmanaged
    devices send address/port/credentials instead.  The 'managed' flag
    itself is never transmitted.
    """

    @property
    def device_port(self):
        return None if self._values['managed'] else self._values['device_port']

    @property
    def device_username(self):
        return None if self._values['managed'] else self._values['device_username']

    @property
    def device_password(self):
        return None if self._values['managed'] else self._values['device_password']

    @property
    def device_reference(self):
        # Only managed devices are addressed via a reference link.
        return self._values['device_reference'] if self._values['managed'] else None

    @property
    def device_address(self):
        return None if self._values['managed'] else self._values['device_address']

    @property
    def managed(self):
        # Never expose 'managed' itself in the generated API payload.
        return None
class ReportableChanges(Changes):
    """Changes as reported back to the user; no extra massaging needed."""
    pass
class Difference(object):
    """Compute, attribute by attribute, how *want* differs from *have*."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the wanted value when it differs, else None.

        A property named *param* on this class overrides the default
        comparison; otherwise a plain inequality check is used.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            if wanted != getattr(self.have, param):
                return wanted
        except AttributeError:
            # *have* lacks the attribute entirely: the wanted value wins.
            return wanted
class ModuleManager(object):
    """Drives the assign/revoke workflow against the BIG-IQ REST API."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params, client=self.client)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # Seed self.changes with every user-supplied (non-None) parameter.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # Diff want vs have; True when at least one updatable differs.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point: apply *state* and return the module result dict."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Surface any deprecation notices queued under '__warnings'.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        # Assignments cannot be updated in place; only created once.
        if self.exists():
            return False
        return self.create()

    def exists(self):
        """True when the member assignment already exists on the BIG-IQ."""
        if self.want.member_id is None:
            return False
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/{4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.pool_id,
            self.want.key,
            self.want.member_id
        )
        resp = self.client.api.get(uri)
        if resp.status == 200:
            return True
        return False

    def remove(self):
        """Revoke the assignment; returns True when a change was made."""
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        # Artificial sleeping to wait for remote licensing (on BIG-IP) to complete
        #
        # This should be something that BIG-IQ can do natively in 6.1-ish time.
        time.sleep(60)
        return True

    def create(self):
        """Create the assignment; returns True when a change was made."""
        self._set_changed_options()
        if not self.want.managed:
            # Unmanaged devices need credentials so BIG-IQ can reach them.
            if self.want.device_username is None:
                raise F5ModuleError(
                    "You must specify a 'device_username' when working with unmanaged devices."
                )
            if self.want.device_password is None:
                raise F5ModuleError(
                    "You must specify a 'device_password' when working with unmanaged devices."
                )
        if self.module.check_mode:
            return True
        self.create_on_device()
        if not self.exists():
            raise F5ModuleError(
                "Failed to license the remote device."
            )
        self.wait_for_device_to_be_licensed()
        # Artificial sleeping to wait for remote licensing (on BIG-IP) to complete
        #
        # This should be something that BIG-IQ can do natively in 6.1-ish time.
        time.sleep(60)
        return True

    def create_on_device(self):
        """POST the new member assignment to the offering's members collection."""
        params = self.changes.api_params()
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.pool_id,
            self.want.key
        )
        if not self.want.managed:
            params['username'] = self.want.device_username
            params['password'] = self.want.device_password
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def wait_for_device_to_be_licensed(self):
        """Poll until three consecutive reads report status LICENSED.

        NOTE(review): there is no delay between polls and no overall
        timeout, so this can hammer the API and spin indefinitely if the
        device never reaches LICENSED -- confirm this is intended.
        """
        count = 0
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/{4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.pool_id,
            self.want.key,
            self.want.member_id
        )
        while count < 3:
            resp = self.client.api.get(uri)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
            if response['status'] == 'LICENSED':
                count += 1
            else:
                # Any non-LICENSED read resets the consecutive counter.
                count = 0

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        """DELETE the member assignment (with credentials for unmanaged)."""
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/{4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.pool_id,
            self.want.key,
            self.want.member_id
        )
        params = {}
        if not self.want.managed:
            params.update(self.changes.api_params())
            params['id'] = self.want.member_id
            params['username'] = self.want.device_username
            params['password'] = self.want.device_password
        self.client.api.delete(uri, json=params)
class ArgumentSpec(object):
    """Argument spec for the module, merged with the common F5 options."""

    def __init__(self):
        self.supports_check_mode = True
        own_spec = dict(
            pool=dict(required=True),
            key=dict(required=True, no_log=True),
            device=dict(required=True),
            managed=dict(type='bool'),
            device_port=dict(type='int', default=443),
            device_username=dict(no_log=True),
            device_password=dict(no_log=True),
            state=dict(default='present', choices=['absent', 'present'])
        )
        # Common F5 options first, module-specific options override.
        merged = {}
        merged.update(f5_argument_spec)
        merged.update(own_spec)
        self.argument_spec = merged
        self.required_if = [
            ['state', 'present', ['key', 'managed']],
            ['managed', False, ['device', 'device_username', 'device_password']],
            ['managed', True, ['device']]
        ]
def main():
    """Module entry point: build the spec, run the manager, emit results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        required_if=spec.required_if
    )
    client = F5RestClient(**module.params)
    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        exit_json(module, results, client)
    except F5ModuleError as ex:
        # Normalize all module errors through the shared fail handler.
        fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
import fileinput
import re
import sys
WARNING = '\033[93m'
ENDC = '\033[0m'
def myprint(*args):
    # Print *args prefixed with the current file name (module global),
    # highlighted with the WARNING color code.
    print(WARNING, filename + ":", ENDC,*args)
def yield_line_and_islastline(f):
    """Yield (line, is_last_line_of_its_file) over a fileinput stream.

    Lags one line behind the iterator: when the *next* line turns out to
    be the first line of a new file, the buffered previous line was the
    last line of the previous file.  Also keeps the module-level
    ``filename`` and ``linenumber`` globals (used by myprint) pointing
    at the line being yielded.
    """
    global filename
    global linenumber
    try:
        prevline = next(f)
        filename = fileinput.filename()
        linenumber = fileinput.filelineno()
    except StopIteration:
        # Empty input: nothing to yield.
        return
    for line in f:
        # f.isfirstline() refers to the line just read; if it starts a
        # new file, prevline was the last line of the previous file.
        yield (prevline, f.isfirstline())
        filename = fileinput.filename()
        linenumber = fileinput.filelineno()
        prevline = line
    yield prevline, True
# Check macro-style tags (<<if ...>> ... <</if>>) for balance across the
# input files.  Tracks a stack of open tags; 'else'/'case' only peek at
# the stack (they do not close their parent), closing tags pop it.
#
# Fix: the 'else'/'case' empty-stack branch previously reported the
# error and called fileinput.nextfile() but fell through to
# ``tagfound[-1]``, crashing with an IndexError; a ``break`` is added.
pattern = re.compile(r'(<<(\/?) *(if|for|else|switch|case|replace|link)[^<>]*)')
tagfound = []
try:
    for line, isLastLine in yield_line_and_islastline(fileinput.input()):
        for (whole, end, tag) in re.findall(pattern, line):
            if tag == "else" or tag == 'case':
                if len(tagfound) == 0:
                    myprint("Found", tag, "but with no opening tag:")
                    myprint(" ", linenumber, ":", whole)
                    fileinput.nextfile()
                    # There is no opening tag to inspect below; stop
                    # processing this line (fixes IndexError crash).
                    break
                lasttag = tagfound[-1]
                if (tag == "else" and lasttag["tag"] != "if") or (tag == "case" and lasttag["tag"] != "switch"):
                    myprint("Mismatched else: Opening tag was:")
                    myprint(" ", lasttag["linenumber"], ":", lasttag["whole"])
                    myprint("But this tag was:")
                    myprint(" ", linenumber, ":", whole)
                    fileinput.nextfile()
                    break
            elif end != '/':
                # Opening tag: push it.
                tagfound.append({"whole": whole, "linenumber": linenumber, "tag": tag})
            else:
                # Closing tag: must match the most recent opening tag.
                if len(tagfound) == 0:
                    myprint("Found closing tag but with no opening tag:")
                    myprint(" ", linenumber, ":", whole)
                    fileinput.nextfile()
                    break
                lasttag = tagfound.pop()
                if lasttag["tag"] != tag:
                    myprint("Mismatched tag: Opening tag was:")
                    myprint(" ", lasttag["linenumber"], ":", lasttag["whole"])
                    myprint("Closing tag was:")
                    myprint(" ", linenumber, ":", whole)
                    fileinput.nextfile()
                    break
        if isLastLine:
            # At each file boundary, report tags left open and reset.
            if len(tagfound) != 0:
                myprint("End of file found but", len(tagfound), ("tag hasn't" if len(tagfound) == 1 else "tags haven't"), "been closed:")
                for tag in tagfound:
                    myprint(" ", tag["linenumber"], ":", tag["whole"])
            tagfound = []
except UnicodeDecodeError as e:
    myprint(e)
    print(" Hint: In linux, you can get more details about unicode errors by running:")
    print("    isutf8", filename)
|
import binascii
import os
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class Token(models.Model):
    """
    The default authorization token model.
    """
    # 40 hex chars = 20 random bytes from generate_key().
    key = models.CharField(_("Key"), max_length=40, primary_key=True)
    # One token per user; deleting the user deletes the token.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL, related_name='auth_token',
        on_delete=models.CASCADE, verbose_name=_("User")
    )
    created = models.DateTimeField(_("Created"), auto_now_add=True)

    class Meta:
        # Work around for a bug in Django:
        # https://code.djangoproject.com/ticket/19422
        #
        # Also see corresponding ticket:
        # https://github.com/tomchristie/django-rest-framework/issues/705
        abstract = 'rest_framework.authtoken' not in settings.INSTALLED_APPS
        verbose_name = _("Token")
        verbose_name_plural = _("Tokens")

    def save(self, *args, **kwargs):
        # Generate a key on first save only; existing keys are preserved.
        if not self.key:
            self.key = self.generate_key()
        return super(Token, self).save(*args, **kwargs)

    def generate_key(self):
        # 20 cryptographically random bytes, hex-encoded to 40 chars.
        return binascii.hexlify(os.urandom(20)).decode()

    def __str__(self):
        return self.key
|
__author__ = 'rochacbruno'
|
from pgi.clib.gir import GITypeTag, GIDirection, GITransfer, GIInfoType
class CallbackArgument(object):
    """Base class for marshalling a single callback argument.

    Subclasses set TAG to the GITypeTag they handle and py_type to the
    Python type the unpacked value has.
    """

    TAG = None  # GITypeTag value this class handles
    is_aux = False  # auxiliary-argument flag (consumed by another arg, presumably)
    py_type = None  # Python-level type of the unpacked value

    def __init__(self, backend, info, type_, name):
        self.info = info
        self.name = name
        self.backend = backend
        self.type = type_

    @classmethod
    def get_class(cls, type_):
        """Return the concrete class handling *type_*; default is cls itself."""
        return cls

    def setup(self):
        pass

    def process(self):
        """Return (code_block, out_variable); the default is a no-op passthrough."""
        return None, self.name

    # NOTE(review): self.direction is read below but never assigned in
    # this class -- presumably set externally by the caller; confirm.
    def is_direction_in(self):
        return self.direction in (GIDirection.INOUT, GIDirection.IN)

    def is_direction_out(self):
        return self.direction in (GIDirection.INOUT, GIDirection.OUT)

    def is_direction_inout(self):
        return self.direction == GIDirection.INOUT

    def transfer_nothing(self):
        return self.info.ownership_transfer.value == GITransfer.NOTHING

    def transfer_container(self):
        return self.info.ownership_transfer.value == GITransfer.CONTAINER

    def transfer_everything(self):
        return self.info.ownership_transfer.value == GITransfer.EVERYTHING
class BaseInterfaceArgument(CallbackArgument):
    """Argument whose GI type tag is INTERFACE (struct/object/union/...)."""

    TAG = GITypeTag.INTERFACE
    py_type = object

    @classmethod
    def get_class(cls, type_):
        # Dispatch on the concrete interface kind of the GI type.
        iface = type_.get_interface()
        iface_type = iface.type.value
        if iface_type == GIInfoType.STRUCT:
            return StructArgument
        elif iface_type == GIInfoType.OBJECT:
            return ObjectArgument
        elif iface_type == GIInfoType.UNION:
            return UnionArgument
        elif iface_type == GIInfoType.FLAGS:
            return FlagsArgument
        elif iface_type == GIInfoType.ENUM:
            return EnumArgument
        raise NotImplementedError("Unsupported interface type %r" % iface.type)

    def process(self):
        # Delegate unpacking to the backend's handler for this type.
        var = self.backend.get_type(self.type)
        out = var.unpack_return(self.name)
        return var.block, out
# Concrete per-type argument classes.  The interface subclasses exist
# only as dispatch targets for BaseInterfaceArgument.get_class().
class FlagsArgument(BaseInterfaceArgument):
    pass


class EnumArgument(BaseInterfaceArgument):
    pass


class ObjectArgument(BaseInterfaceArgument):
    pass


class StructArgument(BaseInterfaceArgument):
    pass


class UnionArgument(BaseInterfaceArgument):
    pass


class Utf8Argument(CallbackArgument):
    TAG = GITypeTag.UTF8
    py_type = str


class BooleanArgument(CallbackArgument):
    TAG = GITypeTag.BOOLEAN
    py_type = bool


class Int64Argument(CallbackArgument):
    TAG = GITypeTag.INT64
    py_type = int


class Int32Argument(CallbackArgument):
    TAG = GITypeTag.INT32
    py_type = int


class UInt64Argument(CallbackArgument):
    TAG = GITypeTag.UINT64
    py_type = int


class VoidArgument(CallbackArgument):
    # VOID tag mapped to int -- presumably pointer-sized data; confirm.
    TAG = GITypeTag.VOID
    py_type = int
_classes = {}  # GITypeTag value -> CallbackArgument subclass


def _find_cbargs():
    """Populate _classes from every CallbackArgument subclass in this module.

    NOTE(review): all BaseInterfaceArgument subclasses share the same
    INTERFACE tag and overwrite one another here; lookups still work
    because get_class() re-dispatches on the interface's info type.
    """
    global _classes
    for var in globals().values():
        if not isinstance(var, type):
            continue
        if issubclass(var, CallbackArgument) and var is not CallbackArgument:
            _classes[var.TAG] = var


_find_cbargs()
def get_cbarg_class(arg_type):
    """Return the CallbackArgument subclass that handles *arg_type*.

    Looks the type tag up in the registry and lets the matched class
    refine the choice via get_class() (interface types re-dispatch).
    Raises NotImplementedError for unregistered tags.
    """
    global _classes
    tag_value = arg_type.tag.value
    if tag_value not in _classes:
        raise NotImplementedError(
            "%r signal argument not implemented" % arg_type.tag)
    return _classes[tag_value].get_class(arg_type)
|
"""
Views for Instances and Volumes.
"""
from horizon import tabs
from openstack_dashboard.dashboards.project.access_and_security \
import tabs as project_tabs
class IndexView(tabs.TabbedTableView):
    """Tabbed landing page for the project's Access & Security panel."""
    tab_group_class = project_tabs.AccessAndSecurityTabs
    template_name = 'project/access_and_security/index.html'
|
from __future__ import division, print_function, absolute_import
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_array_almost_equal, assert_, assert_raises, assert_allclose
import numpy as np
from scipy.linalg import _flapack as flapack
from scipy.linalg import inv
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
class TestFlapackSimple(TestCase):
    """Smoke tests for a few simple f2py-wrapped LAPACK routines."""

    def test_gebal(self):
        # gebal balances a matrix; without permute/scale it should be a no-op.
        a = [[1,2,3],[4,5,6],[7,8,9]]
        a1 = [[1,0,0,3e-4],
              [4,0,0,2e-3],
              [7,1,0,0],
              [0,1,0,0]]
        for p in 'sdzc':
            f = getattr(flapack,p+'gebal',None)
            if f is None:
                continue
            ba,lo,hi,pivscale,info = f(a)
            assert_(not info,repr(info))
            assert_array_almost_equal(ba,a)
            assert_equal((lo,hi),(0,len(a[0])-1))
            assert_array_almost_equal(pivscale, np.ones(len(a)))
            # With permute/scale enabled only check it runs successfully.
            ba,lo,hi,pivscale,info = f(a1,permute=1,scale=1)
            assert_(not info,repr(info))
            # print a1
            # print ba,lo,hi,pivscale

    def test_gehrd(self):
        # Hessenberg reduction; only the double-precision wrapper is exercised.
        a = [[-149, -50,-154],
             [537, 180, 546],
             [-27, -9, -25]]
        for p in 'd':
            f = getattr(flapack,p+'gehrd',None)
            if f is None:
                continue
            ht,tau,info = f(a)
            assert_(not info,repr(info))

    def test_trsyl(self):
        # Sylvester equation solver: A*X + X*B = scale*C (and variants).
        a = np.array([[1, 2], [0, 4]])
        b = np.array([[5, 6], [0, 8]])
        c = np.array([[9, 10], [11, 12]])
        trans = 'T'
        # Test single and double implementations, including most
        # of the options
        for dtype in 'fdFD':
            a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
            trsyl, = get_lapack_funcs(('trsyl',), (a1,))
            if dtype.isupper():  # is complex dtype
                a1[0] += 1j
                trans = 'C'
            x, scale, info = trsyl(a1, b1, c1)
            assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1), scale * c1)
            # Transposed/conjugate-transposed variant.
            x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
            assert_array_almost_equal(np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
                                      scale * c1, decimal=4)
            # isgn=-1 solves A*X - X*B = scale*C.
            x, scale, info = trsyl(a1, b1, c1, isgn=-1)
            assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1), scale * c1, decimal=4)
class TestLapack(TestCase):
    """Importability smoke tests for the wrapped LAPACK/CLAPACK modules."""

    def test_flapack(self):
        if hasattr(flapack,'empty_module'):
            # flapack module is empty
            pass

    def test_clapack(self):
        if hasattr(clapack,'empty_module'):
            # clapack module is empty
            pass
class TestRegression(TestCase):
    """Regression tests for previously-fixed LAPACK wrapper bugs."""

    def test_ticket_1645(self):
        # Check that RQ routines have correct lwork
        for dtype in DTYPES:
            a = np.zeros((300, 2), dtype=dtype)
            gerqf, = get_lapack_funcs(['gerqf'], [a])
            # Undersized lwork must be rejected, default must succeed.
            assert_raises(Exception, gerqf, a, lwork=2)
            rq, tau, work, info = gerqf(a)
            if dtype in REAL_DTYPES:
                orgrq, = get_lapack_funcs(['orgrq'], [a])
                assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
                orgrq(rq[-2:], tau, lwork=2)
            elif dtype in COMPLEX_DTYPES:
                # Complex types use the un* (unitary) variant instead.
                ungrq, = get_lapack_funcs(['ungrq'], [a])
                assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
                ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(TestCase):
    """Tests for the Cholesky factor/inverse pair potrf/potri."""

    def test_gh_2691(self):
        # 'lower' argument of dportf/dpotri
        for lower in [True, False]:
            for clean in [True, False]:
                np.random.seed(42)
                # Symmetric positive-definite test matrix.
                x = np.random.normal(size=(3, 3))
                a = x.dot(x.T)
                dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
                c, info = dpotrf(a, lower, clean=clean)
                dpt = dpotri(c, lower)[0]
                # Only the requested triangle of the inverse is valid.
                if lower:
                    assert_allclose(np.tril(dpt), np.tril(inv(a)))
                else:
                    assert_allclose(np.triu(dpt), np.triu(inv(a)))
if __name__ == "__main__":
run_module_suite()
|
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.platform import test
class StackOpTest(xla_test.XLATestCase):
    """Tests for the v2 stack ops (stack_v2/push/pop/close) under XLA."""

    def testStackPushPop(self):
        # Push one value and pop it back; pop is sequenced after push via
        # a control dependency.
        with self.cached_session(), self.test_scope():
            size = array_ops.placeholder(dtypes.int32)
            v = array_ops.placeholder(dtypes.float32)
            h = gen_data_flow_ops.stack_v2(size, dtypes.float32, stack_name="foo")
            c = gen_data_flow_ops.stack_push_v2(h, v)
            with ops.control_dependencies([c]):
                c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
            self.assertAllClose([[4.0, 5.0]], c1.eval({size: 5, v: [[4.0, 5.0]]}))

    def testStackPushPopSwap(self):
        # Same as above but with swap_memory=True and a larger tensor.
        with self.cached_session(), self.test_scope():
            a = np.arange(2000)
            x = array_ops.placeholder(dtypes.float32)
            h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
            c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
            with ops.control_dependencies([c]):
                c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
            self.assertAllClose(a, c1.eval({x: a}))

    def testMultiStack(self):
        # Two independently-named stacks must not interfere.
        with self.cached_session(), self.test_scope():
            v = array_ops.placeholder(dtypes.float32)
            h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
            c1 = gen_data_flow_ops.stack_push_v2(h1, v)
            with ops.control_dependencies([c1]):
                c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
            h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="bar")
            c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
            with ops.control_dependencies([c2]):
                c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
            r = c1 + c2
            self.assertAllClose(9.0, r.eval({v: 4.0}))

    def testSameNameStacks(self):
        """Different stacks with the same name do not interfere."""
        with self.cached_session() as sess, self.test_scope():
            v1 = array_ops.placeholder(dtypes.float32)
            v2 = array_ops.placeholder(dtypes.float32)
            h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
            h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
            c1 = gen_data_flow_ops.stack_push_v2(h1, v1)
            with ops.control_dependencies([c1]):
                c2 = gen_data_flow_ops.stack_push_v2(h2, v2)
            with ops.control_dependencies([c2]):
                pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
                pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
            out1, out2 = sess.run([pop1, pop2], {v1: 4.0, v2: 5.0})
            self.assertAllClose(out1, 4.0)
            self.assertAllClose(out2, 5.0)

    def testCloseStack(self):
        # Closing an empty stack must succeed.
        with self.cached_session() as sess, self.test_scope():
            size = array_ops.placeholder(dtypes.int32)
            h = gen_data_flow_ops.stack_v2(size, dtypes.float32, stack_name="foo")
            c1 = gen_data_flow_ops.stack_close_v2(h)
            sess.run(c1, {size: 5})

    def testPushCloseStack(self):
        # Closing a non-empty stack (after a push) must also succeed.
        with self.cached_session() as sess, self.test_scope():
            v = array_ops.placeholder(dtypes.float32)
            h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
            c = gen_data_flow_ops.stack_push_v2(h, v)
            with ops.control_dependencies([c]):
                c1 = gen_data_flow_ops.stack_close_v2(h)
            sess.run(c1, {v: [[4.0, 5.0]]})
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test.main()
|
"""
Strongly connected components.
"""
import networkx as nx
# Module metadata. Each author must be a separate, comma-terminated list
# element: the original was missing the comma after Aric Hagberg's entry,
# so Python's implicit string-literal concatenation silently fused his
# name with Christopher Ellison's into one line.
__authors__ = "\n".join(['Eben Kenah',
                         'Aric Hagberg (hagberg@lanl.gov)',
                         'Christopher Ellison',
                         'Ben Edwards (bedwards@cs.unm.edu)'])

# Public API of this module.
__all__ = ['number_strongly_connected_components',
           'strongly_connected_components',
           'strongly_connected_component_subgraphs',
           'is_strongly_connected',
           'strongly_connected_components_recursive',
           'kosaraju_strongly_connected_components',
           'condensation']
def strongly_connected_components(G):
    """Return nodes in strongly connected components of graph.

    Parameters
    ----------
    G : NetworkX Graph
        A directed graph.

    Returns
    -------
    comp : list of lists
        A list of nodes for each component of G,
        ordered from largest component to smallest.

    Raises
    ------
    NetworkXError
        If G is undirected.

    See Also
    --------
    connected_components, weakly_connected_components

    Notes
    -----
    Uses Tarjan's algorithm with Nuutila's modifications;
    nonrecursive version of the algorithm.

    References
    ----------
    .. [1] Depth-first search and linear graph algorithms, R. Tarjan
       SIAM Journal of Computing 1(2):146-160, (1972).
    .. [2] On finding the strongly connected components in a directed graph.
       E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994).
    """
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
              Use connected_components() """)
    preorder = {}    # node -> DFS discovery index
    lowlink = {}     # node -> Tarjan lowlink (smallest reachable preorder)
    scc_found = {}   # nodes already assigned to a finished component
    scc_queue = []   # stack of nodes awaiting component assignment
    scc_list = []    # finished components (lists of nodes)
    i = 0            # Preorder counter
    for source in G:
        if source not in scc_found:
            # Explicit DFS stack: avoids Python's recursion limit.
            queue = [source]
            while queue:
                v = queue[-1]
                if v not in preorder:
                    i = i + 1
                    preorder[v] = i
                done = 1
                v_nbrs = G[v]
                # Descend into the first not-yet-discovered neighbor.
                for w in v_nbrs:
                    if w not in preorder:
                        queue.append(w)
                        done = 0
                        break
                if done == 1:
                    # All neighbors discovered: v is finished, compute its
                    # lowlink from its successors.
                    lowlink[v] = preorder[v]
                    for w in v_nbrs:
                        if w not in scc_found:
                            if preorder[w] > preorder[v]:
                                lowlink[v] = min([lowlink[v], lowlink[w]])
                            else:
                                lowlink[v] = min([lowlink[v], preorder[w]])
                    queue.pop()
                    if lowlink[v] == preorder[v]:
                        # v is a component root: collect members with larger
                        # preorder numbers off scc_queue.
                        scc_found[v] = True
                        scc = [v]
                        while scc_queue and preorder[scc_queue[-1]] > preorder[v]:
                            k = scc_queue.pop()
                            scc_found[k] = True
                            scc.append(k)
                        scc_list.append(scc)
                    else:
                        scc_queue.append(v)
    scc_list.sort(key=len, reverse=True)
    return scc_list
def kosaraju_strongly_connected_components(G, source=None):
    """Return nodes in strongly connected components of graph.

    Parameters
    ----------
    G : NetworkX Graph
        A directed graph.

    Returns
    -------
    comp : list of lists
        A list of nodes for each component of G,
        ordered from largest component to smallest.

    Raises
    ------
    NetworkXError
        If G is undirected.

    See Also
    --------
    connected_components

    Notes
    -----
    Uses Kosaraju's algorithm.
    """
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
              Use connected_components() """)
    # First pass: postorder over the reversed graph (copy=False gives a
    # reversed view, restored immediately afterwards).
    G = G.reverse(copy=False)
    post = list(nx.dfs_postorder_nodes(G, source=source))
    G = G.reverse(copy=False)

    # Second pass: peel components off in reverse postorder with forward DFS.
    components = []
    seen = set()
    for r in reversed(post):
        if r in seen:
            continue
        fresh = [v for v in nx.dfs_preorder_nodes(G, r) if v not in seen]
        seen.update(fresh)
        components.append(fresh)

    components.sort(key=len, reverse=True)
    return components
def strongly_connected_components_recursive(G):
    """Return nodes in strongly connected components of graph.

    Recursive version of the algorithm.

    Parameters
    ----------
    G : NetworkX Graph
        A directed graph.

    Returns
    -------
    comp : list of lists
        A list of nodes for each component of G,
        ordered from largest component to smallest.

    Raises
    ------
    NetworkXError
        If G is undirected.

    See Also
    --------
    connected_components

    Notes
    -----
    Uses Tarjan's algorithm with Nuutila's modifications.

    References
    ----------
    .. [1] Depth-first search and linear graph algorithms, R. Tarjan
       SIAM Journal of Computing 1(2):146-160, (1972).
    .. [2] On finding the strongly connected components in a directed graph.
       E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994).
    """
    def visit(v, cnt):
        # Tarjan-style DFS visit; `root` plays the role of the lowlink value.
        # NOTE(review): cnt is passed by value, so sibling subtrees reuse
        # counter values; confirm against upstream that root/visited
        # comparisons still identify component roots correctly.
        root[v] = cnt
        visited[v] = cnt
        cnt += 1
        stack.append(v)
        for w in G[v]:
            if w not in visited: visit(w, cnt)
            if w not in component:
                # w is still on the stack: pull v's root down to w's.
                root[v] = min(root[v], root[w])
        if root[v] == visited[v]:
            # v is the root of a component: unwind the stack down to v.
            component[v] = root[v]
            tmpc = [v]  # hold nodes in this component
            while stack[-1] != v:
                w = stack.pop()
                component[w] = root[v]
                tmpc.append(w)
            stack.remove(v)
            scc.append(tmpc)  # add to scc list
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
              Use connected_components() """)
    scc = []        # finished components
    visited = {}    # node -> visit number
    component = {}  # node -> root of its finished component
    root = {}       # node -> current lowlink estimate
    cnt = 0
    stack = []
    for source in G:
        if source not in visited:
            visit(source, cnt)
    scc.sort(key=len, reverse=True)
    return scc
def strongly_connected_component_subgraphs(G):
    """Return strongly connected components as subgraphs.

    Parameters
    ----------
    G : NetworkX Graph
        A graph.

    Returns
    -------
    glist : list
        A list of graphs, one for each strongly connected component of G,
        ordered from largest component to smallest.

    See Also
    --------
    connected_component_subgraphs

    Notes
    -----
    Graph, node, and edge attributes are copied to the subgraphs.
    """
    # .copy() detaches each subgraph (and its attributes) from G.
    return [G.subgraph(component).copy()
            for component in strongly_connected_components(G)]
def number_strongly_connected_components(G):
    """Return number of strongly connected components in graph.

    Parameters
    ----------
    G : NetworkX graph
        A directed graph.

    Returns
    -------
    n : integer
        Number of strongly connected components.

    See Also
    --------
    connected_components

    Notes
    -----
    For directed graphs only.
    """
    components = strongly_connected_components(G)
    return len(components)
def is_strongly_connected(G):
    """Test directed graph for strong connectivity.

    Parameters
    ----------
    G : NetworkX Graph
        A directed graph.

    Returns
    -------
    connected : bool
        True if the graph is strongly connected, False otherwise.

    Raises
    ------
    NetworkXError
        If G is undirected.
    NetworkXPointlessConcept
        If G is the null graph.

    See Also
    --------
    strongly_connected_components

    Notes
    -----
    For directed graphs only.
    """
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
          See is_connected() for connectivity test.""")
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept(
            """Connectivity is undefined for the null graph.""")
    # Strongly connected iff the largest component spans every node.
    largest = strongly_connected_components(G)[0]
    return len(largest) == len(G)
def condensation(G, scc=None):
    """Returns the condensation of G.

    The condensation of G is the graph with each of the strongly connected
    components contracted into a single node.

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph.
    scc : list (optional, default=None)
        A list of strongly connected components. If provided, the elements
        in `scc` must partition the nodes in `G`. If not provided, it will
        be calculated as scc=nx.strongly_connected_components(G).

    Returns
    -------
    C : NetworkX DiGraph
        The condensation of G. The node labels are integers corresponding
        to the index of the component in the list of strongly connected
        components.

    Raises
    ------
    NetworkXError
        If G is not directed.

    Notes
    -----
    After contracting all strongly connected components to a single node,
    the resulting graph is a directed acyclic graph.
    """
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
          See is_connected() for connectivity test.""")
    if scc is None:
        scc = nx.strongly_connected_components(G)
    # Map every node of G to the index of its component.
    mapping = {}
    for i, component in enumerate(scc):
        mapping.update((n, i) for n in component)
    C = nx.DiGraph()
    C.add_nodes_from(range(len(scc)))
    # Keep only edges that cross component boundaries.
    C.add_edges_from((mapping[u], mapping[v])
                     for u, v in G.edges()
                     if mapping[u] != mapping[v])
    return C
|
def test():
    # Python 2 print statement; `noname` is never defined anywhere, so
    # evaluating it raises NameError at runtime.
    print noname
def foo():
    """Call test() and verify it fails with NameError rather than ValueError.

    A NameError raised by test() is not caught here and propagates out
    unchanged; only a (wrong) ValueError is converted into RuntimeError.
    """
    try:
        test()
    except ValueError:
        raise RuntimeError("Accessing a undefined name should raise a NameError")
|
"""
Split Test Block Transformer
"""
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer, FilteringTransformerMixin
class SplitTestTransformer(FilteringTransformerMixin, BlockStructureTransformer):
    """
    A nested transformer of the UserPartitionTransformer that honors the
    block structure pathways created by split_test modules.

    To avoid code duplication, the implementation transforms its block
    access representation to the representation used by user_partitions.
    Namely, the 'group_id_to_child' field on a split_test module is
    transformed into the, now standard, 'group_access' fields in the
    split_test module's children.

    The implementation therefore relies on the UserPartitionTransformer
    to actually enforce the access using the 'user_partitions' and
    'group_access' fields.
    """
    VERSION = 1

    @classmethod
    def name(cls):
        """
        Unique identifier for the transformer's class;
        same identifier used in setup.py.
        """
        return "split_test"

    @classmethod
    def collect(cls, block_structure):
        """
        Collects any information that's necessary to execute this
        transformer's transform method.
        """
        # user_partitions live on the course root block.
        root_block = block_structure.get_xblock(block_structure.root_block_usage_key)
        user_partitions = getattr(root_block, 'user_partitions', [])

        for block_key in block_structure.topological_traversal(
                filter_func=lambda block_key: block_key.block_type == 'split_test',
                yield_descendants_of_unyielded=True,
        ):
            xblock = block_structure.get_xblock(block_key)
            # Find the partition this split_test block is keyed on, if any.
            partition_for_this_block = next(
                (
                    partition for partition in user_partitions
                    if partition.id == xblock.user_partition_id
                ),
                None
            )
            if not partition_for_this_block:
                continue

            # Create dict of child location to group_id, using the
            # group_id_to_child field on the split_test module.
            # NOTE: `unicode` makes this module Python 2-only.
            child_to_group = {
                xblock.group_id_to_child.get(unicode(group.id), None): group.id
                for group in partition_for_this_block.groups
            }

            # Set group access for each child using its group_access
            # field so the user partitions transformer enforces it.
            for child_location in xblock.children:
                child = block_structure.get_xblock(child_location)
                group = child_to_group.get(child_location, None)
                child.group_access[partition_for_this_block.id] = [group] if group is not None else []

    def transform_block_filters(self, usage_info, block_structure):
        """
        Mutates block_structure based on the given usage_info.
        """
        # The UserPartitionTransformer will enforce group access, so
        # go ahead and remove all extraneous split_test modules.
        return [
            block_structure.create_removal_filter(
                lambda block_key: block_key.block_type == 'split_test',
                keep_descendants=True,
            )
        ]
|
from odoo import http
from odoo.exceptions import AccessError
from odoo.http import request
class HrOrgChartController(http.Controller):
    """JSON controller providing the data behind the employee org chart."""

    _managers_level = 2  # FP request: show at most this many manager levels

    def _prepare_employee_data(self, employee):
        """Serialize one hr.employee record for the org chart widget."""
        # sudo: the job title may not be readable by the requesting user.
        job = employee.sudo().job_id
        return dict(
            id=employee.id,
            name=employee.name,
            link='/mail/view?model=hr.employee&res_id=%s' % employee.id,
            job_id=job.id,
            job_name=job.name or '',
            direct_sub_count=len(employee.child_ids),
            indirect_sub_count=employee.child_all_count,
        )

    @http.route('/hr/get_org_chart', type='json', auth='user')
    def get_org_chart(self, employee_id):
        """Return self/managers/children org-chart data for employee_id.

        Returns an empty dict when the id is falsy or the current user
        lacks read access on the record.
        """
        if not employee_id:  # to check
            return {}
        employee_id = int(employee_id)

        Employee = request.env['hr.employee']
        # check and raise
        if not Employee.check_access_rights('read', raise_exception=False):
            return {}
        try:
            Employee.browse(employee_id).check_access_rule('read')
        except AccessError:
            return {}
        else:
            employee = Employee.browse(employee_id)

        # compute employee data for org chart
        # Walk up the parent chain; ancestors are collected closest-first.
        ancestors, current = request.env['hr.employee'], employee
        while current.parent_id:
            ancestors += current.parent_id
            current = current.parent_id

        values = dict(
            self=self._prepare_employee_data(employee),
            managers=[self._prepare_employee_data(ancestor) for idx, ancestor in enumerate(ancestors) if idx < self._managers_level],
            managers_more=len(ancestors) > self._managers_level,
            children=[self._prepare_employee_data(child) for child in employee.child_ids],
        )
        # Reverse so the chain is rendered from highest included manager
        # down to the employee.
        values['managers'].reverse()
        return values
|
import os
import optparse
import sys
import unittest
USAGE = """%prog sdk_path test_path webtest_path
Run unit tests for App Engine apps.
sdk_path Path to the SDK installation.
test_path Path to package containing test modules.
webtest_path Path to the webtest library."""
def _WebTestIsInstalled():
    """Return True if the webtest package is importable.

    When the import fails, prints installation instructions (Python 2
    print statements) and returns False.
    """
    try:
        # Import only to probe availability; the module itself is unused here.
        import webtest
        return True
    except ImportError:
        print 'You need to install webtest dependencies before you can proceed '
        print 'running the tests. To do this you need to get easy_install since '
        print 'that is how webtest provisions its dependencies.'
        print 'See https://pythonhosted.org/setuptools/easy_install.html.'
        print 'Then:'
        print 'cd webtest-master'
        print 'python setup.py install'
        print '(Prefix with sudo / run in admin shell as necessary).'
        return False
def main(sdk_path, test_path, webtest_path):
    """Discover and run the App Engine unit tests.

    Returns 0 on success, 1 on test failure or missing webtest, or an
    error string when one of the required paths does not exist.
    """
    # Validate each required path up front, with a path-specific remedy.
    path_hints = (
        (sdk_path, 'Missing %s: try grunt shell:getPythonTestDeps.'),
        (test_path, 'Missing %s: try grunt build.'),
        (webtest_path, 'Missing %s: try grunt shell:getPythonTestDeps.'),
    )
    for path, hint in path_hints:
        if not os.path.exists(path):
            return hint % path

    # The SDK must be importable before dev_appserver can fix sys.path.
    sys.path.insert(0, sdk_path)
    import dev_appserver
    dev_appserver.fix_sys_path()
    sys.path.append(webtest_path)
    if not _WebTestIsInstalled():
        return 1

    suite = unittest.loader.TestLoader().discover(test_path,
                                                  pattern="*test.py")
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    return 0 if result.wasSuccessful() else 1
# Script entry point: parse the three positional paths and exit with
# main()'s status (an error string is printed by sys.exit).
if __name__ == '__main__':
    parser = optparse.OptionParser(USAGE)
    options, args = parser.parse_args()
    if len(args) != 3:
        parser.error('Error: Exactly 3 arguments required.')
    sdk_path, test_path, webtest_path = args[0:3]
    sys.exit(main(sdk_path, test_path, webtest_path))
|
import mraa

# Open UART bus 0 and report its device node path (Python 2 script).
u = mraa.Uart(0)
print u.getDevicePath()
|
"""Tests for cross entropy related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
# Short aliases used by the pure-Python reference implementations below.
exp = math.exp
log = math.log
class SigmoidCrossEntropyWithLogitsTest(tf.test.TestCase):
    """Compares tf.nn.sigmoid_cross_entropy_with_logits against a
    pure-Python reference implementation."""

    def _SigmoidCrossEntropyWithLogits(self, logits, targets):
        # Reference implementation: elementwise logistic loss with the
        # prediction clamped to [eps, 1-eps] for numerical stability.
        assert len(logits) == len(targets)
        pred = [1 / (1 + exp(-x)) for x in logits]
        eps = 0.0001
        pred = [min(max(p, eps), 1 - eps) for p in pred]
        return [-z * log(y) - (1 - z) * log(1 - y) for y, z in zip(pred, targets)]

    def _Inputs(self, x=None, y=None, dtype=tf.float64, sizes=None):
        # Builds (logits, targets, expected_losses); covers saturated
        # logits (+/-100), zeros, and a fractional target (0.5).
        x = [-100, -2, -2, 0, 2, 2, 2, 100] if x is None else x
        y = [0, 0, 1, 0, 0, 1, 0.5, 1] if y is None else y
        assert len(x) == len(y)
        sizes = sizes if sizes else [len(x)]
        logits = tf.constant(x, shape=sizes, dtype=dtype, name="logits")
        targets = tf.constant(y, shape=sizes, dtype=dtype, name="targets")
        losses = np.array(self._SigmoidCrossEntropyWithLogits(x, y)).reshape(*sizes)
        return logits, targets, losses

    def testConstructionNamed(self):
        with self.test_session():
            logits, targets, _ = self._Inputs()
            loss = tf.nn.sigmoid_cross_entropy_with_logits(logits,
                                                           targets,
                                                           name="mylogistic")
        self.assertEqual("mylogistic", loss.op.name)

    def testLogisticOutput(self):
        for use_gpu in [True, False]:
            for dtype in [tf.float32, tf.float16]:
                with self.test_session(use_gpu=use_gpu):
                    logits, targets, losses = self._Inputs(dtype=dtype)
                    loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, targets)
                    np_loss = np.array(losses).astype(np.float32)
                    tf_loss = loss.eval()
                self.assertAllClose(np_loss, tf_loss, atol=0.001)

    def testLogisticOutputMultiDim(self):
        for use_gpu in [True, False]:
            for dtype in [tf.float32, tf.float16]:
                with self.test_session(use_gpu=use_gpu):
                    logits, targets, losses = self._Inputs(dtype=dtype, sizes=[2, 2, 2])
                    loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, targets)
                    np_loss = np.array(losses).astype(np.float32)
                    tf_loss = loss.eval()
                self.assertAllClose(np_loss, tf_loss, atol=0.001)

    def testGradient(self):
        sizes = [4, 2]
        with self.test_session():
            logits, targets, _ = self._Inputs(sizes=sizes)
            loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, targets)
            err = tf.test.compute_gradient_error(logits, sizes, loss, sizes)
        print("logistic loss gradient err = ", err)
        self.assertLess(err, 1e-7)

    def testGradientAtZero(self):
        # At logit 0 the gradient is sigmoid(0) - target = 0.5 - target.
        with self.test_session():
            logits = tf.constant([0.0, 0.0], dtype=tf.float64)
            targets = tf.constant([0.0, 1.0], dtype=tf.float64)
            loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, targets)
            grads = tf.gradients(loss, logits)[0].eval()
        self.assertAllClose(grads, [0.5, -0.5])

    def testShapeError(self):
        with self.assertRaisesRegexp(ValueError, "must have the same shape"):
            tf.nn.sigmoid_cross_entropy_with_logits([[2, 1]], [1, 2, 3])
class WeightedCrossEntropyTest(tf.test.TestCase):
    """Compares tf.nn.weighted_cross_entropy_with_logits against a
    pure-Python reference with a positive-class weight."""

    def _WeightedCrossEntropy(self, logits, targets, pos_coeff):
        # Reference: logistic loss with the positive term scaled by
        # pos_coeff; predictions clamped for numerical stability.
        assert len(logits) == len(targets)
        pred = [1 / (1 + exp(-x)) for x in logits]
        eps = 0.0001
        pred = [min(max(p, eps), 1 - eps) for p in pred]
        return [-z * pos_coeff * log(y) - (1 - z) * log(1 - y)
                for y, z in zip(pred, targets)]

    def _Inputs(self, x=None, y=None, q=3.0, dtype=tf.float64, sizes=None):
        x = [-100, -2, -2, 0, 2, 2, 2, 100] if x is None else x
        y = [0, 0, 1, 0, 0, 1, 0.5, 1] if y is None else y
        assert len(x) == len(y)
        sizes = sizes if sizes else [len(x)]
        logits = tf.constant(x, shape=sizes, dtype=dtype, name="logits")
        targets = tf.constant(y, shape=sizes, dtype=dtype, name="targets")
        losses = np.array(self._WeightedCrossEntropy(x, y, q)).reshape(*sizes)
        return logits, targets, q, losses

    def testConstructionNamed(self):
        with self.test_session():
            logits, targets, pos_weight, _ = self._Inputs()
            loss = tf.nn.weighted_cross_entropy_with_logits(logits, targets,
                                                            pos_weight, name="mybce")
        self.assertEqual("mybce", loss.op.name)

    def testOutput(self):
        for use_gpu in [True, False]:
            with self.test_session(use_gpu=use_gpu):
                logits, targets, pos_weight, losses = self._Inputs(dtype=tf.float32)
                loss = tf.nn.weighted_cross_entropy_with_logits(logits, targets,
                                                                pos_weight)
                np_loss = np.array(losses).astype(np.float32)
                tf_loss = loss.eval()
            self.assertAllClose(np_loss, tf_loss, atol=0.001)

    def testOutputMultiDim(self):
        for use_gpu in [True, False]:
            with self.test_session(use_gpu=use_gpu):
                logits, targets, pos_weight, losses = self._Inputs(dtype=tf.float32,
                                                                   sizes=[2, 2, 2])
                loss = tf.nn.weighted_cross_entropy_with_logits(logits, targets,
                                                                pos_weight)
                np_loss = np.array(losses).astype(np.float32)
                tf_loss = loss.eval()
            self.assertAllClose(np_loss, tf_loss, atol=0.001)

    def testGradient(self):
        sizes = [4, 2]
        with self.test_session():
            logits, targets, pos_weight, _ = self._Inputs(sizes=sizes)
            loss = tf.nn.weighted_cross_entropy_with_logits(logits, targets,
                                                            pos_weight)
            err = tf.test.compute_gradient_error(logits, sizes, loss, sizes)
        print("logistic loss gradient err = ", err)
        self.assertLess(err, 1e-7)

    def testShapeError(self):
        with self.assertRaisesRegexp(ValueError, "must have the same shape"):
            tf.nn.weighted_cross_entropy_with_logits([[2, 1]], [1, 2, 3], 2.0)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    tf.test.main()
|
import subprocess, os, sys, re
def shellCommand( command, cwd=None ):
    """Run *command* through the shell and return its combined stdout+stderr.

    :param command: shell command line to execute.
    :param cwd: optional working directory for the child process.
    :raises Exception: when the command exits with a nonzero status.
    """
    # The following line works with Python 2.7+
    # return subprocess.check_output( command, shell=True, stderr=subprocess.STDOUT, cwd=cwd )
    p = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd )
    # BUGFIX: must consume output via communicate() before checking the exit
    # status -- calling wait() first can deadlock once the child fills the
    # OS pipe buffer, since nothing is draining stdout.
    output = p.communicate()[0]
    if p.returncode != 0:
        raise Exception( 'Command failed (%d): %s' % (p.returncode, command) )
    return output
def gitVersionString( cwd=None ):
    """Build a human-readable version string from git metadata.

    Returns None when *cwd* is not inside a git repository; otherwise a
    string like "v1.2-3, git commit abc1234 on 2015-01-02" (the tag part
    is omitted or prefixed with "derived from" depending on tag distance).
    """
    SHA1 = ''
    date = ''
    try:
        # The SHA1 and date should always be available if we have a git repo.
        SHA1 = shellCommand( 'git show -s --format=%h', cwd ).strip()
        date = shellCommand( 'git show -s --format=%ci', cwd ).split()[0]
    except: # subprocess.CalledProcessError:
        # Deliberate best-effort: any failure means "not a git repo here".
        return None

    # The tag check will always succeed if the repo starts with a v0.0 tag. To find only tags that
    # were part of the first parent, use --first-parent.
    tag = ''
    try:
        description = shellCommand( 'git describe --tags --long --match "v[0-9]*"', cwd ).rsplit('-',2)
        tag = description[0] + ', '
        commitsSinceTag = description[1]
        if commitsSinceTag != '0':
            # HEAD is past the tag, so the version is only derived from it.
            tag = 'derived from ' + tag
    except: # subprocess.CalledProcessError:
        # No matching tag: fall through with an empty tag prefix.
        pass

    return tag + "git commit " + SHA1 + " on " + date
def gitSvnVersionString( cwd=None ):
    """Build a version string from git-svn metadata, or None if unavailable.

    The revision is sanity-checked for a plausible length (1-9 chars)
    before being used.
    """
    try:
        date = shellCommand( 'git show -s --format=%ci', cwd ).split()[0]
        revision = shellCommand( 'git svn find-rev $(git log --max-count 1 --pretty=format:%H)', cwd ).strip()
        if len( revision ) > 0 and len( revision ) < 10:
            return 'svn revision ' + revision + " on " + date
    except: # subprocess.CalledProcessError:
        # Deliberate best-effort: any failure means no git-svn info.
        pass
    return None
def svnVersionString( cwd=None ):
    """Build a version string from `svnversion`, or None if unavailable.

    Only the first run of digits from the svnversion output is used
    (e.g. "1234M" -> "svn revision 1234").
    """
    try:
        revisionString = shellCommand( 'svnversion .', cwd )
        matchingRevision = re.search( r'\d+', revisionString )
        if matchingRevision is not None:
            return 'svn revision ' + matchingRevision.group(0)
    except: # subprocess.CalledProcessError:
        # Deliberate best-effort: any failure means no svn info.
        pass
    return None
def repoVersionString( cwd=None ):
    """Return the best available repository version string.

    Probes svn, then git-svn, then plain git, and falls back to
    "unknown" when none of them yields a result.
    """
    for probe in (svnVersionString, gitSvnVersionString, gitVersionString):
        version = probe( cwd )
        if version is not None:
            return version
    return "unknown"
def writeRevision( app_name, app_revision, revision_header ):
    """Write (or refresh) a C header defining <APP_NAME>_REVISION.

    The header is rewritten only when it does not already record exactly
    *app_revision*, so build systems watching the file's timestamp are not
    triggered spuriously.

    :param app_name: application name; upper-cased for the macro name.
    :param app_revision: revision string to embed in the header.
    :param revision_header: path of the header file to write.
    """
    # Use all caps for app name (by convention).
    app_definition = app_name.upper() + '_REVISION'

    # Rewrite unless the existing header already holds this exact revision.
    # (Previously a header that existed but lacked the definition was left
    # untouched; treating it as stale is the safer behavior.)
    revision_changed = True
    if os.path.exists(revision_header):
        # `with` guarantees the handle is closed even if read() raises.
        with open(revision_header, "r") as f:
            contents = f.read()
        m = re.search( re.escape( app_definition) + r' "([^"]*)"', contents )
        if m is not None and m.group(1) == app_revision:
            revision_changed = False

    if revision_changed:
        with open(revision_header, "w") as f:
            f.write( '/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n' \
                     '\n'
                     '#ifndef ' + app_definition + '_H\n'
                     '#define ' + app_definition + '_H\n'
                     '\n'
                     '#define ' + app_definition + ' "' + app_revision + '"\n'
                     '\n'
                     '#endif // ' + app_definition + '_H\n')
# Script entry point: expects <repo_location> <header_file> <app_name>.
# NOTE(review): runs at import time (no __main__ guard) and silently does
# nothing when the argument count is wrong -- confirm this is intentional.
if len(sys.argv) == 4:
    repo_location = sys.argv[1]
    header_file = sys.argv[2]
    app_name = sys.argv[3]

    revision = repoVersionString( repo_location )
    writeRevision( app_name, revision, header_file )
|
class HelpHandler(object):
    """Example bot that replies with a pointer to Zulip's source code
    whenever a stream message consists solely of the word "help"."""

    def usage(self):
        return '''
            This plugin will give info about Zulip to
            any user that types a message saying "help".

            This is example code; ideally, you would flesh
            this out for more useful help pertaining to
            your Zulip instance.
            '''

    def triage_message(self, message, client):
        # return True if we think the message may be of interest
        content = message['content']
        # Private (non-stream) messages are always considered interesting.
        if message['type'] != 'stream':
            return True
        # On streams, only react when the message is exactly "help"
        # (case- and whitespace-insensitive).
        return content.lower().strip() == 'help'

    def handle_message(self, message, client, state_handler):
        help_content = '''
            Info on Zulip can be found here:
            https://github.com/zulip/zulip
            '''.strip()
        reply = dict(
            type='stream',
            to=message['display_recipient'],
            subject=message['subject'],
            content=help_content,
        )
        client.send_message(reply)
handler_class = HelpHandler
|
import sys, os

# Make the shared test helpers (../common) and the in-tree python package
# importable without installing anything; the package dir is prepended so
# it wins over any system-wide installation.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../common/'))
sys.path.insert(0, os.path.join(curr_path, '../../../python'))
import models
import get_data
def assertRaises(expected_exception, func, *args, **kwargs):
    """Assert that func(*args, **kwargs) raises expected_exception.

    A different exception type propagates unchanged; no exception at all
    fails with an AssertionError naming the function and expected type.
    """
    raised = False
    try:
        func(*args, **kwargs)
    except expected_exception:
        raised = True
    assert raised, "%s did not raise %s" % (func.__name__, expected_exception.__name__)
|
import json
import unittest
import datetime
from decimal import Decimal
from twisted.internet import defer
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.http import Request, Response
class JsonEncoderTestCase(unittest.TestCase):
    """Tests ScrapyJSONEncoder's handling of non-JSON-native types."""

    def setUp(self):
        self.encoder = ScrapyJSONEncoder()

    def test_encode_decode(self):
        # For each (input, output) pair: encoding `input` must produce
        # exactly json.dumps(output) -- dates/times/Decimals become
        # strings, sets become lists.
        dt = datetime.datetime(2010, 1, 2, 10, 11, 12)
        dts = "2010-01-02 10:11:12"
        d = datetime.date(2010, 1, 2)
        ds = "2010-01-02"
        t = datetime.time(10, 11, 12)
        ts = "10:11:12"
        dec = Decimal("1000.12")
        decs = "1000.12"
        s = {'foo'}
        ss = ['foo']
        dt_set = {dt}
        dt_sets = [dts]
        for input, output in [('foo', 'foo'), (d, ds), (t, ts), (dt, dts),
                              (dec, decs), (['foo', d], ['foo', ds]), (s, ss),
                              (dt_set, dt_sets)]:
            self.assertEqual(self.encoder.encode(input), json.dumps(output))

    def test_encode_deferred(self):
        # Deferreds are not serializable; the encoder falls back to a
        # representation containing the class name.
        self.assertIn('Deferred', self.encoder.encode(defer.Deferred()))

    def test_encode_request(self):
        r = Request("http://www.example.com/lala")
        rs = self.encoder.encode(r)
        self.assertIn(r.method, rs)
        self.assertIn(r.url, rs)

    def test_encode_response(self):
        r = Response("http://www.example.com/lala")
        rs = self.encoder.encode(r)
        self.assertIn(r.url, rs)
        self.assertIn(str(r.status), rs)
|
from .notifications import Notifications
from .recordings import Recordings
from twilio.rest.resources.call_feedback import (
CallFeedbackFactory,
CallFeedbackSummary,
)
from .util import normalize_dates, parse_date, transform_params
from . import InstanceResource, ListResource
class Call(InstanceResource):
    """ A call resource """

    # Possible values of a call's status.
    BUSY = "busy"
    CANCELED = "canceled"
    COMPLETED = "completed"
    FAILED = "failed"
    IN_PROGRESS = "in-progress"
    NO_ANSWER = "no-answer"
    QUEUED = "queued"
    RINGING = "ringing"

    subresources = [
        Notifications,
        Recordings,
        CallFeedbackFactory,
    ]

    def hangup(self):
        """ If this call is currently active, hang up the call.
        If this call is scheduled to be made, remove the call
        from the queue
        """
        # Refresh this instance from the updated resource returned by the API.
        a = self.parent.hangup(self.name)
        self.load(a.__dict__)

    def cancel(self):
        """ If the call is queued or ringing, cancel the call.
        Will not affect in progress calls
        """
        a = self.parent.cancel(self.name)
        self.load(a.__dict__)

    def route(self, **kwargs):
        """Route the specified :class:`Call` to another url.

        :param url: A valid URL that returns TwiML.
        :param method: HTTP method Twilio uses when requesting the above URL.
        """
        a = self.parent.route(self.name, **kwargs)
        self.load(a.__dict__)

    def delete(self):
        """Delete the specified :class:`Call` record from Twilio."""
        return self.parent.delete(self.name)
class Calls(ListResource):
""" A list of Call resources """
name = "Calls"
instance = Call
def __init__(self, *args, **kwargs):
super(Calls, self).__init__(*args, **kwargs)
self.summary = CallFeedbackSummary(self, *args, **kwargs)
@normalize_dates
def list(self, from_=None, ended_after=None,
ended_before=None, ended=None, started_before=None,
started_after=None, started=None, **kwargs):
"""
Returns a page of :class:`Call` resources as a list. For paging
informtion see :class:`ListResource`
:param date after: Only list calls started after this datetime
:param date before: Only list calls started before this datetime
"""
kwargs["from"] = from_
kwargs["StartTime<"] = started_before
kwargs["StartTime>"] = started_after
kwargs["StartTime"] = parse_date(started)
kwargs["EndTime<"] = ended_before
kwargs["EndTime>"] = ended_after
kwargs["EndTime"] = parse_date(ended)
return self.get_instances(kwargs)
@normalize_dates
def iter(self, from_=None, ended_after=None,
ended_before=None, ended=None, started_before=None,
started_after=None, started=None, **kwargs):
"""
Returns an iterator of :class:`Call` resources.
:param date after: Only list calls started after this datetime
:param date before: Only list calls started before this datetime
"""
kwargs["from"] = from_
kwargs["StartTime<"] = started_before
kwargs["StartTime>"] = started_after
kwargs["StartTime"] = parse_date(started)
kwargs["EndTime<"] = ended_before
kwargs["EndTime>"] = ended_after
kwargs["EndTime"] = parse_date(ended)
return super(Calls, self).iter(**kwargs)
def create(self, to, from_, url, status_method=None, status_events=None,
**kwargs):
"""
Make a phone call to a number.
:param str to: The phone number to call
:param str `from_`: The caller ID (must be a verified Twilio number)
:param str url: The URL to read TwiML from when the call connects
:param method: The HTTP method Twilio should use to request the url
:type method: None (defaults to 'POST'), 'GET', or 'POST'
:param str fallback_url: A URL that Twilio will request if an error
occurs requesting or executing the TwiML at url
:param str fallback_method: The HTTP method that Twilio should use
to request the fallback_url
:type fallback_method: None (will make 'POST' request),
'GET', or 'POST'
:param str status_callback: A URL that Twilio will request when the
call ends to notify your app.
:param str status_method: The HTTP method Twilio should use when
requesting the above URL.
:param list status_events: A list of call progress events Twilio
should send status callback requests on. One or more of:
'initiated', 'ringing', 'answered', 'completed'. Defaults to
['completed'] if not provided. 'completed' events are sent
free of charge; see twilio.com for current pricing on others.
:param str if_machine: Tell Twilio to try and determine if a machine
(like voicemail) or a human has answered the call.
See more in our `answering machine documentation
<http://www.twilio.com/docs/api/rest/making_calls>`_.
:type if_machine: None, 'Continue', or 'Hangup'
:param str send_digits: A string of keys to dial after
connecting to the number.
:type send_digits: None or any combination of
(0-9), '#', '*' or 'w' (to insert a half second pause).
:param int timeout: The integer number of seconds that Twilio should
allow the phone to ring before assuming there is no answer.
:param str application_sid: The 34 character sid of the application
Twilio should use to handle this phone call.
Should not be used in conjunction with the url parameter.
:return: A :class:`Call` object
"""
kwargs["from"] = from_
kwargs["to"] = to
kwargs["url"] = url
kwargs["status_callback_method"] = status_method
kwargs["status_callback_event"] = status_events
return self.create_instance(kwargs)
def update(self, sid, **kwargs):
return self.update_instance(sid, kwargs)
def cancel(self, sid):
""" If this call is queued or ringing, cancel the call.
Will not affect in-progress calls.
:param sid: A Call Sid for a specific call
:returns: Updated :class:`Call` resource
"""
return self.update(sid, status=Call.CANCELED)
def hangup(self, sid):
""" If this call is currently active, hang up the call. If this call is
scheduled to be made, remove the call from the queue.
:param sid: A Call Sid for a specific call
:returns: Updated :class:`Call` resource
"""
return self.update(sid, status=Call.COMPLETED)
def route(self, sid, url, method="POST"):
"""Route the specified :class:`Call` to another url.
:param sid: A Call Sid for a specific call
:param url: A valid URL that returns TwiML.
:param method: The HTTP method Twilio uses when requesting the URL.
:returns: Updated :class:`Call` resource
"""
return self.update(sid, url=url, method=method)
def feedback(self, sid, quality_score, issue=None):
""" Create feedback for the given call.
:param sid: A Call Sid for a specific call
:param quality_score: The quality of the call
:param issue: A list of issues experienced during the call
:returns: A :class:`CallFeedback` object
"""
uri = "%s/%s" % (self.uri, sid)
call_feedback_factory = CallFeedbackFactory(
uri, self.auth, self.timeout
)
return call_feedback_factory.create(
quality_score=quality_score, issue=issue
)
    def delete(self, sid):
        """Delete the given Call record from Twilio.

        :param sid: A Call Sid for a specific call
        :returns: result of the underlying ``delete_instance`` call
        """
        return self.delete_instance(sid)
|
import glob
import os
from cinder import exception
from cinder.i18n import _
from cinder import test
class ExceptionTestCase(test.TestCase):
    """Sanity checks for the classes exposed by cinder.exception."""

    @staticmethod
    def _raise_exc(exc):
        # Target for assertRaises: instantiate the class and raise it.
        raise exc()

    def test_exceptions_raise(self):
        """Every class in cinder.exception must be raisable without kwargs."""
        # NOTE(dprince): disable format errors since we are not passing kwargs
        self.flags(fatal_exception_format_errors=False)
        candidates = [getattr(exception, attr) for attr in dir(exception)]
        for exc in candidates:
            if isinstance(exc, type):
                self.assertRaises(exc, self._raise_exc, exc)
class ProjectTestCase(test.TestCase):
    """Project-wide consistency checks over the source tree."""

    def test_all_migrations_have_downgrade(self):
        """Every sqlalchemy migration that upgrades must also downgrade."""
        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
        py_glob = os.path.join(topdir, "cinder", "db", "sqlalchemy",
                               "migrate_repo", "versions", "*.py")
        missing_downgrade = []
        for path in glob.iglob(py_glob):
            # The probed substrings contain no newline, so scanning the
            # whole file is equivalent to a line-by-line search.
            with open(path, "r") as script:
                source = script.read()
            if 'def upgrade(' in source and 'def downgrade(' not in source:
                missing_downgrade.append(os.path.basename(path))
        helpful_msg = (_("The following migrations are missing a downgrade:"
                         "\n\t%s") % '\n\t'.join(sorted(missing_downgrade)))
        self.assertFalse(missing_downgrade, msg=helpful_msg)
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
parse_iso8601,
qualities,
)
class NDRBaseIE(InfoExtractor):
    """Shared extraction flow for NDR sites: resolve the display id from
    the URL, download the page, then delegate to the subclass's
    ``_extract_embed()``."""

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # The URL pattern may define several alternative groups; take the
        # first one that actually matched.
        display_id = next(filter(None, mobj.groups()))
        webpage = self._download_webpage(url, display_id)
        return self._extract_embed(webpage, display_id)
class NDRIE(NDRBaseIE):
    """Extractor for article pages on ndr.de; resolves the embedded
    player URL and hands it off as a transparent URL result."""
    IE_NAME = 'ndr'
    IE_DESC = 'NDR.de - Norddeutscher Rundfunk'
    _VALID_URL = r'https?://(?:www\.)?ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html'
    _TESTS = [{
        # httpVideo, same content id
        'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html',
        'md5': '6515bc255dc5c5f8c85bbc38e035a659',
        'info_dict': {
            'id': 'hafengeburtstag988',
            'display_id': 'Party-Poette-und-Parade',
            'ext': 'mp4',
            'title': 'Party, Pötte und Parade',
            'description': 'md5:ad14f9d2f91d3040b6930c697e5f6b4c',
            'uploader': 'ndrtv',
            'timestamp': 1431108900,
            'upload_date': '20150510',
            'duration': 3498,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # httpVideo, different content id
        'url': 'http://www.ndr.de/sport/fussball/40-Osnabrueck-spielt-sich-in-einen-Rausch,osna270.html',
        'md5': '1043ff203eab307f0c51702ec49e9a71',
        'info_dict': {
            'id': 'osna272',
            'display_id': '40-Osnabrueck-spielt-sich-in-einen-Rausch',
            'ext': 'mp4',
            'title': 'Osnabrück - Wehen Wiesbaden: Die Highlights',
            'description': 'md5:32e9b800b3d2d4008103752682d5dc01',
            'uploader': 'ndrtv',
            'timestamp': 1442059200,
            'upload_date': '20150912',
            'duration': 510,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # httpAudio, same content id
        'url': 'http://www.ndr.de/info/La-Valette-entgeht-der-Hinrichtung,audio51535.html',
        'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
        'info_dict': {
            'id': 'audio51535',
            'display_id': 'La-Valette-entgeht-der-Hinrichtung',
            'ext': 'mp3',
            'title': 'La Valette entgeht der Hinrichtung',
            'description': 'md5:22f9541913a40fe50091d5cdd7c9f536',
            'uploader': 'ndrinfo',
            'timestamp': 1290626100,
            'upload_date': '20140729',
            'duration': 884,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html',
        'only_matching': True,
    }]

    def _extract_embed(self, webpage, display_id):
        # The player URL is published as an <meta itemprop="embedURL"> tag;
        # extraction fails hard if it is missing.
        embed_url = self._html_search_meta(
            'embedURL', webpage, 'embed URL', fatal=True)
        # Prefer the itemprop description; fall back to the og:description.
        description = self._search_regex(
            r'<p[^>]+itemprop="description">([^<]+)</p>',
            webpage, 'description', default=None) or self._og_search_description(webpage)
        # The page carries the publication date in an itemprop content
        # attribute (ISO 8601); None if absent.
        timestamp = parse_iso8601(
            self._search_regex(
                r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="([^"]+)"',
                webpage, 'upload date', fatal=False))
        return {
            '_type': 'url_transparent',
            'url': embed_url,
            'display_id': display_id,
            'description': description,
            'timestamp': timestamp,
        }
class NJoyIE(NDRBaseIE):
    """Extractor for article pages on n-joy.de; resolves the embedded
    player id and defers to NDREmbedBaseIE via an ``ndr:`` URL."""
    IE_NAME = 'njoy'
    IE_DESC = 'N-JOY'
    _VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html'
    _TESTS = [{
        # httpVideo, same content id
        'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html',
        'md5': 'cb63be60cd6f9dd75218803146d8dc67',
        'info_dict': {
            'id': 'comedycontest2480',
            'display_id': 'Benaissa-beim-NDR-Comedy-Contest',
            'ext': 'mp4',
            'title': 'Benaissa beim NDR Comedy Contest',
            'description': 'md5:f057a6c4e1c728b10d33b5ffd36ddc39',
            'uploader': 'ndrtv',
            'upload_date': '20141129',
            'duration': 654,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # httpVideo, different content id
        'url': 'http://www.n-joy.de/musik/Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-,felixjaehn168.html',
        'md5': '417660fffa90e6df2fda19f1b40a64d8',
        'info_dict': {
            'id': 'dockville882',
            'display_id': 'Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-',
            'ext': 'mp4',
            'title': '"Ich hab noch nie" mit Felix Jaehn',
            'description': 'md5:85dd312d53be1b99e1f998a16452a2f3',
            'uploader': 'njoy',
            'upload_date': '20150822',
            'duration': 211,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html',
        'only_matching': True,
    }]

    def _extract_embed(self, webpage, display_id):
        # The real content id is embedded in the player iframe's element id
        # ("pp_<id>"); it may differ from the id in the page URL.
        video_id = self._search_regex(
            r'<iframe[^>]+id="pp_([\da-z]+)"', webpage, 'embed id')
        description = self._search_regex(
            r'<div[^>]+class="subline"[^>]*>[^<]+</div>\s*<p>([^<]+)</p>',
            webpage, 'description', fatal=False)
        return {
            '_type': 'url_transparent',
            'ie_key': 'NDREmbedBase',
            'url': 'ndr:%s' % video_id,
            'display_id': display_id,
            'description': description,
        }
class NDREmbedBaseIE(InfoExtractor):
    """Resolve an NDR content id (``ndr:<id>`` or a ``*-ppjson.json`` URL)
    by downloading the player's ppjson metadata and building formats."""
    IE_NAME = 'ndr:embed:base'
    _VALID_URL = r'(?:ndr:(?P<id_s>[\da-z]+)|https?://www\.ndr\.de/(?P<id>[\da-z]+)-ppjson\.json)'
    _TESTS = [{
        'url': 'ndr:soundcheck3366',
        'only_matching': True,
    }, {
        'url': 'http://www.ndr.de/soundcheck3366-ppjson.json',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Exactly one of the two alternatives in _VALID_URL matched.
        video_id = mobj.group('id') or mobj.group('id_s')
        ppjson = self._download_json(
            'http://www.ndr.de/%s-ppjson.json' % video_id, video_id)
        # NOTE(review): 'playlist' appears to map format ids to source
        # dicts and also carries a 'config' entry — schema inferred from
        # usage below; confirm against a live ppjson response.
        playlist = ppjson['playlist']
        formats = []
        # Known quality labels, worst to best.
        quality_key = qualities(('xs', 's', 'm', 'l', 'xl'))
        for format_id, f in playlist.items():
            src = f.get('src')
            if not src:
                continue
            ext = determine_ext(src, None)
            if ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, f4m_id='hds'))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    src, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native'))
            else:
                quality = f.get('quality')
                ff = {
                    'url': src,
                    'format_id': quality or format_id,
                    'quality': quality_key(quality),
                }
                type_ = f.get('type')
                # Audio-only sources: no video codec, default to mp3.
                if type_ and type_.split('/')[0] == 'audio':
                    ff['vcodec'] = 'none'
                    ff['ext'] = ext or 'mp3'
                formats.append(ff)
        self._sort_formats(formats)
        config = playlist['config']
        # NOTE(review): this re-reads playlist['config'] with a default even
        # though the line above would already have raised if it were
        # missing — redundant but harmless.
        live = playlist.get('config', {}).get('streamType') in ['httpVideoLive', 'httpAudioLive']
        title = config['title']
        if live:
            title = self._live_title(title)
        # NOTE(review): uploader/upload_date come from the top-level
        # ppjson 'config', not playlist['config'] — presumably distinct
        # objects; verify against the JSON payload.
        uploader = ppjson.get('config', {}).get('branding')
        upload_date = ppjson.get('config', {}).get('publicationDate')
        duration = int_or_none(config.get('duration'))
        thumbnails = [{
            'id': thumbnail.get('quality') or thumbnail_id,
            'url': thumbnail['src'],
            'preference': quality_key(thumbnail.get('quality')),
        } for thumbnail_id, thumbnail in config.get('poster', {}).items() if thumbnail.get('src')]
        return {
            'id': video_id,
            'title': title,
            'is_live': live,
            # The site uses '-' as a "no branding" placeholder.
            'uploader': uploader if uploader != '-' else None,
            # publicationDate starts with YYYYMMDD; keep only that part.
            'upload_date': upload_date[0:8] if upload_date else None,
            'duration': duration,
            'thumbnails': thumbnails,
            'formats': formats,
        }
class NDREmbedIE(NDREmbedBaseIE):
    """Extractor for ndr.de player/externalPlayer embed pages; all logic
    lives in NDREmbedBaseIE, this class only matches the embed URLs."""
    IE_NAME = 'ndr:embed'
    _VALID_URL = r'https?://(?:www\.)?ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html'
    _TESTS = [{
        'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html',
        'md5': '8b9306142fe65bbdefb5ce24edb6b0a9',
        'info_dict': {
            'id': 'ndraktuell28488',
            'ext': 'mp4',
            'title': 'Norddeutschland begrüßt Flüchtlinge',
            'is_live': False,
            'uploader': 'ndrtv',
            'upload_date': '20150907',
            'duration': 132,
        },
    }, {
        'url': 'http://www.ndr.de/ndr2/events/soundcheck/soundcheck3366-player.html',
        'md5': '002085c44bae38802d94ae5802a36e78',
        'info_dict': {
            'id': 'soundcheck3366',
            'ext': 'mp4',
            'title': 'Ella Henderson braucht Vergleiche nicht zu scheuen',
            'is_live': False,
            'uploader': 'ndr2',
            'upload_date': '20150912',
            'duration': 3554,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.ndr.de/info/audio51535-player.html',
        'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
        'info_dict': {
            'id': 'audio51535',
            'ext': 'mp3',
            'title': 'La Valette entgeht der Hinrichtung',
            'is_live': False,
            'uploader': 'ndrinfo',
            'upload_date': '20140729',
            'duration': 884,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.ndr.de/fernsehen/sendungen/visite/visite11010-externalPlayer.html',
        'md5': 'ae57f80511c1e1f2fd0d0d3d31aeae7c',
        'info_dict': {
            'id': 'visite11010',
            'ext': 'mp4',
            'title': 'Visite - die ganze Sendung',
            'is_live': False,
            'uploader': 'ndrtv',
            'upload_date': '20150902',
            'duration': 3525,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # httpVideoLive
        'url': 'http://www.ndr.de/fernsehen/livestream/livestream217-externalPlayer.html',
        'info_dict': {
            'id': 'livestream217',
            'ext': 'flv',
            'title': r're:^NDR Fernsehen Niedersachsen \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
            'is_live': True,
            'upload_date': '20150910',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.ndr.de/ndrkultur/audio255020-player.html',
        'only_matching': True,
    }, {
        'url': 'http://www.ndr.de/fernsehen/sendungen/nordtour/nordtour7124-player.html',
        'only_matching': True,
    }, {
        'url': 'http://www.ndr.de/kultur/film/videos/videoimport10424-player.html',
        'only_matching': True,
    }, {
        'url': 'http://www.ndr.de/fernsehen/sendungen/hamburg_journal/hamj43006-player.html',
        'only_matching': True,
    }, {
        'url': 'http://www.ndr.de/fernsehen/sendungen/weltbilder/weltbilder4518-player.html',
        'only_matching': True,
    }, {
        'url': 'http://www.ndr.de/fernsehen/doku952-player.html',
        'only_matching': True,
    }]
class NJoyEmbedIE(NDREmbedBaseIE):
    """Extractor for n-joy.de player/externalPlayer embed pages; all logic
    lives in NDREmbedBaseIE, this class only matches the embed URLs."""
    IE_NAME = 'njoy:embed'
    _VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html'
    _TESTS = [{
        # httpVideo
        'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html',
        'md5': '8483cbfe2320bd4d28a349d62d88bd74',
        'info_dict': {
            'id': 'doku948',
            'ext': 'mp4',
            'title': 'Zehn Jahre Reeperbahn Festival - die Doku',
            'is_live': False,
            'upload_date': '20150807',
            'duration': 1011,
        },
    }, {
        # httpAudio
        'url': 'http://www.n-joy.de/news_wissen/stefanrichter100-player_image-d5e938b1-f21a-4b9a-86b8-aaba8bca3a13_theme-n-joy.html',
        'md5': 'd989f80f28ac954430f7b8a48197188a',
        'info_dict': {
            'id': 'stefanrichter100',
            'ext': 'mp3',
            'title': 'Interview mit einem Augenzeugen',
            'is_live': False,
            'uploader': 'njoy',
            'upload_date': '20150909',
            'duration': 140,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # httpAudioLive, no explicit ext
        'url': 'http://www.n-joy.de/news_wissen/webradioweltweit100-player_image-3fec0484-2244-4565-8fb8-ed25fd28b173_theme-n-joy.html',
        'info_dict': {
            'id': 'webradioweltweit100',
            'ext': 'mp3',
            'title': r're:^N-JOY Weltweit \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
            'is_live': True,
            'uploader': 'njoy',
            'upload_date': '20150810',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.n-joy.de/musik/dockville882-player_image-3905259e-0803-4764-ac72-8b7de077d80a_theme-n-joy.html',
        'only_matching': True,
    }, {
        'url': 'http://www.n-joy.de/radio/sendungen/morningshow/urlaubsfotos190-player_image-066a5df1-5c95-49ec-a323-941d848718db_theme-n-joy.html',
        'only_matching': True,
    }, {
        'url': 'http://www.n-joy.de/entertainment/comedy/krudetv290-player_image-ab261bfe-51bf-4bf3-87ba-c5122ee35b3d_theme-n-joy.html',
        'only_matching': True,
    }]
|
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron import manager
# Attribute maps for the two resources this extension exposes.  Each entry
# follows the Neutron attribute-map convention: per-attribute dicts of
# flags ('allow_post'/'allow_put' govern which verbs may supply the value,
# 'is_visible' whether it appears in responses) plus optional defaults,
# validators and converters.
RESOURCE_ATTRIBUTE_MAP = {
    'network_profiles': {
        # Server-generated UUID; read-only.
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:regex': attributes.UUID_PATTERN},
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'is_visible': True, 'default': ''},
        # Segment type is fixed at creation time.
        'segment_type': {'allow_post': True, 'allow_put': False,
                         'is_visible': True, 'default': ''},
        'sub_type': {'allow_post': True, 'allow_put': False,
                     'is_visible': True,
                     'default': attributes.ATTR_NOT_SPECIFIED},
        'segment_range': {'allow_post': True, 'allow_put': True,
                          'is_visible': True, 'default': ''},
        'multicast_ip_range': {'allow_post': True, 'allow_put': True,
                               'is_visible': True,
                               'default': attributes.ATTR_NOT_SPECIFIED},
        # Internal bookkeeping field; never exposed through the API.
        'multicast_ip_index': {'allow_post': False, 'allow_put': False,
                               'is_visible': False, 'default': '0'},
        'physical_network': {'allow_post': True, 'allow_put': False,
                             'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'is_visible': False, 'default': ''},
        # Tenant membership updates; None is normalized to an empty list.
        'add_tenants': {'allow_post': True, 'allow_put': True,
                        'is_visible': True, 'default': None,
                        'convert_to': attributes.convert_none_to_empty_list},
        'remove_tenants': {
            'allow_post': True, 'allow_put': True,
            'is_visible': True, 'default': None,
            'convert_to': attributes.convert_none_to_empty_list,
        },
    },
    # Read-only view of profile-to-tenant bindings.
    'network_profile_bindings': {
        'profile_id': {'allow_post': False, 'allow_put': False,
                       'validate': {'type:regex': attributes.UUID_PATTERN},
                       'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'is_visible': True},
    },
}
class Network_profile(extensions.ExtensionDescriptor):
    """API extension descriptor for Cisco N1kv network profiles."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Cisco N1kv Network Profiles"

    @classmethod
    def get_alias(cls):
        """Machine-readable alias used in API requests."""
        return 'network_profile'

    @classmethod
    def get_description(cls):
        """Short description of what the extension provides."""
        return "Profile includes the type of profile for N1kv"

    @classmethod
    def get_namespace(cls):
        """XML namespace identifying this extension."""
        return "http://docs.openstack.org/ext/n1kv/network-profile/api/v2.0"

    @classmethod
    def get_updated(cls):
        """Timestamp of the extension's last update."""
        return "2012-07-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Extended Resources."""
        plugin = manager.NeutronManager.get_plugin()
        resources = []
        for resource_name in ['network_profile', 'network_profile_binding']:
            collection_name = resource_name + "s"
            # Build a REST controller backed by the core plugin and the
            # attribute map declared above.
            controller = base.create_resource(
                collection_name,
                resource_name,
                plugin,
                RESOURCE_ATTRIBUTE_MAP.get(collection_name))
            resources.append(
                extensions.ResourceExtension(collection_name, controller))
        return resources
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.