code
stringlengths 1
199k
|
|---|
from AlgorithmImports import *
class IndexOptionPutITMExpiryRegressionAlgorithm(QCAlgorithm):
    """Regression algorithm: buy an SPX index put that expires in the money,
    then assert the delisting events, the exercise order events and the final
    portfolio state all happen exactly as expected."""

    def Initialize(self):
        """Set up dates, subscribe to SPX and the ITM put contract under test."""
        self.SetStartDate(2021, 1, 4)
        self.SetEndDate(2021, 1, 31)
        self.spx = self.AddIndex("SPX", Resolution.Minute).Symbol

        # Select a index option expiring ITM, and adds it to the algorithm.
        chain = self.OptionChainProvider.GetOptionContractList(self.spx, self.Time)
        puts = [
            c for c in chain
            if c.ID.StrikePrice >= 4200
            and c.ID.OptionRight == OptionRight.Put
            and c.ID.Date.year == 2021
            and c.ID.Date.month == 1
        ]
        # Lowest qualifying strike -- equivalent to sorted(...)[0].
        contract = min(puts, key=lambda c: c.ID.StrikePrice)
        self.spxOption = self.AddIndexOptionContract(contract, Resolution.Minute).Symbol

        self.expectedContract = Symbol.CreateOption(
            self.spx, Market.USA, OptionStyle.European, OptionRight.Put,
            4200, datetime(2021, 1, 15))
        if self.spxOption != self.expectedContract:
            raise Exception(f"Contract {self.expectedContract} was not found in the chain")

        self.Schedule.On(
            self.DateRules.Tomorrow,
            self.TimeRules.AfterMarketOpen(self.spx, 1),
            lambda: self.MarketOrder(self.spxOption, 1))

    def OnData(self, data: Slice):
        """Assert delisting warning/delisted events arrive at the expected
        dates. These assertions detect bug #4872."""
        for delisting in data.Delistings.Values:
            if delisting.Type == DelistingType.Warning and delisting.Time != datetime(2021, 1, 15):
                raise Exception(f"Delisting warning issued at unexpected date: {delisting.Time}")
            if delisting.Type == DelistingType.Delisted and delisting.Time != datetime(2021, 1, 16):
                raise Exception(f"Delisting happened at unexpected date: {delisting.Time}")

    def OnOrderEvent(self, orderEvent: OrderEvent):
        """Route fill events to the matching assertion helper."""
        if orderEvent.Status != OrderStatus.Filled:
            # There's lots of noise with OnOrderEvent, but we're only
            # interested in fills.
            return
        if orderEvent.Symbol not in self.Securities:
            raise Exception(f"Order event Symbol not found in Securities collection: {orderEvent.Symbol}")
        security = self.Securities[orderEvent.Symbol]
        if security.Symbol == self.spx:
            self.AssertIndexOptionOrderExercise(orderEvent, security, self.Securities[self.expectedContract])
        elif security.Symbol == self.expectedContract:
            self.AssertIndexOptionContractOrder(orderEvent, security)
        else:
            raise Exception(f"Received order event for unknown Symbol: {orderEvent.Symbol}")

    def AssertIndexOptionOrderExercise(self, orderEvent: OrderEvent, index: Security, optionContract: Security):
        """Check the exercise order fired at the expected time and price and
        cleared the option contract's holdings."""
        expectedLiquidationTimeUtc = datetime(2021, 1, 15)
        if orderEvent.Direction == OrderDirection.Buy and orderEvent.UtcTime != expectedLiquidationTimeUtc:
            raise Exception(f"Liquidated index option contract, but not at the expected time. Expected: {expectedLiquidationTimeUtc} - found {orderEvent.UtcTime}")
        # No way to detect option exercise orders or any other kind of special
        # orders other than matching strings, for now.
        # NOTE(review): 3300 does not match the 4200 strike selected in
        # Initialize -- looks copied from another regression; confirm.
        if "Option Exercise" in orderEvent.Message:
            if orderEvent.FillPrice != 3300:
                raise Exception("Option did not exercise at expected strike price (3300)")
            if optionContract.Holdings.Quantity != 0:
                raise Exception(f"Exercised option contract, but we have holdings for Option contract {optionContract.Symbol}")

    def AssertIndexOptionContractOrder(self, orderEvent: OrderEvent, option: Security):
        """Check option-contract fills create/clear holdings as expected."""
        if orderEvent.Direction == OrderDirection.Buy and option.Holdings.Quantity != 1:
            raise Exception(f"No holdings were created for option contract {option.Symbol}")
        if orderEvent.Direction == OrderDirection.Sell and option.Holdings.Quantity != 0:
            raise Exception(f"Holdings were found after a filled option exercise")
        if "Exercise" in orderEvent.Message and option.Holdings.Quantity != 0:
            raise Exception(f"Holdings were found after exercising option contract {option.Symbol}")

    def OnEndOfAlgorithm(self):
        """Ran at the end of the algorithm to ensure the algorithm has no
        holdings; raises if any position remains open."""
        if self.Portfolio.Invested:
            raise Exception(f"Expected no holdings at end of algorithm, but are invested in: {', '.join(self.Portfolio.Keys)}")
|
from flask.ext.wtf import Form, TextField, Required, PasswordField, BooleanField, ValidationError, validators
from labmanager.babel import gettext, lazy_gettext
class RetrospectiveForm(Form):
    """Base form that can report its own field names (CSRF fields excluded)."""

    def get_field_names(self):
        """Return the names of all fields, skipping the CSRF token field."""
        return [field.name for field in self
                if 'csrf' not in str(type(field)).lower()]
class AddForm(RetrospectiveForm):
    """Form for registering an RLMS. Subclasses may provide DEFAULT_* class
    attributes to pre-populate the corresponding fields."""

    name = TextField(lazy_gettext('Name'), validators=[validators.Required()], default='RLMS name', description=lazy_gettext("Name of the RLMS"))
    url = TextField(lazy_gettext('URL'), validators=[validators.URL(require_tld=False), validators.Required()], default='http://rlms-address/', description=lazy_gettext('Main URL of the RLMS'))
    location = TextField(lazy_gettext('Location'), validators=[validators.Required()], default='City, Country', description=lazy_gettext("City and country where the RLMS is hosted"))
    publicly_available = BooleanField(lazy_gettext('Public'), default=False, description=lazy_gettext("Do you want to provide access to this laboratory publicly?"))
    public_identifier = TextField(lazy_gettext('Public identifier'), default='', description=lazy_gettext("If publicly available, under what identifier?"))
    default_autoload = BooleanField(lazy_gettext('Autoload'), default=False, description=lazy_gettext("Should these labs be loaded by default?"))

    def __init__(self, **kwargs):
        # Name falls back to the public identifier when no explicit default.
        name_default = getattr(self, 'DEFAULT_NAME',
                               getattr(self, 'DEFAULT_PUBLIC_IDENTIFIER', None))
        # These defaults are applied only when truthy (empty string means
        # "no default").
        truthy_defaults = (
            ('name', name_default),
            ('location', getattr(self, 'DEFAULT_LOCATION', None)),
            ('url', getattr(self, 'DEFAULT_URL', None)),
            ('public_identifier', getattr(self, 'DEFAULT_PUBLIC_IDENTIFIER', '')),
        )
        for key, value in truthy_defaults:
            if value:
                kwargs.setdefault(key, value)
        # Booleans are applied whenever declared, even when False.
        bool_defaults = (
            ('publicly_available', getattr(self, 'DEFAULT_PUBLICLY_AVAILABLE', None)),
            ('default_autoload', getattr(self, 'DEFAULT_AUTOLOAD', None)),
        )
        for key, value in bool_defaults:
            if value is not None:
                kwargs.setdefault(key, value)
        super(AddForm, self).__init__(**kwargs)
class AddLmsForm(RetrospectiveForm):
    """Form for registering an LMS with the credentials used in both
    directions (LMS <-> Labmanager)."""
    name = TextField(lazy_gettext("Name"), validators = [ Required() ])
    url = TextField(lazy_gettext("URL"), validators = [ Required() ])
    lms_login = TextField(lazy_gettext("LMS login"), validators = [ Required() ])
    lms_password = PasswordField(lazy_gettext("LMS password"))
    labmanager_login = TextField(lazy_gettext("Labmanager login"), validators = [ Required() ])
    labmanager_password = PasswordField(lazy_gettext("Labmanager password"))

    def __init__(self, add_or_edit, *args, **kwargs):
        # add_or_edit: presumably True when adding (passwords then become
        # mandatory) and False when editing -- TODO confirm against callers.
        super(AddLmsForm, self).__init__(*args, **kwargs)
        self.add_or_edit = add_or_edit

    # WTForms invokes validate_<fieldname> hooks automatically by name.
    def validate_lms_password(form, field):
        if form.add_or_edit and field.data == '':
            raise ValidationError(gettext("This field is required."))

    def validate_labmanager_password(form, field):
        if form.add_or_edit and field.data == '':
            raise ValidationError(gettext("This field is required."))
class AddUserForm(RetrospectiveForm):
    """Form for creating or editing a user account."""
    name = TextField(lazy_gettext("Name"), validators = [ Required() ])
    login = TextField(lazy_gettext("Login"), validators = [ Required() ])
    password = PasswordField(lazy_gettext("Password"))

    def __init__(self, add_or_edit, *args, **kwargs):
        # add_or_edit: presumably True when adding (password then becomes
        # mandatory) -- TODO confirm against callers.
        super(AddUserForm, self).__init__(*args, **kwargs)
        self.add_or_edit = add_or_edit

    # WTForms invokes validate_<fieldname> hooks automatically by name.
    def validate_password(form, field):
        if form.add_or_edit and field.data == '':
            raise ValidationError(gettext("This field is required."))
class GenericPermissionForm(RetrospectiveForm):
    """Permission form with a single required free-text identifier."""
    identifier = TextField(lazy_gettext("Identifier"), validators = [ Required() ])
def login_validator(form, field):
    """Validate a login field.

    Valid logins are at least 5 characters and contain only lowercase
    alphanumerics plus '.' and '_'. Raises ValidationError listing the
    offending characters, or complaining about the length.
    """
    invalid_chars = [c
                     for c in field.data
                     # Uppercase is rejected outright; other characters must
                     # be alphanumeric or one of '.' / '_'.
                     if c.isupper() or not c.isalnum() and c not in '._']
    if invalid_chars:
        raise ValidationError(gettext('Invalid characters found: %(char)s', char=', '.join(invalid_chars)))
    if len(field.data) < 5:
        # Fixed typo in the user-facing message: "lenght" -> "length".
        raise ValidationError(gettext('login length must be at least 5 characters long'))
# Default validators for login fields: regex pre-check plus the detailed
# login_validator above. NOTE(review): the pattern is a non-raw string with
# "\." / "\_" escapes -- Python leaves them as literal backslash sequences,
# which regex treats as '.' and '_', but they trigger invalid-escape
# warnings on modern Python; consider a raw string.
USER_LOGIN_DEFAULT_VALIDATORS = [validators.Regexp("^[a-z0-9\.\_]{5,}$"), login_validator]
def password_validator(form, field):
    """Validate a password field.

    Empty passwords are accepted here (required-ness is handled elsewhere,
    e.g. by validators.Optional). Non-empty passwords must be at least 8
    characters and contain no whitespace.
    """
    if len(field.data) > 0:
        # Reject any whitespace character anywhere in the password.
        if any(c.isspace() for c in field.data):
            raise ValidationError(gettext('Passwords can not contain a space'))
        if len(field.data) < 8:
            # Fixed typo in the user-facing message: "lenght" -> "length".
            raise ValidationError(gettext('password length must be at least 8 characters long'))
# Default validators for password fields: Optional allows blank values,
# the regex enforces ">= 8 non-space chars", and password_validator adds
# the detailed user-facing error messages.
USER_PASSWORD_DEFAULT_VALIDATORS = [validators.Optional(),validators.Regexp("[^\s]{8,}"), password_validator]
class RegistrationPermissionForm(RetrospectiveForm):
    """Registration-permission form with a single required identifier."""
    identifier = TextField(lazy_gettext("Identifier"), validators = [ Required() ])
def school_full_name_validator(form, field):
    """Validate a school's official name: must be 4-50 characters long."""
    if len(field.data) < 4 or len(field.data) > 50:
        # Fixed typo in the user-facing message: "Oficial" -> "Official".
        raise ValidationError(gettext('Official name must be between 4 and 50 characters long'))
# Default validator chain for the school's full (official) name field.
SCHOOL_FULL_NAME_VALIDATORS = [school_full_name_validator]
def school_short_name_validator(form, field):
    """Validate a school's short name: 4-15 characters, alphanumeric or '.'."""
    data = field.data
    if len(data) < 4 or len(data) > 15:
        raise ValidationError(gettext('Short name must be between 4 and 15 characters long'))
    # Length is fine -- now reject anything that is not alphanumeric or '.'.
    bad_chars = [c for c in data if not c.isalnum() and c not in '.']
    if bad_chars:
        raise ValidationError(gettext('Invalid characters found: %(char)s', char=', '.join(bad_chars)))
# Default validator chain for the school's short-name field: regex pre-check
# (lowercase alphanumerics and '.', 4-15 chars) plus the detailed validator.
SCHOOL_SHORT_NAME_VALIDATORS = [validators.Regexp("^[a-z0-9\.]{4,15}$"), school_short_name_validator]
def user_full_name_validator(form, field):
    """Validate a user's full name: must be 4-15 characters long."""
    if len(field.data) < 4 or len(field.data) > 15:
        # Fixed typo in the user-facing message: "Oficial" -> "Official".
        raise ValidationError(gettext('Official name must be between 4 and 15 characters long'))
# Default validator chain for the user's full-name field.
USER_FULL_NAME_VALIDATORS = [user_full_name_validator]
|
import numpy as np
from numpy.testing import (assert_equal,
assert_almost_equal,
assert_raises)
import skimage
from skimage import data
from skimage._shared._warnings import expected_warnings
from skimage.filters.thresholding import (threshold_adaptive,
threshold_otsu,
threshold_li,
threshold_yen,
threshold_isodata)
class TestSimpleImage():
    """Threshold tests on a small 5x5 synthetic integer image where the
    expected thresholds can be verified by hand."""

    def setup(self):
        # 5x5 test image with values 0-5; reused by all methods below.
        self.image = np.array([[0, 0, 1, 3, 5],
                               [0, 1, 4, 3, 4],
                               [1, 2, 5, 4, 1],
                               [2, 4, 5, 2, 1],
                               [4, 5, 1, 0, 0]], dtype=int)

    def test_otsu(self):
        assert threshold_otsu(self.image) == 2

    def test_otsu_negative_int(self):
        # Shifting the image should shift the threshold by the same amount.
        image = self.image - 2
        assert threshold_otsu(image) == 0

    def test_otsu_float_image(self):
        image = np.float64(self.image)
        assert 2 <= threshold_otsu(image) < 3

    def test_li(self):
        assert int(threshold_li(self.image)) == 2

    def test_li_negative_int(self):
        image = self.image - 2
        assert int(threshold_li(image)) == 0

    def test_li_float_image(self):
        image = np.float64(self.image)
        assert 2 <= threshold_li(image) < 3

    def test_yen(self):
        assert threshold_yen(self.image) == 2

    def test_yen_negative_int(self):
        image = self.image - 2
        assert threshold_yen(image) == 0

    def test_yen_float_image(self):
        image = np.float64(self.image)
        assert 2 <= threshold_yen(image) < 3

    def test_yen_arange(self):
        image = np.arange(256)
        assert threshold_yen(image) == 127

    def test_yen_binary(self):
        # Binary image: threshold must fall below the foreground value.
        image = np.zeros([2,256], dtype=np.uint8)
        image[0] = 255
        assert threshold_yen(image) < 1

    def test_yen_blank_zero(self):
        image = np.zeros((5, 5), dtype=np.uint8)
        assert threshold_yen(image) == 0

    def test_yen_blank_max(self):
        image = np.empty((5, 5), dtype=np.uint8)
        image.fill(255)
        assert threshold_yen(image) == 255

    def test_isodata(self):
        assert threshold_isodata(self.image) == 2
        assert threshold_isodata(self.image, return_all=True) == [2]

    def test_isodata_blank_zero(self):
        image = np.zeros((5, 5), dtype=np.uint8)
        assert threshold_isodata(image) == 0
        assert threshold_isodata(image, return_all=True) == [0]

    def test_isodata_linspace(self):
        image = np.linspace(-127, 0, 256)
        assert -63.8 < threshold_isodata(image) < -63.6
        assert_almost_equal(threshold_isodata(image, return_all=True),
                            [-63.74804688, -63.25195312])

    def test_isodata_16bit(self):
        np.random.seed(0)
        imfloat = np.random.rand(256, 256)
        assert 0.49 < threshold_isodata(imfloat, nbins=1024) < 0.51
        assert all(0.49 < threshold_isodata(imfloat, nbins=1024,
                                            return_all=True))

    def test_threshold_adaptive_generic(self):
        # Custom aggregation function (mean over the flattened window).
        def func(arr):
            return arr.sum() / arr.shape[0]
        ref = np.array(
            [[False, False, False, False, True],
             [False, False, True, False, True],
             [False, False, True, True, False],
             [False, True, True, False, False],
             [ True, True, False, False, False]]
        )
        out = threshold_adaptive(self.image, 3, method='generic', param=func)
        assert_equal(ref, out)

    def test_threshold_adaptive_gaussian(self):
        ref = np.array(
            [[False, False, False, False, True],
             [False, False, True, False, True],
             [False, False, True, True, False],
             [False, True, True, False, False],
             [ True, True, False, False, False]]
        )
        out = threshold_adaptive(self.image, 3, method='gaussian')
        assert_equal(ref, out)
        # Explicit sigma should give the same result as the default.
        out = threshold_adaptive(self.image, 3, method='gaussian', param=1.0 / 3.0)
        assert_equal(ref, out)

    def test_threshold_adaptive_mean(self):
        ref = np.array(
            [[False, False, False, False, True],
             [False, False, True, False, True],
             [False, False, True, True, False],
             [False, True, True, False, False],
             [ True, True, False, False, False]]
        )
        out = threshold_adaptive(self.image, 3, method='mean')
        assert_equal(ref, out)

    def test_threshold_adaptive_median(self):
        ref = np.array(
            [[False, False, False, False, True],
             [False, False, True, False, False],
             [False, False, True, False, False],
             [False, False, True, True, False],
             [False, True, False, False, False]]
        )
        out = threshold_adaptive(self.image, 3, method='median')
        assert_equal(ref, out)
# Threshold tests against the skimage sample images; expected values are
# regression references (ranges, since histogram binning may vary slightly).

def test_otsu_camera_image():
    camera = skimage.img_as_ubyte(data.camera())
    assert 86 < threshold_otsu(camera) < 88


def test_otsu_coins_image():
    coins = skimage.img_as_ubyte(data.coins())
    assert 106 < threshold_otsu(coins) < 108


def test_otsu_coins_image_as_float():
    coins = skimage.img_as_float(data.coins())
    assert 0.41 < threshold_otsu(coins) < 0.42


def test_otsu_astro_image():
    img = skimage.img_as_ubyte(data.astronaut())
    # astronaut() is RGB; otsu warns about implicit grayscale conversion.
    with expected_warnings(['grayscale']):
        assert 109 < threshold_otsu(img) < 111


def test_otsu_one_color_image():
    # A constant image has no histogram separation; otsu must raise.
    img = np.ones((10, 10), dtype=np.uint8)
    assert_raises(TypeError, threshold_otsu, img)


def test_li_camera_image():
    camera = skimage.img_as_ubyte(data.camera())
    assert 63 < threshold_li(camera) < 65


def test_li_coins_image():
    coins = skimage.img_as_ubyte(data.coins())
    assert 95 < threshold_li(coins) < 97


def test_li_coins_image_as_float():
    coins = skimage.img_as_float(data.coins())
    assert 0.37 < threshold_li(coins) < 0.38


def test_li_astro_image():
    img = skimage.img_as_ubyte(data.astronaut())
    assert 66 < threshold_li(img) < 68


def test_yen_camera_image():
    camera = skimage.img_as_ubyte(data.camera())
    assert 197 < threshold_yen(camera) < 199


def test_yen_coins_image():
    coins = skimage.img_as_ubyte(data.coins())
    assert 109 < threshold_yen(coins) < 111


def test_yen_coins_image_as_float():
    coins = skimage.img_as_float(data.coins())
    assert 0.43 < threshold_yen(coins) < 0.44


def test_adaptive_even_block_size_error():
    # Adaptive thresholding requires an odd block size.
    img = data.camera()
    assert_raises(ValueError, threshold_adaptive, img, block_size=4)
# isodata tests: besides checking the reference value, each test verifies the
# isodata fixed-point property -- the threshold equals the floored mean of
# the below-threshold and above-threshold class means.

def test_isodata_camera_image():
    camera = skimage.img_as_ubyte(data.camera())

    threshold = threshold_isodata(camera)
    assert np.floor((camera[camera <= threshold].mean() +
                     camera[camera > threshold].mean()) / 2.0) == threshold
    assert threshold == 87

    assert threshold_isodata(camera, return_all=True) == [87]


def test_isodata_coins_image():
    coins = skimage.img_as_ubyte(data.coins())

    threshold = threshold_isodata(coins)
    assert np.floor((coins[coins <= threshold].mean() +
                     coins[coins > threshold].mean()) / 2.0) == threshold
    assert threshold == 107

    assert threshold_isodata(coins, return_all=True) == [107]


def test_isodata_moon_image():
    moon = skimage.img_as_ubyte(data.moon())

    threshold = threshold_isodata(moon)
    assert np.floor((moon[moon <= threshold].mean() +
                     moon[moon > threshold].mean()) / 2.0) == threshold
    assert threshold == 86

    # The moon image has several isodata fixed points.
    thresholds = threshold_isodata(moon, return_all=True)
    for threshold in thresholds:
        assert np.floor((moon[moon <= threshold].mean() +
                         moon[moon > threshold].mean()) / 2.0) == threshold
    assert_equal(thresholds, [86, 87, 88, 122, 123, 124, 139, 140])


def test_isodata_moon_image_negative_int():
    # Same image shifted into negative values; thresholds shift by -100.
    moon = skimage.img_as_ubyte(data.moon()).astype(np.int32)
    moon -= 100

    threshold = threshold_isodata(moon)
    assert np.floor((moon[moon <= threshold].mean() +
                     moon[moon > threshold].mean()) / 2.0) == threshold
    assert threshold == -14

    thresholds = threshold_isodata(moon, return_all=True)
    for threshold in thresholds:
        assert np.floor((moon[moon <= threshold].mean() +
                         moon[moon > threshold].mean()) / 2.0) == threshold
    assert_equal(thresholds, [-14, -13, -12, 22, 23, 24, 39, 40])


def test_isodata_moon_image_negative_float():
    moon = skimage.img_as_ubyte(data.moon()).astype(np.float64)
    moon -= 100

    assert -14 < threshold_isodata(moon) < -13

    thresholds = threshold_isodata(moon, return_all=True)
    assert_almost_equal(thresholds,
                        [-13.83789062, -12.84179688, -11.84570312, 22.02148438,
                         23.01757812, 24.01367188, 38.95507812, 39.95117188])
# Allow running this test module directly.
if __name__ == '__main__':
    np.testing.run_module_suite()
|
from os.path import join
from setuptools import setup, find_packages
import pygithub3
# multiprocessing/logging are imported up front as a workaround for a
# known atexit issue when running tests via nose (http://bugs.python.org/issue15881).
try:
    import multiprocessing
    import logging
except ImportError:
    pass

# Read files with context managers so the handles are closed, and build
# install_requires as a real list: the original passed a lazy map object,
# which is exhausted/unusable under Python 3.
with open('README.rst') as readme_file:
    long_description = readme_file.read()
with open(join('requirements', 'base.txt')) as requirements_file:
    install_requires = [line.strip() for line in requirements_file]

setup(
    name=pygithub3.__name__,
    version=pygithub3.__version__,
    author=pygithub3.__author__,
    author_email=pygithub3.__email__,
    url='https://github.com/copitux/python-github3',
    description='Python wrapper for the github v3 api',
    long_description=long_description,
    license='ISC',
    packages=find_packages(exclude=['*tests*']),
    test_suite='nose.collector',
    tests_require=[
        'nose',
        'mock',
    ],
    install_requires=install_requires,
    include_package_data=True,
    classifiers=(
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Operating System :: OS Independent',
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
    ),
)
|
"""Print SBML model species and PySB model species for 1-1 comparison."""
from __future__ import division
import difflib
import re
import argparse
import sys
import rasmodel.chen_2009
import rasmodel.chen_2009.original_sbml
import pysb
from pysb.bng import generate_equations
def get_pysb_species():
    """Return species names mostly aligned to SBML model naming convention.

    Returns a (names, species) pair, both sorted by name, where names are
    strings formatted to match the sbml model's species labels.
    """
    # Note that when we refer to to "sbml model" or just "sbml" below we
    # specifically mean the sbml version of the Chen 2009 model (as opposed to
    # the pysb version of it) and not any sbml model in general.
    model = rasmodel.chen_2009.model
    # This is a rough total ordering of the protein names as used in the sbml
    # model species labels. E.g. EGF always comes before ErbB1. The sbml naming
    # is not totally consistent which is why this is a "rough" ordering. We will
    # use this ordering below to sort pysb monomer names within each species to
    # produce names ordered as similarly as possible to the sbml ones.
    ordering = ('EGF HRG ErbB1 ErbB2 ErbB3 ErbB4 ATP RTK_Pase GAP Shc Grb2 '
                'Gab1 Shp2 Pase9t PI3K PIP2 Sos ERK MEK Pase2 Pase3 Raf Pase1 '
                'Ras cPP PIP3 AKT Pase4 PDK1 PTEN Shp').split()
    ordering_map = {p: i for i, p in enumerate(ordering)}
    # Augment species objects with their original numbering.
    for i, s in enumerate(model.species):
        s.index = i
    # Throw out source and sink species and convert to strings.
    species = [s for s in model.species
               if str(s) not in ('__source()', '__sink()')]
    species_str = [str(s) for s in species]
    # Split complexes on % to produce monomers.
    monomers_in_species = [i.split(" % ") for i in species_str]
    labels = []
    for mlist in monomers_in_species:
        # Fix some minor spelling/case differences in some proteins.
        mlist = [s.replace('SHC', 'Shc') for s in mlist]
        mlist = [s.replace('SOS', 'Sos') for s in mlist]
        mlist = [s.replace('Pase_9t', 'Pase9t') for s in mlist]
        mlist = [s.replace('RTK', 'RTK_Pase') for s in mlist]
        mlist = [s.replace('RAF', 'Raf') for s in mlist]
        mlist = [s.replace('RAS', 'Ras') for s in mlist]
        # Topological sort on monomers based on protein ordering defined above.
        # (Sort key is the monomer name: everything before the first paren.)
        temp = sorted(mlist, key=lambda s: ordering_map[s[:s.index('(')]])
        # Convert various state flags to text suffixes used in sbml.
        temp = [re.sub(r'([^(]+).*state=\'p\'.*', r'\1#P', i) for i in temp]
        temp = [re.sub(r'([^(]+).*state=\'pp\'.*', r'\1#P#P', i) for i in temp]
        temp = [re.sub(r'([^(]+).*state=\'gdp\'.*', r'\1:GDP', i) for i in temp]
        temp = [re.sub(r'([^(]+).*state=\'gtp\'.*', r'\1:GTP', i) for i in temp]
        temp = [re.sub(r'([^(]+).*state=\'active_gtp\'.*', r'\1_activated:GTP', i) for i in temp]
        temp = [re.sub(r'([^(]+).*state=\'full_act\'.*', r'\1-FullActive', i) for i in temp]
        temp = [re.sub(r'(Raf).*state=\'p_ser\'.*', r'\1:P:Ser', i) for i in temp]
        # Strip remaining sites and parens, leaving just name and suffix.
        temp = [re.sub(r'([^(]+).*', r'\1', i) for i in temp]
        # Join the proteins back together on : as used in sbml.
        s = ':'.join(temp)
        # Apply some special case naming patterns to match sbml.
        s = re.sub(r'ATP:(GAP:Grb2:Gab1)', r'\1:ATP', s)
        s = re.sub(r'EGF:EGF', r'EGF', s)
        s = re.sub(r'(PIP2:?){2,}', lambda m: '(PIP2)%d' % ((len(m.group())+1)/5), s)
        s = re.sub(r'(ErbB\d)#P:(ErbB\d)#P', r'(\1:\2)#P', s)
        s = re.sub(r'^\(ErbB2:ErbB([34])\)', r'(ErbB\1:ErbB2)', s)
        s = re.sub(r'^\(ErbB2:ErbB2\)', r'2(ErbB2)', s)
        s = re.sub(r'EGF:\(ErbB1:ErbB1\)', r'2(EGF:ErbB1)', s)
        s = re.sub(r'^EGF:ErbB1:ErbB1:ATP$', r'2(EGF:ErbB1:ATP)', s)
        s = re.sub(r'(Shc#P)', r'(\1)', s)
        s = re.sub(r'(Sos:)(Ras:G[DT]P)', r'\1(\2)', s)
        s = re.sub(r'AKT#P#P', r'AKT:P:P', s)
        s = re.sub(r'HRG:ErbB1:ErbB([34])', r'(HRG:ErbB\1:ErbB1)', s)
        s = re.sub(r'HRG:ErbB2:ErbB([34])', r'(HRG:ErbB\1):ErbB2', s)
        # Move the -FullActive suffix to the end of the whole label.
        if '-FullActive' in s:
            s = s.replace('-FullActive', '')
            s = s + '-FullActive'
        s = re.sub(r'^EGF:ErbB1:ErbB1:ATP:ATP(-FullActive|)', r'2(EGF:ErbB1:ATP)\1', s)
        labels.append(s)
    for i, comp in enumerate(species_str):
        if "comp='endo'" in comp:
            # For species in the endo compartment, prepend endo prefix to name
            # and apply one special case name fixup.
            labels[i] = re.sub(r'(EGF:ErbB1:ErbB[234])$', r'(\1)', labels[i])
            labels[i] = 'endo|' + labels[i]
    # Append " @ value" initial-condition suffixes for nonzero initials.
    ics = [''] * len(species)
    for ic_species, ic_parameter in model.initial_conditions:
        if ic_parameter.value != 0 and str(ic_species) != '__source()':
            idx = next(i for i, s in enumerate(species)
                       if s.is_equivalent_to(ic_species))
            ics[idx] = ' @ %.17g' % ic_parameter.value
    names = [label + ic for ic, label in zip(ics, labels)]
    # Sort names and species by names.
    names, species = zip(*sorted(zip(names, species)))
    return names, species
def get_sbml_species():
    """Return (names, species) for the sbml model, sorted by name.

    Species outside the comparison scope (inhibitor/internalization variants
    and a few special cases) are filtered out.
    """
    model = rasmodel.chen_2009.original_sbml.model
    # We will ignore species whose labels contain these strings.
    ignore_patterns = ('_i', '_h', 'Inh')
    # We will ignore these individually named species.
    ignore_names = (
        # Degradation sinks.
        'c13', 'c520', 'c86',
        # MEK#P#P:ERK "_i" species that are missing _i in label.
        'c80', 'c82', 'c96', 'c98',
    )
    species = [s for s in model.species
               if not any(i in s.label for i in ignore_patterns)
               and s.name not in ignore_names]
    labels = [s.label for s in species]
    # Append " @ value" suffixes for nonzero initial amounts.
    ics = [' @ %.17g' % s.initial_amount if s.initial_amount != 0 else ''
           for s in species]
    names = [label + ic for label, ic in zip(labels, ics)]
    # Sort names and species by names.
    names, species = zip(*sorted(zip(names, species)))
    return names, species
def get_pysb_reactions():
    """Return (descriptions, reactions) for the pysb model, sorted by
    description, with species rendered using their sbml names.

    NOTE(review): relies on the module-level globals `pysb_species` and
    `sbml_species` being defined (they are assigned later in the script,
    before this function is called) -- confirm call order if refactoring.
    """
    model = rasmodel.chen_2009.model
    # Map pysb species indices to the matching sbml species names.
    pysb_to_sbml = {p.index: s.name for p, s in zip(pysb_species, sbml_species)}
    sink_index = next(i for i, s in enumerate(model.species)
                      if str(s) == '__sink()')
    pysb_to_sbml[sink_index] = '(degraded)'
    reactions = model.reactions_bidirectional
    # Augment reactions with their original numbering.
    for i, r in enumerate(reactions):
        r['index'] = i

    def format_side(indexes):
        # Render one side of a reaction as sorted " + "-joined species names.
        labels = [pysb_to_sbml[i] for i in indexes]
        return ' + '.join(sorted(labels))

    def format_param(parameter):
        # Render a rate parameter name, or '<0>' for missing/zero rates.
        if parameter is not None and parameter.get_value() != 0:
            pname = parameter.name
            # Fixup for BNG symmetry corrections.
            if pname.endswith('_symmetric'):
                orig_param = parameter.expr / 2
                if not isinstance(orig_param, pysb.Parameter):
                    raise RuntimeError("Unexpected expression structure")
                pname = orig_param.name
            return pname
        else:
            return '<0>'

    descriptions = []
    for reaction in reactions:
        assert len(reaction['rule']) == 1
        rule = model.rules[reaction['rule'][0]]
        left = reaction['reactants']
        right = reaction['products']
        kf = rule.rate_forward
        kr = rule.rate_reverse
        # Swap sides to put smaller species list or '(degraded)' on the right.
        if len(left) < len(right) or left == (sink_index,):
            left, right = right, left
            kf, kr = kr, kf
        desc = '%s -> %s {%s, %s}' % (format_side(left), format_side(right),
                                      format_param(kf), format_param(kr))
        descriptions.append(desc)
    # Sort names and reactions by names.
    descriptions, reactions = zip(*sorted(zip(descriptions, reactions)))
    return descriptions, reactions
def get_sbml_reactions():
    """Return (descriptions, reactions) for the sbml model, sorted by
    description, restricted to reactions fully within the comparison scope."""
    model = rasmodel.chen_2009.original_sbml.model
    # Degradation-sink species, rendered as '(degraded)' below.
    sinks = tuple(s for s in model.species if s.name in ('c13', 'c520', 'c86'))
    _, sbml_species = get_sbml_species()
    wanted = sbml_species + sinks
    # Skip reactions that aren't fully in our scope.
    reactions = [r for r in model.reactions
                 if all(s in wanted for s in r.reactants + r.products)]

    def format_side(species):
        labels = [s.name if s not in sinks else '(degraded)' for s in species]
        return ' + '.join(sorted(labels))

    def format_param(parameter):
        return parameter.name if parameter.value != 0 else '<0>'

    descriptions = []
    for reaction in reactions:
        left = reaction.reactants
        right = reaction.products
        kf = reaction.kf
        kr = reaction.kr
        desc = '%s -> %s {%s, %s}' % (format_side(left), format_side(right),
                                      format_param(kf), format_param(kr))
        descriptions.append(desc)
    # Sort names and reactions by names.
    descriptions, reactions = zip(*sorted(zip(descriptions, reactions)))
    return descriptions, reactions
# Top-level driver (Python 2 syntax -- note the print statements): compare
# species, then reactions (only once species match 100%), then parameter
# values (only once reactions match 100%), printing a side-by-side diff.
argparser = argparse.ArgumentParser()
argparser.add_argument('-m', '--print-matches', action='store_true',
                       help="Print matching elements too (mismatches are "
                       "always printed)")
args = argparser.parse_args(sys.argv[1:])

pysb_model = rasmodel.chen_2009.model
generate_equations(pysb_model)
rasmodel.chen_2009.original_sbml.load_model()
sbml_model = rasmodel.chen_2009.original_sbml.model

pysb_species_names, pysb_species = get_pysb_species()
sbml_species_names, sbml_species = get_sbml_species()
# Names must be unique or the sequence matching below is meaningless.
if len(pysb_species_names) != len(set(pysb_species_names)):
    raise RuntimeError("Duplicate pysb species names")
if len(sbml_species_names) != len(set(sbml_species_names)):
    raise RuntimeError("Duplicate sbml species names")

species_matches = reaction_matches = 0
# Two-column output format: sbml | match marker | pysb.
fmt = '%-60s\t%1s\t%-51s'
print fmt % ('SBML', '', 'PySB')
print fmt % ('=' * 50, '', '=' * 50)
print

# Diff the two sorted name lists to find matches and mismatches.
sm = difflib.SequenceMatcher(None, sbml_species_names, pysb_species_names)
for tag, i1, i2, j1, j2 in sm.get_opcodes():
    if tag in ('delete', 'replace'):
        # Species in sbml model but not pysb model.
        for si in range(i1, i2):
            ss = '%s : %s' % (sbml_species_names[si], sbml_species[si].name)
            print fmt % (ss, '', '')
    if tag in ('insert', 'replace'):
        # Species in pysb model but not in sbml model.
        for sj in range(j1, j2):
            ss = '%s : s%d' % (pysb_species_names[sj], sj)
            print fmt % ('', '', ss)
    if tag == 'equal':
        # Species in both.
        species_matches += i2 - i1
        if args.print_matches:
            for si, sj in zip(range(i1, i2), range(j1, j2)):
                sbml = '%s : %s' % (sbml_species_names[si], sbml_species[si].name)
                pysb = '%s : s%d' % (pysb_species_names[sj], sj)
                print fmt % (sbml, '=', pysb)
# True division is in effect via the __future__ import at the top.
species_match_percent = species_matches / len(sbml_species_names) * 100

if species_match_percent == 100.0:
    # Only compare reactions once the species sets line up exactly.
    pysb_reaction_descs, pysb_reactions = get_pysb_reactions()
    sbml_reaction_descs, sbml_reactions = get_sbml_reactions()
    sm = difflib.SequenceMatcher(None, sbml_reaction_descs, pysb_reaction_descs)
    for tag, i1, i2, j1, j2 in sm.get_opcodes():
        if tag in ('delete', 'replace'):
            # Reactions in sbml model but not pysb model.
            for si in range(i1, i2):
                ss = '%s : %s' % (sbml_reaction_descs[si],
                                  sbml_reactions[si].name)
                print fmt % (ss, '', '')
        if tag in ('insert', 'replace'):
            # Reactions in pysb model but not in sbml model.
            for sj in range(j1, j2):
                ss = '%s : r%d, %s' % (pysb_reaction_descs[sj],
                                       pysb_reactions[sj]['index'],
                                       pysb_reactions[sj]['rule'][0])
                print fmt % ('', '', ss)
        if tag == 'equal':
            # Reactions in both.
            reaction_matches += i2 - i1
            if args.print_matches:
                for si, sj in zip(range(i1, i2), range(j1, j2)):
                    sbml = '%s : %s' % (sbml_reaction_descs[si],
                                        sbml_reactions[si].name)
                    pysb = '%s : r%d, %s' % (pysb_reaction_descs[sj],
                                             pysb_reactions[sj]['index'],
                                             pysb_reactions[sj]['rule'][0])
                    print fmt % (sbml, '=', pysb)
    reaction_match_percent = reaction_matches / len(sbml_reactions) * 100
else:
    reaction_match_percent = 0

if reaction_match_percent == 100.0:
    # Only compare parameter values once the reactions line up exactly.
    pysb_parameters = pysb_model.parameters_rules()
    sbml_parameters = {}
    sbml_model.export_globals(sbml_parameters)
    parameter_mismatches = []
    for pysb_parameter in pysb_parameters:
        # Fixup for BNG symmetry corrections.
        pname = pysb_parameter.name.replace('_symmetric', '')
        try:
            sbml_parameter = sbml_parameters[pname]
        except KeyError:
            parameter_mismatches.append((pysb_parameter, None))
            continue
        if pysb_parameter.get_value() != sbml_parameter.value:
            parameter_mismatches.append((pysb_parameter, sbml_parameter))
    num_param_matches = len(pysb_parameters) - len(parameter_mismatches)
    parameter_match_percent = num_param_matches / len(pysb_parameters) * 100

# Summary output (the unicode escape is a celebratory beer-mugs emoji).
print
print "Species matches: %d / %d -- %.2f%% %s" % (
    species_matches, len(sbml_species), species_match_percent,
    u'\U0001f37b' if species_match_percent == 100 else ''
)
print "SBML species missed: %d" % (len(sbml_species) - species_matches)
print "PySB surplus species: %d" % (len(pysb_species) - species_matches)
if species_match_percent == 100.0:
    print
    print "Reaction matches: %d / %d -- %.2f%% %s" % (
        reaction_matches, len(sbml_reactions), reaction_match_percent,
        u'\U0001f37b' if reaction_match_percent == 100 else ''
    )
    print "SBML reactions missed: %d" % (len(sbml_reactions) - reaction_matches)
    print "PySB surplus reactions: %d" % (len(pysb_reactions) - reaction_matches)
else:
    print "\nSkipping reaction comparison until species match is 100%"
if reaction_match_percent == 100.0:
    print
    print "Parameter value matches: %d / %d -- %.2f%% %s" % (
        num_param_matches, len(pysb_parameters), parameter_match_percent,
        u'\U0001f37b' if parameter_match_percent == 100 else ''
    )

# Reference set of pysb species indices expected in the pErbB1 observable.
sbml_perbb1_species = set([
    85, 86, 110, 111, 119, 120, 121, 122, 153, 154, 156, 157, 158, 159, 160,
    161, 162, 163, 166, 167, 169, 170, 172, 173, 175, 176, 188, 189, 202, 205,
    206, 207, 208, 209, 210, 211, 212, 213, 216, 217, 218, 219, 220, 221, 222,
    223, 224, 225, 226, 227, 228, 229, 231, 232, 234, 235, 236, 237, 238, 239,
    240, 241, 246, 247, 251, 252, 253, 254, 255, 256, 257, 258, 263, 264, 270,
    271, 272, 273, 274, 275, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
    289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 307, 308, 309,
    310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 327, 328,
    329, 330, 337, 338, 339, 340, 344, 345, 346, 347, 348, 349, 350, 351, 352,
    353, 354, 355, 356, 357, 358, 359, 368, 369, 370, 371, 373, 374, 379, 380,
    381, 382, 383, 384, 387, 388, 392, 393, 395, 396, 408, 409, 411, 412, 414,
    415, 419, 420, 425, 426, 427, 428, 431, 432
])
# pErbB1 = total phospho-ErbB1 species minus the declared exception sets.
pysb_perbb1_species = (
    set(pysb_model.observables['pErbB1_total'].species) -
    set(pysb_model.observables['pErbB11_exceptions'].species) -
    set(pysb_model.observables['pErbB12_exceptions'].species)
)
print
if pysb_perbb1_species != sbml_perbb1_species:
    print "pErbB1 observable is WRONG:"
    print " missing:", ', '.join(str(i) for i in sbml_perbb1_species - pysb_perbb1_species)
    print " extra:", ', '.join(str(i) for i in pysb_perbb1_species - sbml_perbb1_species)
else:
    print "pErbB1 observable is correct"

# Convenience lookup tables for interactive use: sbml name <-> pysb index.
sbml2pysb = {x[0].name: x[1].index
             for x in sorted(zip(sbml_species, pysb_species),
                             key=lambda x: int(x[0].name[1:]))}
pysb2sbml = {x[1].index: x[0].name
             for x in sorted(zip(sbml_species, pysb_species),
                             key=lambda x: x[1].index)}
|
# Exercise array() construction from various source types, printing each
# result for comparison against reference output.
try:
    from array import array
except ImportError:
    # array module unavailable on this build; skip the whole test.
    import sys
    print("SKIP")
    sys.exit()

print(array('b', (1, 2)))        # from a tuple
print(array('h', [1, 2]))        # from a list
print(array('h', b'22'))  # should be byteorder-neutral
print(array('h', bytearray(2)))  # from zero-filled bytearrays
print(array('i', bytearray(4)))
print(array('H', array('b', [1, 2])))  # from another array (typecode widens)
print(array('b', array('I', [1, 2])))  # from another array (typecode narrows)
|
"""
homeassistant.util
~~~~~~~~~~~~~~~~~~
Helper methods for various modules.
"""
import collections
import collections.abc
import enum
import queue
import random
import re
import socket
import string
import threading
from datetime import datetime
from functools import wraps
from itertools import chain

from .dt import datetime_to_local_str, utcnow
# Characters usable for path traversal inside a bare filename.
RE_SANITIZE_FILENAME = re.compile(r'(~|\.\.|/|\\)')
# Home-dir references and runs of parent-directory dots inside a path.
RE_SANITIZE_PATH = re.compile(r'(~|\.(\.)+)')
# Anything that is not a lowercase letter, digit or underscore.
RE_SLUGIFY = re.compile(r'[^a-z0-9_]+')
def sanitize_filename(filename):
    """Return *filename* with traversal characters (~, .., / and \\) removed."""
    cleaned = RE_SANITIZE_FILENAME.sub("", filename)
    return cleaned
def sanitize_path(path):
    """Return *path* with ~ and parent-directory runs (..) removed."""
    cleaned = RE_SANITIZE_PATH.sub("", path)
    return cleaned
def slugify(text):
    """Slugify *text*: lowercase, spaces to underscores, other chars stripped."""
    lowered = text.lower().replace(" ", "_")
    return RE_SLUGIFY.sub("", lowered)
def split_entity_id(entity_id):
    """Split a state entity_id into its [domain, object_id] parts.

    Only the first dot separates; dots inside the object id are kept.
    """
    parts = entity_id.split(".", 1)
    return parts
def repr_helper(inp):
    """Return a more readable string representation of *inp*.

    Dicts become "key=value, ..." pairs, datetimes are rendered in local
    time, everything else falls back to str().
    """
    if isinstance(inp, dict):
        pairs = ("{}={}".format(repr_helper(key), repr_helper(value))
                 for key, value in inp.items())
        return ", ".join(pairs)
    if isinstance(inp, datetime):
        return datetime_to_local_str(inp)
    return str(inp)
def convert(value, to_type, default=None):
    """Convert *value* to *to_type*, returning *default* if it fails.

    A ``None`` value is passed through as *default* without attempting a
    conversion.
    """
    if value is None:
        return default
    try:
        return to_type(value)
    # TypeError added: int(["x"]) and similar raise TypeError, not
    # ValueError, and the documented contract is "default if fails".
    except (ValueError, TypeError):
        return default
def ensure_unique_string(preferred_string, current_strings):
    """ Returns a string that is not present in current_strings.
    If preferred string exists will append _2, _3, .. """
    existing = set(current_strings)
    if preferred_string not in existing:
        return preferred_string
    suffix = 2
    while "{}_{}".format(preferred_string, suffix) in existing:
        suffix += 1
    return "{}_{}".format(preferred_string, suffix)
def get_local_ip():
    """Try to determine the local IP address of the machine.

    Falls back to resolving the hostname (which may yield 127.0.0.1) when
    no outbound interface can be determined.
    """
    # Initialize so the finally clause is safe even if socket() itself
    # raises; the original code hit a NameError on sock.close() then.
    sock = None
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Use Google Public DNS server to determine own IP.  Connecting a
        # UDP socket sends no traffic; it only selects the route/interface.
        sock.connect(('8.8.8.8', 80))
        return sock.getsockname()[0]
    except socket.error:
        return socket.gethostbyname(socket.gethostname())
    finally:
        if sock is not None:
            sock.close()
def get_random_string(length=10):
    """ Returns a random string with letters and digits. """
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))
class OrderedEnum(enum.Enum):
    """Enum whose members order by value (Python 3.4.0 docs recipe)."""
    # pylint: disable=no-init, too-few-public-methods
    def __ge__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value >= other.value
    def __gt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value > other.value
    def __le__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value <= other.value
    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value < other.value
class OrderedSet(collections.abc.MutableSet):
    """Set that remembers insertion order.

    Taken from http://code.activestate.com/recipes/576694/.  Implemented
    as a dict mapping each key to its node in a circular doubly linked
    list, giving O(1) add/discard while preserving order.
    Note: ``collections.MutableSet`` was removed in Python 3.10; the ABC
    now lives in ``collections.abc``.
    """
    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.map = {}            # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable
    def __len__(self):
        return len(self.map)
    def __contains__(self, key):
        return key in self.map
    def add(self, key):
        """ Add an element to the end of the set. """
        if key not in self.map:
            end = self.end
            curr = end[1]
            curr[2] = end[1] = self.map[key] = [key, curr, end]
    def promote(self, key):
        """ Promote element to beginning of the set, add if not there. """
        if key in self.map:
            self.discard(key)
        begin = self.end[2]
        curr = begin[1]
        curr[2] = begin[1] = self.map[key] = [key, curr, begin]
    def discard(self, key):
        """ Discard an element from the set. """
        if key in self.map:
            # Unlink the node from the list and drop it from the map.
            key, prev_item, next_item = self.map.pop(key)
            prev_item[2] = next_item
            next_item[1] = prev_item
    def __iter__(self):
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def pop(self, last=True):  # pylint: disable=arguments-differ
        """ Pops element of the end of the set.
        Set last=False to pop from the beginning. """
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key
    def update(self, *args):
        """ Add elements from args to the set. """
        for item in chain(*args):
            self.add(item)
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))
    def __eq__(self, other):
        # Order matters only when comparing against another OrderedSet.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
class Throttle(object):
    """
    A method decorator to add a cooldown to a method to prevent it from being
    called more then 1 time within the timedelta interval `min_time` after it
    returned its result.
    Calling a method a second time during the interval will return None.
    Pass keyword argument `no_throttle=True` to the wrapped method to make
    the call not throttled.
    Decorator takes in an optional second timedelta interval to throttle the
    'no_throttle' calls.
    Adds a datetime attribute `last_call` to the method.
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, min_time, limit_no_throttle=None):
        self.min_time = min_time
        self.limit_no_throttle = limit_no_throttle
    def __call__(self, method):
        # One lock per decorated method; it also makes a concurrent call
        # return None while another call is still executing.
        lock = threading.Lock()
        if self.limit_no_throttle is not None:
            # Wrap in an inner Throttle so even `no_throttle=True` calls
            # are still limited by the second interval.
            method = Throttle(self.limit_no_throttle)(method)
        @wraps(method)
        def wrapper(*args, **kwargs):
            """
            Wrapper that allows wrapped to be called only once per min_time.
            If we cannot acquire the lock, it is running so return None.
            """
            # Non-blocking acquire: False means another thread is inside.
            if not lock.acquire(False):
                return None
            try:
                last_call = wrapper.last_call
                # Check if method is never called or no_throttle is given
                force = not last_call or kwargs.pop('no_throttle', False)
                if force or utcnow() - last_call > self.min_time:
                    result = method(*args, **kwargs)
                    # Only calls that actually ran reset the cooldown timer.
                    wrapper.last_call = utcnow()
                    return result
                else:
                    return None
            finally:
                lock.release()
        wrapper.last_call = None
        return wrapper
class ThreadPool(object):
    """ A priority queue-based thread pool. """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, job_handler, worker_count=0, busy_callback=None):
        """
        job_handler: method to be called from worker thread to handle job
        worker_count: number of threads to run that handle jobs
        busy_callback: method to be called when queue gets too big.
                       Parameters: worker_count, list of current_jobs,
                                   pending_jobs_count
        """
        self._job_handler = job_handler
        self._busy_callback = busy_callback
        # Both counters are maintained by add_worker()/remove_worker().
        self.worker_count = 0
        self.busy_warning_limit = 0
        self._work_queue = queue.PriorityQueue()
        # (start_time, job) tuples of jobs currently being handled.
        self.current_jobs = []
        self._lock = threading.RLock()
        # Sentinel job: a worker that dequeues it exits its loop.
        self._quit_task = object()
        self.running = True
        for _ in range(worker_count):
            self.add_worker()
    def add_worker(self):
        """ Adds a worker to the thread pool. Resets warning limit. """
        with self._lock:
            if not self.running:
                raise RuntimeError("ThreadPool not running")
            worker = threading.Thread(target=self._worker)
            worker.daemon = True
            worker.start()
            self.worker_count += 1
            self.busy_warning_limit = self.worker_count * 3
    def remove_worker(self):
        """ Removes a worker from the thread pool. Resets warning limit. """
        with self._lock:
            if not self.running:
                raise RuntimeError("ThreadPool not running")
            # Priority 0 sorts before real jobs, so the quit sentinel is
            # picked up as soon as any worker becomes free.
            self._work_queue.put(PriorityQueueItem(0, self._quit_task))
            self.worker_count -= 1
            self.busy_warning_limit = self.worker_count * 3
    def add_job(self, priority, job):
        """ Add a job to the queue. """
        with self._lock:
            if not self.running:
                raise RuntimeError("ThreadPool not running")
            self._work_queue.put(PriorityQueueItem(priority, job))
            # check if our queue is getting too big
            if self._work_queue.qsize() > self.busy_warning_limit \
               and self._busy_callback is not None:
                # Increase limit we will issue next warning
                self.busy_warning_limit *= 2
                self._busy_callback(
                    self.worker_count, self.current_jobs,
                    self._work_queue.qsize())
    def block_till_done(self):
        """ Blocks till all work is done. """
        self._work_queue.join()
    def stop(self):
        """ Stops all the threads. """
        with self._lock:
            if not self.running:
                return
            # Ensure all current jobs finish
            self.block_till_done()
            # Tell the workers to quit
            for _ in range(self.worker_count):
                self.remove_worker()
            self.running = False
            # Wait till all workers have quit
            self.block_till_done()
    def _worker(self):
        """ Handles jobs for the thread pool. """
        while True:
            # Get new item from work_queue
            job = self._work_queue.get().item
            if job == self._quit_task:
                self._work_queue.task_done()
                return
            # Add to current running jobs
            job_log = (utcnow(), job)
            self.current_jobs.append(job_log)
            # Do the job
            self._job_handler(job)
            # Remove from current running job
            self.current_jobs.remove(job_log)
            # Tell work_queue the task is done
            self._work_queue.task_done()
class PriorityQueueItem(object):
    """Pair a value with a priority so a PriorityQueue can order it."""
    # pylint: disable=too-few-public-methods
    def __init__(self, priority, item):
        self.priority = priority
        self.item = item
    def __lt__(self, other):
        # Only the priority participates in ordering; the payload is opaque.
        return self.priority < other.priority
|
"""
Generate trace/generated-helpers-wrappers.h.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2016, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
from tracetool.transform import *
import tracetool.vcpu
def generate(events, backend):
    """Emit trace/generated-helpers-wrappers.h for the enabled trace events."""
    # Disabled events get no TCG helper wrapper at all.
    events = [e for e in events
              if "disable" not in e.properties]
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        '#define tcg_temp_new_nop(v) (v)',
        '#define tcg_temp_free_nop(v)',
        '',
        )
    for e in events:
        if "tcg-exec" not in e.properties:
            continue
        # tracetool.generate always transforms types to host
        e_args = tracetool.vcpu.transform_args("tcg_helper_c", e.original, "wrapper")
        # mixed-type to TCG helper bridge
        args_tcg_compat = e_args.transform(HOST_2_TCG_COMPAT)
        # Statements copying each host-typed argument into a TCG temporary.
        code_new = [
            "%(tcg_type)s __%(name)s = %(tcg_func)s(%(name)s);" %
            {"tcg_type": transform_type(type_, HOST_2_TCG),
             "tcg_func": transform_type(type_, HOST_2_TCG_TMP_NEW),
             "name": name}
            for (type_, name) in args_tcg_compat
        ]
        # Statements releasing those temporaries after the proxy call.
        code_free = [
            "%(tcg_func)s(__%(name)s);" %
            {"tcg_func": transform_type(type_, HOST_2_TCG_TMP_FREE),
             "name": name}
            for (type_, name) in args_tcg_compat
        ]
        gen_name = "gen_helper_" + e.api()
        # Wrapper: convert the arguments, call the _proxy helper, free temps.
        out('static inline void %(name)s(%(args)s)',
            '{',
            '    %(code_new)s',
            '    %(proxy_name)s(%(tmp_names)s);',
            '    %(code_free)s',
            '}',
            name=gen_name,
            args=e_args,
            proxy_name=gen_name + "_proxy",
            code_new="\n    ".join(code_new),
            code_free="\n    ".join(code_free),
            tmp_names=", ".join(["__%s" % name for _, name in e_args]),
            )
|
from dasbus.server.interface import dbus_interface
from dasbus.server.property import emits_properties_changed
from dasbus.typing import * # pylint: disable=wildcard-import
from pyanaconda.modules.common.base import KickstartModuleInterfaceTemplate
from pyanaconda.modules.common.constants.objects import DISK_SELECTION
from pyanaconda.modules.common.structures.validation import ValidationReport
@dbus_interface(DISK_SELECTION.interface_name)
class DiskSelectionInterface(KickstartModuleInterfaceTemplate):
    """DBus interface for the disk selection module.

    Thin delegation layer: every property and method forwards to
    ``self.implementation``, and watched properties are re-emitted on DBus
    whenever the implementation signals a change.
    """
    def connect_signals(self):
        """Connect the signals."""
        super().connect_signals()
        self.watch_property("SelectedDisks", self.implementation.selected_disks_changed)
        self.watch_property("ExclusiveDisks", self.implementation.exclusive_disks_changed)
        self.watch_property("IgnoredDisks", self.implementation.ignored_disks_changed)
        self.watch_property("ProtectedDevices", self.implementation.protected_devices_changed)
        self.watch_property("DiskImages", self.implementation.disk_images_changed)
    @property
    def SelectedDisks(self) -> List[Str]:
        """The list of selected disks."""
        return self.implementation.selected_disks
    @emits_properties_changed
    def SetSelectedDisks(self, drives: List[Str]):
        """Set the list of selected disks.
        Specifies those disks that anaconda can use for
        partitioning, formatting, and clearing.
        :param drives: a list of drives names
        """
        self.implementation.set_selected_disks(drives)
    def ValidateSelectedDisks(self, drives: List[Str]) -> Structure:
        """Validate the list of selected disks.
        :param drives: a list of drives names
        :return: a validation report
        """
        return ValidationReport.to_structure(
            self.implementation.validate_selected_disks(drives)
        )
    @property
    def ExclusiveDisks(self) -> List[Str]:
        """The list of drives to scan."""
        return self.implementation.exclusive_disks
    @emits_properties_changed
    def SetExclusiveDisks(self, drives: List[Str]):
        """Set the list of drives to scan.
        Specifies those disks that anaconda will scan during
        the storage reset. If the list is empty, anaconda will
        scan all drives.
        It can be set from the kickstart with 'ignoredisk --onlyuse'.
        :param drives: a list of drives names
        """
        self.implementation.set_exclusive_disks(drives)
    @property
    def IgnoredDisks(self) -> List[Str]:
        """The list of ignored disks."""
        return self.implementation.ignored_disks
    @emits_properties_changed
    def SetIgnoredDisks(self, drives: List[Str]):
        """Set the list of ignored disks.
        Specifies those disks that anaconda should not touch
        when it does partitioning, formatting, and clearing.
        :param drives: a list of drive names
        """
        self.implementation.set_ignored_disks(drives)
    @property
    def ProtectedDevices(self) -> List[Str]:
        """The list of devices to protect."""
        return self.implementation.protected_devices
    @emits_properties_changed
    def SetProtectedDevices(self, devices: List[Str]):
        """Set the list of protected devices.
        Specifies those disks that anaconda should protect.
        :param devices: a list of device names
        """
        self.implementation.set_protected_devices(devices)
    @property
    def DiskImages(self) -> Dict[Str, Str]:
        """The dictionary of disk images."""
        return self.implementation.disk_images
    @emits_properties_changed
    def SetDiskImages(self, disk_images: Dict[Str, Str]):
        """Set the dictionary of disk images.
        :param disk_images: a dictionary of image names and file names
        """
        self.implementation.set_disk_images(disk_images)
    def GetUsableDisks(self) -> List[Str]:
        """Get a list of disks that can be used for the installation.
        :return: a list of disk names
        """
        return self.implementation.get_usable_disks()
|
"""Module to provide skill recommendations and non-linear course navigation."""
__author__ = 'Boris Roussev (borislavr@google.com)'
class BaseSkillRecommender(object):
    """Abstract behavior of a skill recommendation algorithm."""
    def __init__(self, skill_map):
        self._skill_map = skill_map
    def recommend(self):
        """Produce a prioritized list of skills for the user to learn."""
        raise NotImplementedError()
class TopoSkillRecommender(BaseSkillRecommender):
    """Recommend skills whose prerequisites are satisfied, in toposort order."""
    def recommend(self):
        """Return (recommended, learned) lists of skills.

        learned: skills the user is already proficient in.
        recommended: not-yet-learned skills whose prerequisites (possibly
        none) are all learned.
        """
        learned = []
        recommended = []
        for candidate in self._skill_map.skills(sort_by='prerequisites'):
            if candidate.proficient:
                learned.append(candidate)
            elif all(prereq.proficient for prereq in candidate.prerequisites):
                recommended.append(candidate)
        return recommended, learned
class SkillRecommender(object):
    """Static factory for skill recommenders."""
    @staticmethod
    def instance(skill_map, type_name=None):
        """Build a recommender of *type_name* (default: TopoSkillRecommender)."""
        chosen = type_name or 'TopoSkillRecommender'
        if chosen != 'TopoSkillRecommender':
            raise AssertionError('Unexpected recommender: %s.' % chosen)
        return TopoSkillRecommender(skill_map)
|
import time
import sys
import pygame
import pygame.image
import pygasus
def main():
    """Run the pygasus NES emulator with a 256x240 pygame display."""
    pygame.init()
    hScreen=pygame.display.set_mode((256,240))
    # Load the iNES ROM given on the command line and power-cycle the CPU.
    pygasus.read_ines(sys.argv[1])
    pygasus.pReset()
    # Map keyboard keys to the emulated controller buttons.
    pygasus.setkeys({
        'q': pygame.K_q, 'w': pygame.K_w, 'a': pygame.K_a, 's': pygame.K_s,
        'UP': pygame.K_UP, 'DOWN': pygame.K_DOWN, 'LEFT': pygame.K_LEFT, 'RIGHT': pygame.K_RIGHT,
    })
    counter = 0
    total = 0
    while True:
        counter += 1
        pygame.event.poll()
        keys=pygame.key.get_pressed()
        pygasus.setkeys2(keys)
        # ESC quits, F11 toggles fullscreen, SPACE pauses (busy-waits),
        # RETURN calls pygasus.tpF() -- NOTE(review): semantics of tpF are
        # defined in the pygasus module; verify there.
        if keys[pygame.K_ESCAPE]: break
        if keys[pygame.K_F11]: pygame.display.toggle_fullscreen()
        if keys[pygame.K_SPACE]:
            pygame.time.delay(100)
            continue
        if keys[pygame.K_RETURN]:
            pygasus.tpF()
        t0 = time.time()
        # Emulate one frame, then blit the RGB framebuffer to the window.
        pygasus.pExec()
        data = pygasus.getscreen()
        image = pygame.image.frombuffer(data, ( 256, 240 ), 'RGB')
        hScreen.blit(image, (0,0))
        pygame.display.flip()
        hScreen.fill([0,0,0])
        total += (time.time()-t0)
        # Report average emulation speed once every 60 frames.
        if counter % 60 == 0:
            print 'FPS:', 60/total
            total = 0
if __name__ == '__main__':
    main()
|
from os import path, makedirs, walk
from subprocess import Popen
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from threading import Thread
from datetime import datetime
from Queue import Queue
from time import sleep
from sys import exit, stdout, exc_info
from pprint import pprint
import logging
from libnmap.parser import NmapParser
from ghost import Ghost
class UrlManager(object):
    """Collects target URLs from plain files, directories and nmap XML reports."""
    @staticmethod
    def files_from_dir(directory, ext=None):
        """Recursively list files under *directory*, optionally filtered by
        (lowercased) extension."""
        print('Parsing dir: %s' % directory)
        file_list = []
        for root, dirs, files in walk(directory):
            for filename in files:
                fname = path.join(root, filename)
                if ext is None or fname.lower().endswith(ext):
                    file_list.append(fname)
        print('Files found:')
        for f in file_list:
            print(f)
        return file_list
    @staticmethod
    def urls_from_file(filename):
        """Return the lines of *filename* that contain 'http', or None if the
        file does not exist."""
        if not path.exists(filename):
            return
        return filter(lambda x: x.find('http') != -1, open(filename, 'rb').read().split('\n'))
    @staticmethod
    def urls_from_dir(directory):
        """Collect URLs from every file under *directory*."""
        files = UrlManager.files_from_dir(directory)
        urls = []
        for f in files:
            us = UrlManager.urls_from_file(f)
            urls.extend(us)
        return urls
    @staticmethod
    def urls_from_nmap_xml(nmap_file):
        """Build http(s) URLs for every open http-like service in an nmap XML
        report, or None if the file does not exist.

        Emits one URL per IP address and one per hostname when it differs.
        """
        if not path.exists(nmap_file):
            return
        report = NmapParser.parse_fromfile(nmap_file)
        urls = []
        for host in report.hosts:
            # Skip IPv6 addresses; the URL formatting below assumes IPv4.
            if host.address.find(':') != -1:
                continue
            if len(host.hostnames):
                tmp_host = host.hostnames.pop()
            else:
                tmp_host = host.address
            for serv in host.services:
                svc = serv.service.lower()
                if serv.state == 'open' and svc.find('http') != -1:
                    # BUG FIX: str.find() returns -1 (truthy) when 'ssl' is
                    # absent and 0 (falsy) when it is the prefix, so the
                    # original `if svc.find('ssl'):` chose the protocol
                    # exactly backwards.
                    if svc.find('ssl') != -1:
                        proto = 'https'
                    else:
                        proto = 'http'
                    urls.append('%s://%s:%i/' % (proto, host.address, serv.port))
                    if tmp_host != host.address:
                        urls.append('%s://%s:%i/' % (proto, tmp_host, serv.port))
        return urls
    @staticmethod
    def urls_from_nmap_dir(directory):
        """Collect URLs from every .xml nmap report under *directory*."""
        files = UrlManager.files_from_dir(directory, '.xml')
        urls = []
        for f in files:
            us = UrlManager.urls_from_nmap_xml(f)
            urls.extend(us)
        return urls
class Shotter(object):
    """Screenshots a URL with Ghost when available, else shells out to cutycapt."""
    def __init__(self,):
        try:
            self.ghost = Ghost()
        except ImportError:
            # Ghost/webkit bindings unavailable; fall back to cutycapt.
            self.ghost = None
    def cuty_shot(self, url, filename, x11=True, width=1024, height=768, colorbits=24):
        """Screenshot *url* into *filename* via cutycapt; returns True on success.

        When x11 is False, cutycapt runs under xvfb with the given virtual
        screen geometry.
        """
        #TODO: add check if cutycapt installed
        # NOTE(review): url/filename are interpolated into a shell command;
        # a crafted URL could inject shell metacharacters.
        if x11:
            cmd = 'cutycapt --url="%s" --out=%s' % (url, filename)
        else:
            # BUG FIX: the arguments must match the format order -- screen
            # geometry (%i %i %i) first, then url/out (%s %s).  The original
            # tuple started with (url, filename, ...), raising TypeError.
            cmd = 'xvfb-run --server-args="-screen 0, %ix%ix%i" cutycapt --url="%s" --out=%s' % \
                (width, height, colorbits, url, filename)
        try:
            Popen(cmd, shell=True).wait()
            return True
        # Narrowed from a bare `except:` that also swallowed SystemExit
        # and KeyboardInterrupt.
        except Exception:
            return False
    def ghost_shot(self, url, filename, ignore_errors=True):
        """Screenshot *url* into *filename* via Ghost; returns True on success.

        With ignore_errors=False the capture only happens for a 200
        response with a non-empty body.
        """
        try:
            page, resources = self.ghost.open(url)
            if ignore_errors:
                self.ghost.capture_to(filename)
                return True
            elif page.http_status == 200 and page.totalBytes() != 0:
                self.ghost.capture_to(filename)
                return True
            else:
                return False
        # Narrowed from a bare `except:`; still best-effort by design.
        except Exception:
            print(exc_info())
            return False
    def screenshot(self, url, filename, overwrite=False):
        """Screenshot *url* to *filename*, skipping existing files unless
        overwrite is set."""
        if path.exists(filename) and not overwrite:
            print('%s exists, skipping' % filename)
            return
        print('[SCREENSHOT] %s -> %s' % (url, filename))
        if self.ghost is not None:
            self.ghost_shot(url, filename)
        else:
            self.cuty_shot(url, filename)
class ShotterThread(Thread):
    """Daemon worker that consumes URLs from a queue and screenshots them."""
    def __init__(self, queue, output, prefix=None):
        super(ShotterThread, self).__init__()
        self.queue = queue
        self.output = output
        self.prefix = prefix
        self.daemon = True
        self.shotter = Shotter()
    def url_to_filename(self, url):
        """Map *url* to a PNG path inside the output directory."""
        safe = url.replace('://', '_').replace('/', '').replace(':', '_')
        name = '%s.png' % safe
        if self.prefix is not None:
            name = self.prefix + name
        return path.join(self.output, name)
    def run(self, ):
        while True:
            url = self.queue.get()
            self.shotter.screenshot(url, self.url_to_filename(url))
            self.queue.task_done()
class MassShotter(object):
    """Screenshots a list of URLs concurrently with ShotterThread workers."""
    def __init__(self, urls, output, prefix=None, thread_count=5):
        self.urls = urls
        self.output = output
        self.prefix = prefix
        self.thread_count = thread_count
        self.queue = Queue()
        self.threads = []
    def run(self):
        """Create the output dir, enqueue deduplicated URLs and run workers."""
        if not path.exists(self.output):
            makedirs(self.output)
        print('Filling queue with %i urls and deduplicating' % len(self.urls))
        # BUG FIX: deduplicate self.urls; the original iterated the
        # module-level `urls` global, ignoring the constructor argument.
        seen = set()
        seen_add = seen.add
        for url in [x for x in self.urls if x not in seen and not seen_add(x)]:
            self.queue.put(url)
        init_size = self.queue.qsize()
        print('Only %i urls to screen.' % init_size)
        # Fill threads list.
        # BUG FIX: pass self.prefix through; it was stored but never used.
        for i in xrange(0, self.thread_count):
            t = ShotterThread(self.queue, self.output, self.prefix)
            self.threads.append(t)
        # Start all threads
        [x.start() for x in self.threads]
        # Wait for all of them to finish
        # NOTE(review): workers loop forever, so join() blocks indefinitely;
        # completion is effectively signalled via KeyboardInterrupt upstream.
        [x.join() for x in self.threads]
if __name__ == '__main__':
    # Command-line front end: gather URLs from nmap reports and/or URL list
    # files, then screenshot them all with a small thread pool.
    parser = ArgumentParser(description='screenz.py - small screenshot script for nmap report', formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('-n', '--nmap', help='nmap xml report file or directory')
    parser.add_argument('-u', '--urls', help='url list files or dirs')
    parser.add_argument('-p', '--prefix', default=None, help='prefix for output')
    parser.add_argument('-o', '--output', default='output', help='output directory')
    parser.add_argument('-d', '--debug', action='store_true', help='debug mode')
    parser.add_argument('-t', '--threads', type=int, default=5, help='threads count')
    # parser.add_argument('-T', '--imeout', type=int, help='timeout in seconds')
    parser.add_argument('-v', action='version', version='%(prog)s 0.2')
    args = parser.parse_args()
    pprint(args)
    if args.debug:
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)
    #if 'nmap' not in args and 'url' not in args:
    if args.nmap is None and args.urls is None:
        parser.print_help()
        print('Please use -n or -u with argument!')
        exit(1)
    start_time = datetime.now()
    urls = []
    um = UrlManager()
    # A single file argument is parsed directly; a directory is walked.
    if args.nmap is not None:
        if path.isfile(args.nmap):
            urls.extend(um.urls_from_nmap_xml(args.nmap))
        elif path.isdir(args.nmap):
            urls.extend(um.urls_from_nmap_dir(args.nmap))
    if args.urls is not None:
        if path.isfile(args.urls):
            urls.extend(um.urls_from_file(args.urls))
        elif path.isdir(args.urls):
            urls.extend(um.urls_from_dir(args.urls))
    print('Urls:')
    for u in urls:
        pprint(u)
    print('Start screenshoting. Press Ctrl+C to abort.')
    try:
        msh = MassShotter(urls, args.output, args.prefix, args.threads)
        msh.run()
    except KeyboardInterrupt:
        print('Screenshoting aborted.')
        exit(1)
    print "Start time: " + start_time.strftime('%Y-%m-%d %H:%M:%S')
    print "Finish time: " + datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: xAgeSDLBoolToggleDependent
Age: global
Date: April 2003
Author: Bill Slease
toggles an age sdl bool only if another age sdl bool is true
"""
from Plasma import *
from PlasmaTypes import *
import string
# Max/editor-exposed component attributes.
actTrigger = ptAttribActivator(1,"Activator")
stringVarEnabler = ptAttribString(2,"AgeSDL Enabler") # e.g. tldnWorkroomPowerOn
stringVarTarget = ptAttribString(3,"AgeSDL Var To Change") # e.g. tldnLight01On
stringInfo = ptAttribString(4,"Extra info to pass along") # string passed as hint to listeners if needed (e.g. which side of the door did the player click on?)
# Cached value of the target SDL bool.  NOTE(review): `false` is not a
# Python builtin -- presumably provided by PlasmaTypes' star import; verify.
boolCurrentValue = false
class xAgeSDLBoolToggleDependent(ptResponder):
    """Toggle a target age SDL bool, but only while an enabler SDL bool is true."""
    def __init__(self):
        ptResponder.__init__(self)
        self.id = 5040
        self.version = 1
    def OnFirstUpdate(self):
        # Validate the editor-supplied attribute strings early so broken
        # max files show up in the debug log.
        if not (type(stringVarEnabler.value) == type("") and stringVarEnabler.value != ""):
            PtDebugPrint("ERROR: xAgeSDLBoolToggleDependent.OnFirstUpdate():\tERROR: missing SDLEnabler var name")
        if not (type(stringVarTarget.value) == type("") and stringVarTarget.value != ""):
            PtDebugPrint("ERROR: xAgeSDLBoolToggleDependent.OnFirstUpdate():\tERROR: missing SDLTarget var name")
    def OnServerInitComplete(self):
        global boolCurrentValue
        ageSDL = PtGetAgeSDL()
        # Broadcast target-var changes to all clients and subscribe to
        # change notifications for it.
        ageSDL.setFlags(stringVarTarget.value,1,1)
        ageSDL.sendToClients(stringVarTarget.value)
        ageSDL.setNotify(self.key,stringVarTarget.value,0.0)
        try:
            boolCurrentValue = ageSDL[stringVarTarget.value][0]
        except:
            PtDebugPrint("ERROR: xAgeSDLBoolToggleDependent.OnServerInitComplete():\tERROR reading age SDL")
            pass
        PtDebugPrint("DEBUG: xAgeSDLBoolToggleDependent.OnServerInitComplete():\t%s = %d, %s = %d" % (stringVarEnabler.value,ageSDL[stringVarEnabler.value][0],stringVarTarget.value,boolCurrentValue) )
    def OnNotify(self,state,id,events):
        global boolCurrentValue
        # is this notify something I should act on?
        if not state or id != actTrigger.id:
            return
        if not PtWasLocallyNotified(self.key):
            return
        else:
            PtDebugPrint("DEBUG: xAgeSDLBoolToggleDependent.OnNotify():\t local player requesting %s change via %s" % (stringVarTarget.value,actTrigger.value[0].getName()) )
        ageSDL = PtGetAgeSDL()
        # Toggle the sdl value if enabled
        if not ageSDL[stringVarEnabler.value][0]:
            return
        if boolCurrentValue:
            boolCurrentValue = false
            ageSDL.setTagString(stringVarTarget.value,stringInfo.value)
        else:
            boolCurrentValue = true
            ageSDL.setTagString(stringVarTarget.value,stringInfo.value)
        # Writing the tuple pushes the new value into the age SDL.
        ageSDL[stringVarTarget.value] = (boolCurrentValue,)
        PtDebugPrint("DEBUG: xAgeSDLBoolToggleDependent.OnNotify():\tset age SDL var %s to %d" % (stringVarTarget.value,boolCurrentValue) )
    # in case someone other than me changes my var(s)
    def OnSDLNotify(self,VARname,SDLname,playerID,tag):
        global boolCurrentValue
        ageSDL = PtGetAgeSDL()
        if VARname == stringVarTarget.value:
            PtDebugPrint("DEBUG: xAgeSDLBoolToggleDependent.OnSDLNotify():\t VARname:%s, SDLname:%s, tag:%s, value:%d" % (VARname,SDLname,tag,ageSDL[stringVarTarget.value][0]))
            boolCurrentValue = ageSDL[stringVarTarget.value][0]
|
from odoo import _, api, models
from odoo.exceptions import UserError
class AccountTax(models.Model):
    _inherit = 'account.tax'

    def write(self, vals):
        """Refuse edits to critical tax fields while an open POS session
        still references the tax."""
        protected_fields = {
            'amount_type', 'amount', 'type_tax_use', 'tax_group_id', 'price_include',
            'include_base_amount'
        }
        if protected_fields & set(vals.keys()):
            lines = self.env['pos.order.line'].sudo().search([
                ('order_id.session_id.state', '!=', 'closed')
            ]).read(['tax_ids'])
            # Flatten the list of taxes, see https://stackoverflow.com/questions/952914
            used_tax_ids = {tax_id for line in lines for tax_id in line['tax_ids']}
            if used_tax_ids & set(self.ids):
                raise UserError(_(
                    'It is forbidden to modify a tax used in a POS order not posted. ' +
                    'You must close the POS sessions before modifying the tax.'
                ))
        return super(AccountTax, self).write(vals)

    def get_real_tax_amount(self):
        """Return [{'id', 'amount'}] with each amount scaled by the sum of
        the tax-type repartition factors."""
        result = []
        for tax in self:
            repartition = tax.invoice_repartition_line_ids.filtered(
                lambda line: line.repartition_type == 'tax')
            factor = sum(repartition.mapped('factor'))
            result.append({'id': tax.id, 'amount': tax.amount * factor})
        return result
|
"""Model code for the Transaction Log."""
import calendar
import json
import os
from functools import wraps
from storm.expr import Join, LeftJoin
from storm.locals import Int, DateTime, Enum, Store, Unicode
from storm.store import AutoReload
from config import config
from backends.filesync.data.dbmanager import get_shard_store, get_user_store
from backends.filesync.data.model import (
STATUS_LIVE,
Share,
StorageObject,
StorageUser,
UserVolume,
get_path_startswith,
)
from backends.tools.properties import StormUUID
def get_epoch_secs(dt):
    """Return *dt* expressed as whole seconds since the Unix epoch (UTC)."""
    time_tuple = dt.timetuple()
    return calendar.timegm(time_tuple)
def _get_user_id(obj):
    """Return the DB ID of the given object's owner.

    A StorageUser owns itself, so its own ID is returned.
    """
    if isinstance(obj, (StorageObject, UserVolume)):
        return obj.owner_id
    if isinstance(obj, Share):
        return obj.shared_by
    if isinstance(obj, StorageUser):
        return obj.id
    raise AssertionError("Unknown object type: %s" % obj)
def _does_user_id_end_in(user_id, ending_range):
"""Return True if the user ID ends in the given range."""
low, high = ending_range.split('-')
if (int(str(user_id)[-len(low):]) >= int(low) and
int(str(user_id)[-len(high):]) <= int(high)):
return True
return False
def skip_if_txlog_not_enabled(func):
    """Decorator: only call *func* when txlog is enabled for the affected user.

    The owner ID is derived from the first real argument of the wrapped
    method (args[1]; args[0] is the instance the method is bound to) and
    checked against the txlog config: an explicit comma-separated ID list
    and/or a trailing-digits range.  When neither enables the user, the
    wrapped function is skipped and None is returned.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        user_id = _get_user_id(args[1])
        explicit_ids = config.txlog.enabled_to_user_ids
        ending_range = config.txlog.enabled_to_user_ids_ending_in
        enabled = False
        if explicit_ids:
            if user_id in map(int, explicit_ids.split(',')):
                enabled = True
        if ending_range and _does_user_id_end_in(user_id, ending_range):
            enabled = True
        if not enabled:
            return None
        return func(*args, **kwargs)
    return wrapper
class TransactionLog(object):
    """The log of an operation performed on a node.

    Rows are stored in the txlog.transaction_log table on the user's
    shard so that derived services can replay operations on users,
    nodes, volumes and shares.
    """
    # Constants; may want to move somewhere else.
    OP_DELETE = u'delete'
    OP_MOVE = u'move'
    OP_PUT_CONTENT = u'put_content'
    OP_SHARE_ACCEPTED = u'share_accepted'
    OP_SHARE_DELETED = u'share_deleted'
    OP_PUBLIC_ACCESS_CHANGED = u'public_access_changed'
    OP_USER_CREATED = u'user_created'
    # NOTE(review): plain str while the other op codes are unicode
    # literals; equal (and same hash) under Python 2, but worth unifying.
    OP_UDF_CREATED = 'udf_created'
    OP_UDF_DELETED = 'udf_deleted'
    OPERATIONS_MAP = {
        OP_DELETE: 0,
        OP_MOVE: 1,
        OP_PUT_CONTENT: 2,
        OP_SHARE_ACCEPTED: 3,
        OP_SHARE_DELETED: 4,
        OP_PUBLIC_ACCESS_CHANGED: 5,
        OP_USER_CREATED: 6,
        OP_UDF_CREATED: 7,
        OP_UDF_DELETED: 8,
    }
    # Reverse lookup: numeric op code stored in the DB -> operation name.
    OPERATIONS_REVERSED_MAP = dict(
        [tuple(reversed(item)) for item in OPERATIONS_MAP.items()])
    __storm_table__ = "txlog.transaction_log"
    id = Int(primary=True)
    # Most operations we care about are on nodes, but this can be None for
    # things like OP_USER_CREATED.
    node_id = StormUUID()
    # The volume where the node is; can also be None in some cases.
    volume_id = StormUUID()
    # The ID of the node's owner if this is an operation on a Node, or the ID
    # of the newly created user if it's a OP_USER_CREATED.
    owner_id = Int(allow_none=False)
    op_type = Enum(map=OPERATIONS_MAP, allow_none=False)
    path = Unicode()
    generation = Int()
    # Filled in by the DB on insert (AutoReload).
    timestamp = DateTime(allow_none=False, default=AutoReload)
    mimetype = Unicode()
    extra_data = Unicode()
    # Only used when representing a move.
    old_path = Unicode()

    def __init__(self, node_id, owner_id, volume_id, op_type, path, mimetype,
                 generation=None, old_path=None, extra_data=None):
        """Store the given operation attributes; see the class-level
        column comments for their meaning."""
        self.node_id = node_id
        self.owner_id = owner_id
        self.volume_id = volume_id
        self.op_type = op_type
        self.path = path
        self.generation = generation
        self.mimetype = mimetype
        self.old_path = old_path
        self.extra_data = extra_data

    @property
    def extra_data_dict(self):
        """A dictionary obtained by json.load()ing self.extra_data, or None if
        self.extra_data is None.
        """
        if self.extra_data is None:
            return self.extra_data
        return json.loads(self.extra_data)

    @classmethod
    def bootstrap(cls, user):
        """Create TransactionLogs for the user's whole current state.

        Records the user itself, its live UDFs, its live public
        directories, all live files (via a bulk INSERT) and its accepted
        shares.

        @return: The number of TransactionLog rows inserted.
        """
        store = get_shard_store(user.shard_id)
        cls.record_user_created(user)
        # Number of TransactionLog rows we inserted.
        rows = 1
        for udf in store.find(UserVolume, owner_id=user.id,
                              status=STATUS_LIVE):
            cls.record_udf_created(udf)
            rows += 1
        # If this becomes a problem it can be done as a single INSERT, but
        # we'd need to duplicate the get_public_file_url() in plpython.
        udf_join = Join(
            StorageObject,
            UserVolume, StorageObject.volume_id == UserVolume.id)
        conditions = [StorageObject.kind == 'Directory',
                      StorageObject.owner_id == user.id,
                      StorageObject.status == STATUS_LIVE,
                      StorageObject._publicfile_id != None,  # NOQA
                      UserVolume.status == STATUS_LIVE]
        dirs = store.using(udf_join).find(StorageObject, *conditions)
        for directory in dirs:
            cls.record_public_access_change(directory)
            rows += 1
        # XXX: If this takes too long it will get killed by the transaction
        # watcher. Need to check what's the limit we could have here.
        # Things to check:
        #  * If it still takes too long, we could find out the IDs of the
        #    people who have a lot of music/photos, run it just for them with
        #    the transaction watcher disabled and then run it for everybody
        #    else afterwards.
        query = """
            INSERT INTO txlog.transaction_log (
                node_id, owner_id, volume_id, op_type, path, generation,
                mimetype, extra_data)
            SELECT O.id, O.owner_id, O.volume_id, ?,
                   txlog.path_join(O.path, O.name), O.generation, O.mimetype,
                   txlog.get_extra_data_to_recreate_file_1(
                        kind, size, storage_key, publicfile_id,
                        public_uuid, content_hash,
                        extract(epoch from O.when_created at time zone 'UTC'),
                        extract(epoch
                            from O.when_last_modified at time zone 'UTC'),
                        UserDefinedFolder.path
                   ) as extra_data
            FROM Object as O
                JOIN UserDefinedFolder on UserDefinedFolder.id = O.volume_id
                LEFT JOIN ContentBlob on ContentBlob.hash = O.content_hash
            WHERE
                O.kind != 'Directory'
                AND O.owner_id = ?
                AND O.status = 'Live'
                AND UserDefinedFolder.status = 'Live'
            """
        params = (cls.OPERATIONS_MAP[cls.OP_PUT_CONTENT], user.id)
        rows += store.execute(query, params=params).rowcount
        # Cannot create TransactionLogs for Shares in a single INSERT like
        # above because TransactionLogs and Shares live in separate databases.
        share_join = LeftJoin(
            Share, StorageUser, Share.shared_to == StorageUser.id)
        conditions = [Share.shared_by == user.id,
                      Share.status == STATUS_LIVE,
                      Share.accepted == True]  # NOQA
        shares = get_user_store().using(share_join).find(Share, *conditions)
        for share in shares:
            cls.record_share_accepted(share)
            rows += 1
        return rows

    @classmethod
    @skip_if_txlog_not_enabled
    def record_udf_created(cls, udf):
        """Create a TransactionLog representing a new UserVolume."""
        when_created = get_epoch_secs(udf.when_created)
        extra_data = json.dumps(dict(when_created=when_created))
        txlog = cls(
            None, udf.owner_id, udf.id, cls.OP_UDF_CREATED,
            udf.path, mimetype=None, generation=udf.generation,
            # Python 2: json.dumps() returns a byte string; decode so the
            # Unicode column gets a unicode object.
            extra_data=extra_data.decode('ascii'))
        return Store.of(udf).add(txlog)

    @classmethod
    @skip_if_txlog_not_enabled
    def record_udf_deleted(cls, udf):
        """Create TransactionLogs representing a UserVolume deleted.

        This will create one TransactionLog for the deletion of the UserVolume
        itself (op_type=OP_UDF_DELETED) and then create TransactionLogs for
        the removal of every descendant of it. The latter part is similar to
        unlinking a directory tree where the top of the tree is the
        UserVolume's root node.

        Note that when a UserVolume is deleted its generation is increased but
        the generation of its children are not, so we use the UserVolume's
        generation in all TransactionLogs created.

        @return: The number of TransactionLog rows created.
        """
        rows = 1
        Store.of(udf).add(cls(
            None, udf.owner_id, udf.id, cls.OP_UDF_DELETED,
            udf.path, mimetype=None, generation=udf.generation))
        rows += cls._record_unlink_tree(udf.root_node, udf.generation)
        return rows

    @classmethod
    @skip_if_txlog_not_enabled
    def record_user_created(cls, user):
        """Create a TransactionLog entry representing a new user.

        We abuse the TransactionLog table to store the details of newly
        created users because our derived services need information about
        users as well as their files.

        A TransactionLog representing a newly created user will have
        no node_id, volume_id, generation or path. And its owner_id will be
        the ID of the newly created user.
        """
        extra_data = json.dumps(dict(
            name=user.username, visible_name=user.visible_name))
        txlog = cls(
            None, user.id, None, cls.OP_USER_CREATED, None, None,
            extra_data=extra_data.decode('ascii'))
        store = get_shard_store(user.shard_id)
        return store.add(txlog)

    @classmethod
    @skip_if_txlog_not_enabled
    def record_public_access_change(cls, node):
        """Create a TransactionLog entry representing a change in a
        node's public accessibility.

        Currently we only record TransactionLogs for directories that are made
        public/private, so if the given node is not a directory we'll return
        None without storing a TransactionLog.

        @param node: The StorageObject that was made public/private.
        @return: The newly created TransactionLog.
        """
        extra_data = json.dumps(
            cls._get_extra_data_for_new_node(node, node.volume.path))
        txlog = cls(
            node.id, node.owner_id, node.volume_id,
            cls.OP_PUBLIC_ACCESS_CHANGED, node.full_path, node.mimetype,
            generation=node.generation, extra_data=extra_data.decode('ascii'))
        return Store.of(node).add(txlog)

    @classmethod
    @skip_if_txlog_not_enabled
    def record_put_content(cls, node):
        """Create a TransactionLog entry representing a PUT_CONTENT operation.

        @param node: The StorageObject which points to the content uploaded.
        @return: The newly created TransactionLog.
        """
        extra_data = json.dumps(
            cls._get_extra_data_for_new_node(node, node.volume.path))
        txlog = cls(
            node.id, node.owner_id, node.volume_id, cls.OP_PUT_CONTENT,
            node.full_path, node.mimetype, generation=node.generation,
            extra_data=extra_data.decode('ascii'))
        return Store.of(node).add(txlog)

    @classmethod
    def _get_extra_data_for_new_node(cls, node, volume_path):
        """A dict containing the extra data needed to re-create this node.

        @param node: Could be a StorageObject or a StorageNode(DAO)
        @param volume_path: the path of the node's volume

        This includes the kind, size, storage_key, publicfile_id, public_uuid,
        content_hash and creation date of the given node.

        It is supposed to be included in the extra_data of all TransactionLogs
        representing operations on nodes so that the node can be created even
        if messages arrive out of order on the service workers (e.g. a move
        txlog being processed before the txlog representing the file
        creation).

        The volume_path is passed in separately since getting it now would
        require another db transaction. The transaction management for this
        method is unclear.
        """
        public_uuid = node.public_uuid
        if public_uuid is not None:
            # UUIDs are not JSON-serializable; store their text form.
            public_uuid = unicode(public_uuid)
        when_created = get_epoch_secs(node.when_created)
        last_modified = get_epoch_secs(node.when_last_modified)
        d = dict(publicfile_id=node.publicfile_id, public_uuid=public_uuid,
                 when_created=when_created,
                 last_modified=last_modified, kind=node.kind,
                 volume_path=volume_path)
        if node.kind == 'File':
            d['content_hash'] = node.content_hash
            # node.content may be None (no uploaded content yet).
            d['size'] = getattr(node.content, 'size', None)
            storage_key = getattr(node.content, 'storage_key', None)
            if storage_key is not None:
                storage_key = unicode(storage_key)
            d['storage_key'] = storage_key
        return d

    @classmethod
    def record_share_accepted(cls, share):
        """Create a TransactionLog entry representing a share being accepted.

        @param share: The Share which was accepted.
        @return: The newly created TransactionLog.
        """
        # Return the created TransactionLog as documented (previously the
        # helper's return value was silently dropped).
        return cls._record_share_accepted_or_deleted(
            share, cls.OP_SHARE_ACCEPTED)

    @classmethod
    def record_share_deleted(cls, share):
        """Create a TransactionLog entry representing a share being deleted.

        @param share: The Share which was deleted.
        @return: The newly created TransactionLog.
        """
        return cls._record_share_accepted_or_deleted(
            share, cls.OP_SHARE_DELETED)

    @classmethod
    @skip_if_txlog_not_enabled
    def _record_share_accepted_or_deleted(cls, share, op_type):
        """Create and store a TransactionLog for the given share event.

        The log row is attached to the node the share points to (on the
        sharer's shard).
        """
        store = get_shard_store(share.sharedbyuser.shard_id)
        node = store.get(StorageObject, share.subtree)
        when_last_changed = share.when_last_changed
        extra_data = dict(
            shared_to=share.shared_to, share_id=str(share.id),
            share_name=share.name, access_level=share.access,
            when_shared=get_epoch_secs(share.when_shared),
            when_last_changed=get_epoch_secs(when_last_changed))
        txlog = cls(
            node.id, node.owner_id, node.volume_id, op_type, node.full_path,
            node.mimetype, generation=None,
            extra_data=json.dumps(extra_data).decode('ascii'))
        return Store.of(node).add(txlog)

    @classmethod
    @skip_if_txlog_not_enabled
    def record_unlink(cls, node):
        """See _record_unlink."""
        return cls._record_unlink(node, node.generation)

    @classmethod
    def _record_unlink(cls, node, generation):
        """Create a TransactionLog entry representing an unlink operation.

        If the given node is a file and its mimetype is not in
        INTERESTING_MIMETYPES, we do nothing.

        @param node: The StorageObject which was unlinked.
        @param generation: The generation to use in the newly created
            TransactionLog.
        @return: The newly created TransactionLog or None.
        """
        extra_data = json.dumps({
            'kind': node.kind,
            'volume_path': node.volume.path}).decode('ascii')
        txlog = cls(
            node.id, node.owner_id, node.volume_id, cls.OP_DELETE,
            node.full_path, node.mimetype, generation=generation,
            extra_data=extra_data)
        return Store.of(node).add(txlog)

    @classmethod
    @skip_if_txlog_not_enabled
    def record_unlink_tree(cls, directory):
        """See _record_unlink_tree."""
        return cls._record_unlink_tree(directory, directory.generation)

    @classmethod
    def _record_unlink_tree(cls, directory, generation):
        """Create TransactionLog entries representing an unlink_tree operation.

        We create one TransactionLog entry for the given directory and each of
        its descendants that is either a directory or a file with a mimetype
        in INTERESTING_MIMETYPES.

        @param directory: The StorageObject representing the directory that
            was unlinked.
        @param generation: The generation to use in all TransactionLogs
            created by this method.
        @return: The number of created TransactionLog entries.
        """
        assert directory.kind == 'Directory', (
            "The given node is not a directory.")
        cls._record_unlink(directory, generation)
        where_clause, extra_params = (
            cls._get_interesting_descendants_where_clause(directory))
        # Here we construct the extra_data json manually because it's trivial
        # enough and the alternative would be to use a stored procedure, which
        # requires a DB patch.
        sql = """
            INSERT INTO txlog.transaction_log (
                node_id, owner_id, volume_id, op_type, path, generation,
                mimetype, extra_data)
            SELECT Object.id, Object.owner_id, Object.volume_id, ?,
                   txlog.path_join(Object.path, Object.name),
                   ?, Object.mimetype,
                   '{"kind": "' || Object.kind || '",
                     "volume_path": "' || UserDefinedFolder.path || '"}'
            FROM Object, UserDefinedFolder
            WHERE Object.volume_id = UserDefinedFolder.id AND
            """ + where_clause
        params = (cls.OPERATIONS_MAP[cls.OP_DELETE], generation)
        result = Store.of(directory).execute(
            sql, params=params + extra_params)
        # TODO: Store the rowcount in our metrics.
        return result.rowcount

    @classmethod
    @skip_if_txlog_not_enabled
    def record_move(cls, node, old_name, old_parent):
        """Create TransactionLog entries representing a move operation.

        This must be called after the actual move is performed because we
        assume the attributes of the given node and its descendants have
        already been updated.

        @return: The number of TransactionLog rows created.
        """
        if node.parent == old_parent and node.name == old_name:
            raise ValueError(
                "The old name and parent are the same as the current ones.")
        old_path = os.path.join(old_parent.full_path, old_name)
        rowcount = 0
        # First, create a TransactionLog for the actual file/directory
        # being moved.
        extra_data = json.dumps(cls._get_extra_data_for_new_node(
            node, node.volume.path))
        txlog = cls(
            node.id, node.owner_id, node.volume_id, cls.OP_MOVE,
            node.full_path, node.mimetype, generation=node.generation,
            old_path=old_path, extra_data=extra_data.decode('ascii'))
        Store.of(node).add(txlog)
        rowcount += 1
        if node.kind == 'Directory':
            # Now we generate a TransactionLog for every interesting
            # descendant of the directory that is being moved.
            old_path_base = os.path.join(old_parent.full_path, old_name)
            where_clause, extra_params = (
                cls._get_interesting_descendants_where_clause(node))
            sql = """
            INSERT INTO txlog.transaction_log (
                node_id, owner_id, volume_id, op_type, path, generation,
                mimetype, old_path, extra_data)
            SELECT Object.id, Object.owner_id, Object.volume_id, ?,
                   Object.path || '/' || Object.name, ?, Object.mimetype,
                   ? || substring(Object.path from ?) || '/' || Object.name,
                   txlog.get_extra_data_to_recreate_file_1(
                        Object.kind,
                        ContentBlob.size,
                        ContentBlob.storage_key,
                        Object.publicfile_id,
                        Object.public_uuid,
                        Object.content_hash,
                        extract(epoch from Object.when_created
                                at time zone 'UTC'),
                        extract(epoch from Object.when_last_modified
                                at time zone 'UTC'),
                        UserDefinedFolder.path
                   ) as extra_data
            FROM Object
                JOIN UserDefinedFolder
                    on UserDefinedFolder.id = Object.volume_id
                LEFT JOIN ContentBlob on ContentBlob.hash = Object.content_hash
            WHERE Object.volume_id = UserDefinedFolder.id AND
            """ + where_clause
            # The substring offset rebases each descendant's old path onto
            # old_path_base.
            params = (
                cls.OPERATIONS_MAP[cls.OP_MOVE], node.generation,
                old_path_base, len(node.full_path) + 1)
            result = Store.of(node).execute(sql, params=params + extra_params)
            rowcount += result.rowcount
        # TODO: Store the rowcount in our metrics.
        return rowcount

    @classmethod
    def _get_interesting_descendants_where_clause(cls, node):
        """Return the WHERE clause to get the interesting descendants of node.

        @return: A two-tuple containing the SQL clauses and the params to be
            interpolated. They are suitable to be used with Storm's
            store.execute() API.
        """
        sql = """
            Object.volume_id = ?
            -- See comment on StorageObject.get_descendants() as to why we
            -- need to OR the two clauses below.
            AND (Object.parent_id = ?
                 OR Object.path LIKE ? || '%%')
            AND Object.status = 'Live'
        """
        base_path = get_path_startswith(node)
        params = (node.volume_id, node.id, base_path)
        # We use this code to explode UDF operations and in those cases we
        # will delete the root of a UDF, so we add this extra clause to avoid
        # the query above picking up the root folder as a descendant of
        # itself.
        if node.path == '/':
            sql += " AND Object.id != ?"
            params = params + (node.id,)
        return sql, params
|
"""
Demographics API URLs.
"""
from django.conf.urls import include, url
from .v1 import urls as v1_urls
app_name = 'openedx.core.djangoapps.demographics'
urlpatterns = [
url(r'^v1/', include(v1_urls))
]
|
from __future__ import absolute_import
from __future__ import print_function
from .._abstract.abstract import BaseAGSServer
import json
class MobileServiceLayer(BaseAGSServer):
    """
    Represents a single mobile service layer.

    Properties are populated lazily: the first access to an unset property
    triggers a GET against the layer URL (see __init) and the JSON response
    keys are copied onto the matching private attributes.
    """
    _url = None
    _proxy_url = None
    _proxy_port = None
    _securityHandler = None
    _json = None
    _json_dict = None
    # NOTE(review): _display is never populated by __init (no public
    # 'display' attribute exists for the JSON key to match); kept only for
    # backward compatibility.
    _display = None
    _drawingInfo = None
    _extent = None
    _canModifyLayer = None
    _advancedQueryCapabilities = None
    _hasLabels = None
    _supportsAdvancedQueries = None
    _id = None
    _currentVersion = None
    _geometryType = None
    _ownershipBasedAccessControlForFeatures = None
    _type = None
    _useStandardizedQueries = None
    _supportedQueryFormats = None
    _maxRecordCount = None
    _description = None
    _defaultVisibility = None
    _typeIdField = None
    _displayField = None
    _name = None
    _supportsStatistics = None
    _hasAttachments = None
    _fields = None
    _maxScale = None
    _copyrightText = None
    _canScaleSymbols = None
    _minScale = None
    #----------------------------------------------------------------------
    def __init__(self, url,
                 securityHandler=None,
                 proxy_url=None,
                 proxy_port=None,
                 initialize=False):
        """Constructor

        @param url: the layer's REST endpoint URL
        @param securityHandler: optional handler providing auth/referer
        @param proxy_url/proxy_port: optional proxy to route requests through
        @param initialize: when True, fetch the layer JSON immediately
        """
        self._url = url
        self._securityHandler = securityHandler
        if self._securityHandler is not None:
            self._referer_url = self._securityHandler.referer_url
        self._proxy_port = proxy_port
        self._proxy_url = proxy_url
        if initialize:
            self.__init()
    #----------------------------------------------------------------------
    def __init(self):
        """initializes the properties by fetching the layer JSON"""
        params = {
            "f" : "json",
        }
        json_dict = self._get(self._url, params,
                              securityHandler=self._securityHandler,
                              proxy_url=self._proxy_url,
                              proxy_port=self._proxy_port)
        self._json_dict = json_dict
        self._json = json.dumps(self._json_dict)
        # Map each JSON key onto its private attribute, keyed off the
        # public (property) names exposed by this class.
        attributes = [attr for attr in dir(self)
                      if not attr.startswith('__') and \
                      not attr.startswith('_')]
        for k,v in json_dict.items():
            if k in attributes:
                setattr(self, "_"+ k, v)
            else:
                print (k, " - attribute not implemented for Mobile Service Layer.")
    #----------------------------------------------------------------------
    def __str__(self):
        """returns object as string"""
        if self._json is None:
            self.__init()
        return self._json
    #----------------------------------------------------------------------
    def __iter__(self):
        """
        returns key/value pair
        """
        attributes = json.loads(str(self))
        for att in attributes.keys():
            yield (att, getattr(self, att))
    #----------------------------------------------------------------------
    @property
    def drawingInfo(self):
        """gets the services drawing information"""
        if self._drawingInfo is None:
            self.__init()
        return self._drawingInfo
    #----------------------------------------------------------------------
    @property
    def extent(self):
        """returns the service layer extent"""
        if self._extent is None:
            self.__init()
        return self._extent
    #----------------------------------------------------------------------
    @property
    def canModifyLayer(self):
        """returns value for can modify layer"""
        if self._canModifyLayer is None:
            self.__init()
        return self._canModifyLayer
    #----------------------------------------------------------------------
    @property
    def advancedQueryCapabilities(self):
        """gets the advancedQueryCapabilities value"""
        if self._advancedQueryCapabilities is None:
            self.__init()
        return self._advancedQueryCapabilities
    #----------------------------------------------------------------------
    @property
    def hasLabels(self):
        """returns the has labels value"""
        if self._hasLabels is None:
            self.__init()
        return self._hasLabels
    #----------------------------------------------------------------------
    @property
    def supportsAdvancedQueries(self):
        """returns the supportsAdvancedQueries value"""
        if self._supportsAdvancedQueries is None:
            self.__init()
        return self._supportsAdvancedQueries
    #----------------------------------------------------------------------
    @property
    def id(self):
        """returns the layers' id"""
        if self._id is None:
            self.__init()
        return self._id
    #----------------------------------------------------------------------
    @property
    def currentVersion(self):
        """gets the layers current version"""
        if self._currentVersion is None:
            self.__init()
        return self._currentVersion
    #----------------------------------------------------------------------
    @property
    def geometryType(self):
        """returns the layers geometry type"""
        if self._geometryType is None:
            self.__init()
        return self._geometryType
    #----------------------------------------------------------------------
    @property
    def ownershipBasedAccessControlForFeatures(self):
        """returns the ownershipBasedAccessControlForFeatures value"""
        if self._ownershipBasedAccessControlForFeatures is None:
            self.__init()
        return self._ownershipBasedAccessControlForFeatures
    #----------------------------------------------------------------------
    @property
    def type(self):
        """gets the layer type"""
        if self._type is None:
            self.__init()
        return self._type
    #----------------------------------------------------------------------
    @property
    def useStandardizedQueries(self):
        """gets the useStandardizedQueries value"""
        if self._useStandardizedQueries is None:
            self.__init()
        return self._useStandardizedQueries
    #----------------------------------------------------------------------
    @property
    def hasAttachments(self):
        """returns if the layer has attachments enabled"""
        if self._hasAttachments is None:
            self.__init()
        return self._hasAttachments
    #----------------------------------------------------------------------
    @property
    def supportedQueryFormats(self):
        """returns the supportedQueryFormats value"""
        if self._supportedQueryFormats is None:
            self.__init()
        return self._supportedQueryFormats
    #----------------------------------------------------------------------
    @property
    def maxRecordCount(self):
        """returns the max record count"""
        if self._maxRecordCount is None:
            self.__init()
        return self._maxRecordCount
    #----------------------------------------------------------------------
    @property
    def description(self):
        """returns the service layer description"""
        if self._description is None:
            self.__init()
        return self._description
    #----------------------------------------------------------------------
    @property
    def defaultVisibility(self):
        """returns the defaultVisibility value"""
        if self._defaultVisibility is None:
            self.__init()
        return self._defaultVisibility
    #----------------------------------------------------------------------
    @property
    def typeIdField(self):
        """returns the type id field"""
        if self._typeIdField is None:
            self.__init()
        return self._typeIdField
    #----------------------------------------------------------------------
    @property
    def displayField(self):
        """returns the display field"""
        if self._displayField is None:
            self.__init()
        # Bug fix: previously returned self._display, which is never set
        # by __init and therefore was always None.
        return self._displayField
    #----------------------------------------------------------------------
    @property
    def name(self):
        """returns the layers name"""
        if self._name is None:
            self.__init()
        return self._name
    #----------------------------------------------------------------------
    @property
    def supportsStatistics(self):
        """returns the supports statistics value"""
        if self._supportsStatistics is None:
            self.__init()
        return self._supportsStatistics
    #----------------------------------------------------------------------
    @property
    def fields(self):
        """gets the fields for the layer"""
        if self._fields is None:
            self.__init()
        return self._fields
    #----------------------------------------------------------------------
    @property
    def copyrightText(self):
        """gets the copyright text"""
        if self._copyrightText is None:
            self.__init()
        return self._copyrightText
    #----------------------------------------------------------------------
    @property
    def canScaleSymbols(self):
        """returns the can scale symbols value"""
        if self._canScaleSymbols is None:
            self.__init()
        return self._canScaleSymbols
    #----------------------------------------------------------------------
    @property
    def minScale(self):
        """returns the minScale value"""
        if self._minScale is None:
            self.__init()
        return self._minScale
    #----------------------------------------------------------------------
    @property
    def maxScale(self):
        """gets the max scale for the layer"""
        if self._maxScale is None:
            self.__init()
        return self._maxScale
class MobileService(BaseAGSServer):
    """
    Represents a single mobile service.

    Properties are populated lazily: the first access to an unset property
    triggers a GET against the service URL (see __init) and the JSON
    response keys are copied onto the matching private attributes.
    """
    _url = None
    _proxy_url = None
    _proxy_port = None
    _securityHandler = None
    _json = None
    _json_dict = None
    _layers = None
    _description = None
    _initialExtent = None
    _spatialReference = None
    _mapName = None
    _currentVersion = None
    _units = None
    _fullExtent = None
    _serviceDescription = None
    #----------------------------------------------------------------------
    def __init__(self, url,
                 securityHandler=None,
                 proxy_url=None,
                 proxy_port=None,
                 initialize=False):
        """Constructor

        @param url: the service's REST endpoint URL
        @param securityHandler: optional handler providing auth/referer
        @param proxy_url/proxy_port: optional proxy to route requests through
        @param initialize: when True, fetch the service JSON immediately
        """
        self._url = url
        self._securityHandler = securityHandler
        if self._securityHandler is not None:
            self._referer_url = self._securityHandler.referer_url
        self._proxy_port = proxy_port
        self._proxy_url = proxy_url
        if initialize:
            self.__init()
    #----------------------------------------------------------------------
    def __init(self):
        """initializes the properties by fetching the service JSON"""
        params = {
            "f" : "json",
        }
        json_dict = self._get(self._url, params,
                              securityHandler=self._securityHandler,
                              proxy_url=self._proxy_url,
                              proxy_port=self._proxy_port)
        self._json_dict = json_dict
        self._json = json.dumps(self._json_dict)
        # Map each JSON key onto its private attribute, keyed off the
        # public (property) names exposed by this class.
        attributes = [attr for attr in dir(self)
                      if not attr.startswith('__') and \
                      not attr.startswith('_')]
        for k,v in json_dict.items():
            if k in attributes:
                setattr(self, "_"+ k, v)
            else:
                print (k, " - attribute not implemented for Mobile Service.")
    #----------------------------------------------------------------------
    def __str__(self):
        """returns object as string"""
        if self._json is None:
            self.__init()
        return self._json
    #----------------------------------------------------------------------
    def __iter__(self):
        """
        returns key/value pair
        """
        attributes = json.loads(str(self))
        for att in attributes.keys():
            yield [att, getattr(self, att)]
    #----------------------------------------------------------------------
    @property
    def layers(self):
        """gets the service layers

        Each layer dict gets an 'object' key holding a lazily-initialized
        MobileServiceLayer for that layer's sub-URL.
        """
        if self._layers is None:
            self.__init()
        # Removed an unused local accumulator; the loop annotates the layer
        # dicts in self._layers in place.
        for lyr in self._layers:
            url = self._url + "/%s" % lyr['id']
            lyr['object'] = MobileServiceLayer(url=url,
                                               securityHandler=self._securityHandler,
                                               proxy_url=self._proxy_url,
                                               proxy_port=self._proxy_port,
                                               initialize=False)
        return self._layers
    #----------------------------------------------------------------------
    @property
    def description(self):
        """gets the service description"""
        if self._description is None:
            self.__init()
        return self._description
    #----------------------------------------------------------------------
    @property
    def initialExtent(self):
        """gets the service initial extent"""
        if self._initialExtent is None:
            self.__init()
        return self._initialExtent
    #----------------------------------------------------------------------
    @property
    def spatialReference(self):
        """gets the spatial reference"""
        if self._spatialReference is None:
            self.__init()
        return self._spatialReference
    #----------------------------------------------------------------------
    @property
    def mapName(self):
        """gets the map name"""
        if self._mapName is None:
            self.__init()
        return self._mapName
    #----------------------------------------------------------------------
    @property
    def currentVersion(self):
        """gets the current version"""
        if self._currentVersion is None:
            self.__init()
        return self._currentVersion
    #----------------------------------------------------------------------
    @property
    def units(self):
        """gets the units for the service"""
        if self._units is None:
            self.__init()
        return self._units
    #----------------------------------------------------------------------
    @property
    def fullExtent(self):
        """returns the service full extent"""
        if self._fullExtent is None:
            self.__init()
        return self._fullExtent
    #----------------------------------------------------------------------
    @property
    def serviceDescription(self):
        """returns the service description"""
        if self._serviceDescription is None:
            self.__init()
        return self._serviceDescription
|
from eventlet import Timeout
import swift.common.utils
class MessageTimeout(Timeout):
    """An eventlet Timeout that carries a message describing what timed out."""
    def __init__(self, seconds=None, msg=None):
        """Initialize with an optional timeout in *seconds* and a *msg*."""
        Timeout.__init__(self, seconds=seconds)
        self.msg = msg
    def __str__(self):
        """Render as '<timeout repr>: <msg>'."""
        return '%s: %s' % (Timeout.__str__(self), self.msg)
class SwiftException(Exception):
    """Base class for most exceptions raised by this codebase."""
    pass
class PutterConnectError(Exception):
    """Connect error during a PUT; carries the HTTP status, if any."""
    def __init__(self, status=None):
        self.status = status
class InvalidTimestamp(SwiftException):
    """A timestamp value is invalid."""
    pass
class InsufficientStorage(SwiftException):
    """Not enough storage is available to satisfy the request."""
    pass
class FooterNotSupported(SwiftException):
    """The remote end does not support footers."""
    pass
class MultiphasePUTNotSupported(SwiftException):
    """The remote end does not support multiphase PUTs."""
    pass
class SuffixSyncError(SwiftException):
    """Error while syncing a partition suffix."""
    pass
class RangeAlreadyComplete(SwiftException):
    """The requested range has already been fully transferred."""
    pass
class DiskFileError(SwiftException):
    """Base class for on-disk file errors."""
    pass
class DiskFileNotOpen(DiskFileError):
    """The disk file was used before being opened."""
    pass
class DiskFileQuarantined(DiskFileError):
    """The disk file has been quarantined."""
    pass
class DiskFileCollision(DiskFileError):
    """A disk file name collision was detected."""
    pass
class DiskFileNotExist(DiskFileError):
    """The disk file does not exist."""
    pass
class DiskFileDeleted(DiskFileNotExist):
    """The disk file was deleted; exposes the deletion metadata."""
    def __init__(self, metadata=None):
        self.metadata = metadata or {}
        # Deletion time, taken from the metadata's X-Timestamp (0 if absent).
        self.timestamp = swift.common.utils.Timestamp(
            self.metadata.get('X-Timestamp', 0))
class DiskFileExpired(DiskFileDeleted):
    """The disk file has expired (handled like a deleted file)."""
    pass
class DiskFileNoSpace(DiskFileError):
    """No space left on the device holding the disk file."""
    pass
class DiskFileDeviceUnavailable(DiskFileError):
    """The device holding the disk file is unavailable."""
    pass
class DiskFileXattrNotSupported(DiskFileError):
    """Extended attributes are not supported for the disk file."""
    pass
class DeviceUnavailable(SwiftException):
    """A storage device is unavailable."""
    pass
class InvalidAccountInfo(SwiftException):
    """Account information is invalid."""
    pass
# NOTE: subclasses OSError (not SwiftException) so filesystem callers can
# treat it like any other OS-level error.
class PathNotDir(OSError):
    """A path that was expected to be a directory is not one."""
    pass
class ChunkReadError(SwiftException):
    """Error while reading a chunk of data."""
    pass
# The following timeouts subclass eventlet's Timeout (not SwiftException).
class ChunkReadTimeout(Timeout):
    """Timed out reading a chunk."""
    pass
class ChunkWriteTimeout(Timeout):
    """Timed out writing a chunk."""
    pass
class ConnectionTimeout(Timeout):
    """Timed out establishing a connection."""
    pass
class ResponseTimeout(Timeout):
    """Timed out waiting for a response."""
    pass
class DriveNotMounted(SwiftException):
    """The target drive is not mounted."""
    pass
class LockTimeout(MessageTimeout):
    """Timed out acquiring a lock; carries a message via MessageTimeout."""
    pass
class ThreadPoolDead(SwiftException):
    """Work was submitted to a thread pool that is no longer running."""
    pass
class RingBuilderError(SwiftException):
    """Base class for ring builder errors."""
    pass
class RingValidationError(RingBuilderError):
    """Ring validation failed."""
    pass
class EmptyRingError(RingBuilderError):
    """The ring is empty."""
    pass
class DuplicateDeviceError(RingBuilderError):
    """A duplicate device was added to the ring."""
    pass
class UnPicklingError(SwiftException):
    """Error while unpickling data."""
    pass
class FileNotFoundError(SwiftException):
    """A file was not found.

    NOTE(review): shadows the Python 3 builtin ``FileNotFoundError`` when
    star-imported; kept for backward compatibility.
    """
    pass
class PermissionError(SwiftException):
    """Permission was denied.

    NOTE(review): shadows the Python 3 builtin ``PermissionError`` when
    star-imported; kept for backward compatibility.
    """
    pass
class ListingIterError(SwiftException):
    """Base error raised while iterating a container/account listing."""
    pass
class ListingIterNotFound(ListingIterError):
    """The listing target was not found."""
    pass
class ListingIterNotAuthorized(ListingIterError):
    """Listing iteration was not authorized; carries the auth response."""
    def __init__(self, aresp):
        # The authorization response that denied the listing.
        self.aresp = aresp
class SegmentError(SwiftException):
    """Error relating to an object segment."""
    pass
# Plain Exception (not SwiftException) -- callers catch it separately.
class ReplicationException(Exception):
    """Replication-related error."""
    pass
class ReplicationLockTimeout(LockTimeout):
    """Timed out acquiring the replication lock."""
    pass
class MimeInvalid(SwiftException):
    """The MIME data is invalid."""
    pass
class APIVersionError(SwiftException):
    """Unsupported or invalid API version."""
    pass
class ClientException(Exception):
    """An HTTP client error carrying the full request/response context.

    __str__ renders the message followed by as much of the request URL,
    status, reason, device and response body (truncated to 60 chars) as
    is available.
    """
    def __init__(self, msg, http_scheme='', http_host='', http_port='',
                 http_path='', http_query='', http_status=0, http_reason='',
                 http_device='', http_response_content='', http_headers=None):
        Exception.__init__(self, msg)
        self.msg = msg
        self.http_scheme = http_scheme
        self.http_host = http_host
        self.http_port = http_port
        self.http_path = http_path
        self.http_query = http_query
        self.http_status = http_status
        self.http_reason = http_reason
        self.http_device = http_device
        self.http_response_content = http_response_content
        self.http_headers = http_headers or {}

    def __str__(self):
        # Reassemble whatever URL components were supplied.
        url_parts = []
        if self.http_scheme:
            url_parts.append('%s://' % self.http_scheme)
        if self.http_host:
            url_parts.append(self.http_host)
        if self.http_port:
            url_parts.append(':%s' % self.http_port)
        if self.http_path:
            url_parts.append(self.http_path)
        if self.http_query:
            url_parts.append('?%s' % self.http_query)
        detail = ''.join(url_parts)
        if self.http_status:
            detail = ('%s %s' % (detail, self.http_status)
                      if detail else str(self.http_status))
        if self.http_reason:
            detail = ('%s %s' % (detail, self.http_reason)
                      if detail else '- %s' % self.http_reason)
        if self.http_device:
            detail = ('%s: device %s' % (detail, self.http_device)
                      if detail else 'device %s' % self.http_device)
        content = self.http_response_content
        if content:
            # Keep the rendered exception short for huge response bodies.
            if len(content) <= 60:
                detail += ' %s' % content
            else:
                detail += ' [first 60 chars of response] %s' % content[:60]
        return '%s: %s' % (self.msg, detail) if detail else self.msg
class InvalidPidFileException(Exception):
    """Raised when a PID file is malformed or unusable."""
    pass
|
import datetime
import pendulum
import pytest
from airflow.example_dags.plugins.workday import AfterWorkdayTimetable
from airflow.settings import TIMEZONE
from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable
# Anchor for all scenarios below; deliberately a Saturday so the first
# scheduled run must wait for the following workweek.
START_DATE = pendulum.DateTime(2021, 9, 4, tzinfo=TIMEZONE)  # This is a Saturday.
# Monday 2021-09-06 through Friday 2021-09-10.
WEEK_1_WEEKDAYS = [
    pendulum.DateTime(2021, 9, 6, tzinfo=TIMEZONE),
    pendulum.DateTime(2021, 9, 7, tzinfo=TIMEZONE),
    pendulum.DateTime(2021, 9, 8, tzinfo=TIMEZONE),
    pendulum.DateTime(2021, 9, 9, tzinfo=TIMEZONE),
    pendulum.DateTime(2021, 9, 10, tzinfo=TIMEZONE),
]
WEEK_1_SATURDAY = pendulum.DateTime(2021, 9, 11, tzinfo=TIMEZONE)
WEEK_2_MONDAY = pendulum.DateTime(2021, 9, 13, tzinfo=TIMEZONE)
WEEK_2_TUESDAY = pendulum.DateTime(2021, 9, 14, tzinfo=TIMEZONE)
@pytest.fixture()
def restriction():
    """Time restriction anchored at START_DATE, no upper bound, catchup on."""
    return TimeRestriction(earliest=START_DATE, latest=None, catchup=True)
@pytest.fixture()
def timetable():
    """Fresh AfterWorkdayTimetable instance for each test."""
    return AfterWorkdayTimetable()
@pytest.mark.parametrize(
    "start, end",
    list(zip(WEEK_1_WEEKDAYS[:-1], WEEK_1_WEEKDAYS[1:])),
)
def test_dag_run_info_interval(start: pendulum.DateTime, end: pendulum.DateTime):
    """DagRunInfo.interval(start, end) schedules run_after at the interval end."""
    expected_info = DagRunInfo(run_after=end, data_interval=DataInterval(start, end))
    assert DagRunInfo.interval(start, end) == expected_info
def test_first_schedule(timetable: Timetable, restriction: TimeRestriction):
    """Since DAG starts on Saturday, the first ever run covers the next Monday and schedules on Tuesday."""
    next_info = timetable.next_dagrun_info(last_automated_data_interval=None, restriction=restriction)
    # Interval is Monday -> Tuesday of week 1.
    assert next_info == DagRunInfo.interval(WEEK_1_WEEKDAYS[0], WEEK_1_WEEKDAYS[1])
@pytest.mark.parametrize(
    "last_automated_data_interval, expected_next_info",
    [
        pytest.param(
            DataInterval(day, day + datetime.timedelta(days=1)),
            DagRunInfo.interval(
                day + datetime.timedelta(days=1),
                day + datetime.timedelta(days=2),
            ),
        )
        for day in WEEK_1_WEEKDAYS[:-1]  # Data intervals for Monday to Thursday.
    ],
)
def test_subsequent_weekday_schedule(
    timetable: Timetable,
    restriction: TimeRestriction,
    last_automated_data_interval: DataInterval,
    expected_next_info: DagRunInfo,
):
    """The next four subsequent runs cover the next four weekdays each."""
    next_info = timetable.next_dagrun_info(
        last_automated_data_interval=last_automated_data_interval,
        restriction=restriction,
    )
    assert next_info == expected_next_info
def test_next_schedule_after_friday(timetable: Timetable, restriction: TimeRestriction):
    """The run after Friday's run covers Monday."""
    # Friday's data interval ends on Saturday; the following run must skip
    # the whole weekend.
    last_automated_data_interval = DataInterval(WEEK_1_WEEKDAYS[-1], WEEK_1_SATURDAY)
    expected_next_info = DagRunInfo.interval(WEEK_2_MONDAY, WEEK_2_TUESDAY)
    next_info = timetable.next_dagrun_info(
        last_automated_data_interval=last_automated_data_interval,
        restriction=restriction,
    )
    assert next_info == expected_next_info
|
"""Tests for cloud_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import mock
import requests
from official.utils.logs import cloud_lib
class CloudLibTest(unittest.TestCase):
    """Unit tests for cloud_lib.on_gcp with requests.get mocked out."""

    @mock.patch("requests.get")
    def test_on_gcp(self, mock_requests_get):
        # A response from the metadata endpoint means we are on GCP.
        fake_response = mock.MagicMock()
        fake_response.status_code = 200
        mock_requests_get.return_value = fake_response
        self.assertEqual(cloud_lib.on_gcp(), True)

    @mock.patch("requests.get")
    def test_not_on_gcp(self, mock_requests_get):
        # An unreachable metadata endpoint means we are not on GCP.
        mock_requests_get.side_effect = requests.exceptions.ConnectionError()
        self.assertEqual(cloud_lib.on_gcp(), False)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
|
import numpy as np
from dipy.data import get_sphere, get_3shell_gtab, get_isbi2013_2shell_gtab
from dipy.reconst.shore import ShoreModel
from dipy.reconst.shm import QballModel, sh_to_sf
from dipy.direction.peaks import gfa, peak_directions
from numpy.testing import (assert_equal,
assert_almost_equal,
run_module_suite,
assert_array_equal,
assert_raises)
from dipy.sims.voxel import SticksAndBall
from dipy.core.subdivide_octahedron import create_unit_sphere
from dipy.core.sphere_stats import angular_similarity
from dipy.reconst.tests.test_dsi import sticks_and_ball_dummies
def test_shore_odf():
    """SHORE ODFs recover two crossing fiber directions on a synthetic
    two-stick signal, both directly and via the SH representation."""
    gtab = get_isbi2013_2shell_gtab()
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    # Two sticks crossing at 90 degrees, equal volume fractions, no noise.
    data, golden_directions = SticksAndBall(gtab, d=0.0015,
                                            S0=100, angles=[(0, 0), (90, 0)],
                                            fractions=[50, 50], snr=None)
    asm = ShoreModel(gtab, radial_order=6,
                     zeta=700, lambdaN=1e-8, lambdaL=1e-8)
    # symmetric724
    asmfit = asm.fit(data)
    odf = asmfit.odf(sphere)
    odf_sh = asmfit.odf_sh()
    # The ODF sampled from the SH coefficients must match the direct ODF.
    odf_from_sh = sh_to_sf(odf_sh, sphere, 6, basis_type=None)
    assert_almost_equal(odf, odf_from_sh, 10)
    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)
    # 5 subdivisions
    odf = asmfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)
    # Sanity-check behavior on the degenerate dummy signals as well.
    sb_dummies = sticks_and_ball_dummies(gtab)
    for sbd in sb_dummies:
        data, golden_directions = sb_dummies[sbd]
        asmfit = asm.fit(data)
        odf = asmfit.odf(sphere2)
        directions, _ , _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            # Many spurious peaks should only occur for near-isotropic ODFs.
            assert_equal(gfa(odf) < 0.1, True)
def test_multivox_shore():
    """Fit ShoreModel on a random multi-voxel volume and check that the
    coefficient array keeps the spatial shape and is real-valued."""
    gtab = get_3shell_gtab()
    data = np.random.random([20, 30, 1, gtab.gradients.shape[0]])
    radial_order = 4
    zeta = 700
    asm = ShoreModel(gtab, radial_order=radial_order,
                     zeta=zeta, lambdaN=1e-8, lambdaL=1e-8)
    asmfit = asm.fit(data)
    c_shore = asmfit.shore_coeff
    # Coefficients are computed per voxel: spatial dims are preserved.
    assert_equal(c_shore.shape[0:3], data.shape[0:3])
    # np.alltrue is deprecated (removed in NumPy 2.0); np.all is identical.
    assert_equal(np.all(np.isreal(c_shore)), True)
# Run this module's tests directly via numpy's test runner.
if __name__ == '__main__':
    run_module_suite()
|
"""Remove ditchchart waffle flag from Flag model data"""
from __future__ import unicode_literals
from django.db import migrations
def remove_ditchchart_waffle(apps, schema_editor):
    """Forward migration: delete the 'ditchchart' waffle flag if present."""
    flag_model = apps.get_model('waffle', 'Flag')
    flag_model.objects.filter(name='ditchchart').delete()
def add_ditchchart_waffle(apps, schema_editor):
    """Reverse migration: recreate the 'ditchchart' waffle flag, disabled
    for every audience."""
    flag_model = apps.get_model('waffle', 'Flag')
    flag_model.objects.create(
        name='ditchchart',
        everyone=False,
        superusers=False,
        staff=False,
        authenticated=False,
        rollout=False,
        note='',
        testing=False,
    )
class Migration(migrations.Migration):
    """Removes the 'ditchchart' waffle flag; reversible."""

    dependencies = [
        ('base', '0002_make_waffles'),
    ]
    operations = [
        # Forward removes the flag; reverse recreates it fully disabled.
        migrations.RunPython(remove_ditchchart_waffle, add_ditchchart_waffle),
    ]
|
# Public signal names exported by this module.  pre_bulk_insert and
# post_bulk_insert are defined below alongside the others and belong in
# the public API; they were previously missing from __all__, so
# "from ... import *" silently failed to export them.
__all__ = ('pre_init', 'post_init', 'pre_save', 'pre_save_post_validation',
           'post_save', 'pre_delete', 'post_delete',
           'pre_bulk_insert', 'post_bulk_insert')
# Whether real blinker-backed signals are available; consumers can check
# this flag before relying on signal delivery.
signals_available = False
try:
    from blinker import Namespace
    signals_available = True
except ImportError:
    # blinker is optional: fall back to no-op signals so modules can still
    # declare and send signals without it installed.
    class Namespace(object):
        def signal(self, name, doc=None):
            # Hand out a stub signal object for each requested name.
            return _FakeSignal(name, doc)
    class _FakeSignal(object):
        """If blinker is unavailable, create a fake class with the same
        interface that allows sending of signals but will fail with an
        error on anything else. Instead of doing anything on send, it
        will just ignore the arguments and do nothing instead.
        """
        def __init__(self, name, doc=None):
            self.name = name
            self.__doc__ = doc
        def _fail(self, *args, **kwargs):
            raise RuntimeError('signalling support is unavailable '
                               'because the blinker library is '
                               'not installed.')
        # Sending is a silent no-op; every other operation raises via _fail.
        send = lambda *a, **kw: None  # noqa
        connect = disconnect = has_receivers_for = receivers_for = \
            temporarily_connected_to = _fail
        # Drop the helper so it is not exposed as a public method.
        del _fail
# Namespace holding the document lifecycle signals (a real blinker
# namespace when available, otherwise the no-op fallback defined above).
_signals = Namespace()
pre_init = _signals.signal('pre_init')
post_init = _signals.signal('post_init')
pre_save = _signals.signal('pre_save')
pre_save_post_validation = _signals.signal('pre_save_post_validation')
post_save = _signals.signal('post_save')
pre_delete = _signals.signal('pre_delete')
post_delete = _signals.signal('post_delete')
pre_bulk_insert = _signals.signal('pre_bulk_insert')
post_bulk_insert = _signals.signal('post_bulk_insert')
|
import unittest
import numpy as np
import theano
from keras.utils.theano_utils import ndim_tensor
from keras.layers.core import *
from keras.layers.convolutional import *
from keras.layers.recurrent import SimpleRNN
def check_layer_output_shape(layer, input_data):
    """Assert that ``layer.output_shape`` matches the shape of the output
    the layer actually produces for ``input_data`` (batch dim excluded)."""
    ndim = len(input_data.shape)
    layer.input = ndim_tensor(ndim)
    layer.set_input_shape(input_data.shape[1:])
    expected_output_shape = layer.output_shape[1:]
    # Compile the layer's symbolic output and evaluate it on concrete data
    # to obtain the real output shape.
    function = theano.function([layer.input], [layer.get_output()])
    output = function(input_data)[0]
    assert output.shape[1:] == expected_output_shape
class TestShapeInference(unittest.TestCase):
    """Verify that each layer's declared output_shape matches the shape of
    the output it actually computes (via check_layer_output_shape)."""

    # ########
    # # Core #
    # ########
    def test_Reshape(self):
        layer = Reshape(dims=(2, 3))
        input_data = np.random.random((2, 6))
        check_layer_output_shape(layer, input_data)

    def test_Permute(self):
        layer = Permute(dims=(1, 3, 2))
        input_data = np.random.random((2, 2, 4, 3))
        check_layer_output_shape(layer, input_data)

    def test_Flatten(self):
        layer = Flatten()
        input_data = np.random.random((2, 2, 3))
        check_layer_output_shape(layer, input_data)

    def test_RepeatVector(self):
        layer = RepeatVector(2)
        input_data = np.random.random((2, 2))
        check_layer_output_shape(layer, input_data)

    def test_Dense(self):
        layer = Dense(3)
        input_data = np.random.random((2, 2))
        check_layer_output_shape(layer, input_data)

    def test_TimeDistributedDense(self):
        layer = TimeDistributedDense(2)
        input_data = np.random.random((2, 2, 3))
        check_layer_output_shape(layer, input_data)

    #################
    # Convolutional #
    #################
    def test_Convolution1D(self):
        for border_mode in ['same', 'full', 'valid']:
            for filter_length in [2, 3]:
                for subsample_length in [1, 2]:
                    # Strided 'same' convolution is not supported here.
                    if subsample_length > 1 and border_mode == 'same':
                        continue
                    for input_data_shape in [(2, 3, 2), (2, 4, 2)]:
                        layer = Convolution1D(nb_filter=1, filter_length=filter_length,
                                              border_mode=border_mode, subsample_length=subsample_length)
                        input_data = np.random.random(input_data_shape)
                        check_layer_output_shape(layer, input_data)

    def test_Convolution2D(self):
        for border_mode in ['same', 'full', 'valid']:
            for nb_row, nb_col in [(2, 1), (3, 2)]:
                for subsample in [(1, 1), (2, 2)]:
                    # Strided 'same' convolution is not supported here.
                    if (subsample[0] > 1 or subsample[1] > 1) and border_mode == 'same':
                        continue
                    for input_data_shape in [(2, 1, 3, 3), (2, 1, 4, 4)]:
                        # Bug fix: nb_col was previously passed nb_row, so
                        # non-square kernels were never actually exercised.
                        layer = Convolution2D(nb_filter=1, nb_row=nb_row, nb_col=nb_col,
                                              border_mode=border_mode, subsample=subsample)
                        input_data = np.random.random(input_data_shape)
                        check_layer_output_shape(layer, input_data)

    def test_MaxPooling1D(self):
        for ignore_border in [True, False]:
            for stride in [1, 2]:
                for pool_length in [1, 2]:
                    for input_data_shape in [(2, 1, 3), (2, 1, 4)]:
                        layer = MaxPooling1D(pool_length=pool_length, stride=stride, ignore_border=ignore_border)
                        input_data = np.random.random(input_data_shape)
                        check_layer_output_shape(layer, input_data)

    def test_MaxPooling2D(self):
        for ignore_border in [True, False]:
            for stride in [(1, 1), (2, 2)]:
                for pool_size in [(2, 2), (3, 3), (4, 4)]:
                    for input_data_shape in [(2, 1, 3, 3), (2, 1, 4, 4), (2, 1, 5, 5), (2, 1, 6, 6)]:
                        layer = MaxPooling2D(pool_size=pool_size, stride=stride, ignore_border=ignore_border)
                        input_data = np.random.random(input_data_shape)
                        check_layer_output_shape(layer, input_data)

    def test_UpSample1D(self):
        layer = UpSample1D(length=2)
        input_data = np.random.random((2, 2, 3))
        check_layer_output_shape(layer, input_data)

    def test_UpSample2D(self):
        layer = UpSample2D(size=(2, 2))
        input_data = np.random.random((2, 1, 2, 3))
        check_layer_output_shape(layer, input_data)

    def test_ZeroPadding1D(self):
        layer = ZeroPadding1D(1)
        input_data = np.random.random((2, 2, 1))
        check_layer_output_shape(layer, input_data)

    def test_ZeroPadding2D(self):
        layer = ZeroPadding2D((1, 2))
        input_data = np.random.random((2, 1, 2, 3))
        check_layer_output_shape(layer, input_data)

    # #############
    # # Recurrent #
    # #############
    def test_SimpleRNN(self):
        # all recurrent layers inherit output_shape
        # from the same base recurrent layer
        layer = SimpleRNN(2)
        input_data = np.random.random((2, 2, 3))
        check_layer_output_shape(layer, input_data)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
|
import bs4 as BeautifulSoup
from DOM.W3C import w3c
from OS.Windows import security_sys
from DOM.W3C.NamedNodeMap import NamedNodeMap
import logging
log = logging.getLogger("Thug")
def loadXML(self, bstrXML):
    """Parse bstrXML into a DOM and flag attempts to load res:// URIs.

    Every res:// URI found in the document is logged; if the URI mentions
    one of the names in ``security_sys`` the parse error code is cleared.
    """
    self.xml = w3c.parseString(bstrXML)
    #self.attributes = NamedNodeMap(self.xml._node)
    if "res://" not in bstrXML:
        return
    # The res:// URI may be wrapped in either double or single quotes;
    # scan both splittings instead of duplicating the loop body (the
    # original repeated this loop verbatim for '"' and "'").
    for quote in ('"', "'"):
        for part in bstrXML.split(quote):
            if not part.startswith("res://"):
                continue
            log.ThugLogging.add_behavior_warn("[Microsoft XMLDOM ActiveX] Attempting to load %s" % (part, ))
            # Renamed loop variable: 'sys' shadowed the stdlib module name.
            if any(name.lower() in part.lower() for name in security_sys):
                self.parseError._errorCode = 0
|
"""
rv_smartcard.py - Testing software smartcards using remote-viewer
Requires: connected binaries remote-viewer, Xorg, gnome session
The test also assumes that the guest is setup with the correct
options to handle smartcards.
"""
import logging
from autotest.client.shared import error
def run(test, params, env):
    """
    Tests software smartcard certificates through remote-viewer.

    Depending on ``smartcard_testtype`` this either lists the certs on the
    guest (``pkcs11_listcerts``) or inspects them (``pklogin_finder``) and
    verifies that every cert in ``gencerts`` is visible; afterwards the
    certs are removed from the client's cert database.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    # Get the required parameters needed for the tests
    cert_list = params.get("gencerts").split(",")
    cert_db = params.get("certdb")
    smartcard_testtype = params.get("smartcard_testtype")
    listcerts_output = ""
    certsinfo_output = ""
    searchstr = params.get("certcheckstr")
    certstr = params.get("certcheckstr2")
    certcheck1 = params.get("certcheck3")
    certcheck2 = params.get("certcheck4")
    guest_vm = env.get_vm(params["guest_vm"])
    guest_vm.verify_alive()
    guest_session = guest_vm.wait_for_login(
        timeout=int(params.get("login_timeout", 360)),
        username="root", password="123456")
    client_vm = env.get_vm(params["client_vm"])
    client_vm.verify_alive()
    client_session = client_vm.wait_for_login(
        timeout=int(params.get("login_timeout", 360)),
        username="root", password="123456")
    # Verify remote-viewer is running
    try:
        pid = client_session.cmd("pgrep remote-viewer")
        logging.info("remote-viewer is running as PID %s", pid.strip())
    except Exception:
        raise error.TestFail("remote-viewer is not running")
    # verify the smart card reader can be seen
    output = guest_session.cmd("lsusb")
    logging.debug("lsusb output: " + output)
    if "Gemalto (was Gemplus) GemPC433-Swap" in output:
        logging.info("Smartcard reader, Gemalto GemPC433-Swap detected.")
    else:
        raise error.TestFail("No smartcard reader found")
    if smartcard_testtype == "pkcs11_listcerts":
        # pkcs11_listcerts not installed until Smart Card Support is installed
        try:
            output = guest_session.cmd_output("pkcs11_listcerts")
        except Exception:
            # Expected to get a shell timeout error,
            # listing certs prompts for PIN
            try:
                # Send a carriage return for PIN for token
                listcerts_output = guest_session.cmd("")
            except Exception:
                raise error.TestFail("Test failed trying to get the output"
                                     " of pkcs11_listcerts")
        logging.info("Listing Certs available on the guest: " +
                     listcerts_output)
        # Every generated cert must show up as a listed cert on the guest.
        for cert in cert_list:
            subj_string = "CN=" + cert
            if subj_string not in listcerts_output:
                raise error.TestFail("Certificate %s was not found as a listed"
                                     " cert in the guest" % subj_string)
            logging.debug(subj_string + " has been found" +
                          " as a listed cert in the guest")
    elif smartcard_testtype == "pklogin_finder":
        # pklogin_finder not installed until
        # Smart Card Support is installed
        try:
            certsinfo_output = guest_session.cmd("pklogin_finder debug")
        except Exception:
            # Expected to get a shell timeout error,
            # listing certs prompts for PIN
            try:
                # Send a carriage return for PIN for token
                certsinfo_output = guest_session.cmd("", ok_status=[0, 1])
            except Exception:
                raise error.TestFail("Test failed trying to get the output"
                                     " of pklogin_finder")
        testindex = certsinfo_output.find(searchstr)
        if testindex < 0:
            raise error.TestFail(searchstr + " not found in output of pklogin"
                                 " on the guest")
        string_aftercheck = certsinfo_output[testindex:]
        # Walk the cert list in order, narrowing the search window after
        # each match so the marker, subject and both check strings must
        # appear in sequence for every cert.  Guard clauses replace the
        # original deeply nested if/else pyramid.
        for index, cert in enumerate(cert_list):
            subj_string = "CN=" + cert
            checkstr = certstr + str(index + 1)
            testindex = string_aftercheck.find(checkstr)
            if testindex < 0:
                raise error.TestFail(checkstr + " not found in output of "
                                     "pklogin on the guest")
            logging.debug("Found " + checkstr + " in output of pklogin")
            string_aftercheck = string_aftercheck[testindex:]
            testindex2 = string_aftercheck.find(subj_string)
            # Bug fix: the original re-tested the stale 'testindex' here
            # (always >= 0 at this point), so a missing subject string was
            # never detected.
            if testindex2 < 0:
                raise error.TestFail("Common name %s, not found "
                                     "in pklogin_finder after software "
                                     "smartcard was inserted into the "
                                     "guest" % subj_string)
            logging.debug("Found " + subj_string + " in output of pklogin")
            string_aftercheck = string_aftercheck[testindex2:]
            testindex3 = string_aftercheck.find(certcheck1)
            if testindex3 < 0:
                raise error.TestFail(certcheck1 + " not found in "
                                     "output of pklogin on the"
                                     " guest")
            logging.debug("Found " + certcheck1 + " in output of pklogin")
            string_aftercheck = string_aftercheck[testindex3:]
            testindex4 = string_aftercheck.find(certcheck2)
            if testindex4 < 0:
                raise error.TestFail(certcheck2 + " not found"
                                     " in output of pklogin "
                                     "on the guest")
            logging.debug("Found " + certcheck2 + " in output of pklogin")
        logging.info("Certs Info on the guest: " + certsinfo_output)
    else:
        raise error.TestFail("Please specify a valid smartcard testtype")
    # Do some cleanup: for each cert listed by the test, delete it from
    # the client's cert database.
    for cert in cert_list:
        cmd = "certutil "
        cmd += "-D -n '" + cert + "' -d " + cert_db
        try:
            output = client_session.cmd(cmd)
            # Log inside the try so a failed certutil never reports stale
            # output from a previous command.
            logging.debug("Output of " + cmd + ": " + output)
        except Exception:
            logging.warning(
                "Deleting of %s certificate from the client failed",
                cert)
    client_session.close()
    guest_session.close()
|
from freestyle.chainingiterators import ChainSilhouetteIterator
from freestyle.predicates import (
AndUP1D,
NotUP1D,
QuantitativeInvisibilityUP1D,
TrueUP1D,
pyIsInOccludersListUP1D,
)
from freestyle.shaders import (
ConstantColorShader,
ConstantThicknessShader,
SamplingShader,
)
from freestyle.types import Id, Operators
# Select strokes by visibility and membership of the occluders list of the
# object identified below.  Renamed from 'id' to avoid shadowing the
# builtin id().
target_id = Id(3, 0)
upred = AndUP1D(NotUP1D(QuantitativeInvisibilityUP1D(0)), pyIsInOccludersListUP1D(target_id))
Operators.select(upred)
Operators.bidirectional_chain(ChainSilhouetteIterator(), NotUP1D(upred))
# Thin, uniform grey strokes.
shaders_list = [
    SamplingShader(5),
    ConstantThicknessShader(3),
    ConstantColorShader(0.3, 0.3, 0.3, 1),
]
Operators.create(TrueUP1D(), shaders_list)
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import sys
import time
import yaml
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import optparse_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_native, to_text
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
# Module-level display singleton used for all CLI output.
display = Display()
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
VALID_ACTIONS = frozenset(("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup"))
    def __init__(self, args):
        # Populated later in run(); kept as attributes so the execute_*
        # methods can reach the API client and Galaxy context.
        self.api = None
        self.galaxy = None
        super(GalaxyCLI, self).__init__(args)
    def set_action(self):
        """Configure self.parser (usage, description, options) for the
        sub-command selected by the base class's set_action()."""
        super(GalaxyCLI, self).set_action()
        # specific to actions
        if self.action == "delete":
            self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
            self.parser.set_description("Removes the role from Galaxy. It does not remove or alter the actual GitHub repository.")
        elif self.action == "import":
            self.parser.set_usage("usage: %prog import [options] github_user github_repo")
            self.parser.set_description("Import a role.")
            self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
            self.parser.add_option('--branch', dest='reference',
                                   help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
            self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
            self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
                                   help='Check the status of the most recent import request for given github_user/github_repo.')
        elif self.action == "info":
            self.parser.set_usage("usage: %prog info [options] role_name[,version]")
            self.parser.set_description("View more details about a specific role.")
        elif self.action == "init":
            self.parser.set_usage("usage: %prog init [options] role_name")
            self.parser.set_description("Initialize new role with the base structure of a role.")
            self.parser.add_option('--init-path', dest='init_path', default="./",
                                   help='The path in which the skeleton role will be created. The default is the current working directory.')
            self.parser.add_option('--type', dest='role_type', action='store', default='default',
                                   help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
            self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON,
                                   help='The path to a role skeleton that the new role should be based upon.')
        elif self.action == "install":
            self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
            self.parser.set_description("Install Roles from file(s), URL(s) or tar file(s)")
            self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                                   help='Ignore errors and continue with the next specified role.')
            self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
            self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
            self.parser.add_option('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
                                   default=False, help='Use tar instead of the scm archive option when packaging the role')
        elif self.action == "remove":
            self.parser.set_usage("usage: %prog remove role1 role2 ...")
            self.parser.set_description("Delete a role from roles_path.")
        elif self.action == "list":
            self.parser.set_usage("usage: %prog list [role_name]")
            self.parser.set_description("Show the name and version of each role installed in the roles_path.")
        elif self.action == "login":
            self.parser.set_usage("usage: %prog login [options]")
            self.parser.set_description("Login to api.github.com server in order to use ansible-galaxy sub command such as 'import', 'delete' and 'setup'.")
            self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
        elif self.action == "search":
            self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
                                  "[--author username]")
            self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
            self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
            self.parser.add_option('--author', dest='author', help='GitHub username')
            self.parser.set_description("Search the Galaxy database by tags, platforms, author and multiple keywords.")
        elif self.action == "setup":
            self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
            self.parser.add_option('--remove', dest='remove_id', default=None,
                                   help='Remove the integration matching the provided ID value. Use --list to see ID values.')
            self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
            self.parser.set_description("Manage the integration between Galaxy and the given source.")
        # options that apply to more than one action
        if self.action in ['init', 'info']:
            self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
        if self.action not in ("delete", "import", "init", "login", "setup"):
            # NOTE: while the option type=str, the default is a list, and the
            # callback will set the value to a list.
            self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=opt_help.unfrack_paths, default=C.DEFAULT_ROLES_PATH,
                                   help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg'
                                        ' file (/etc/ansible/roles if not configured)', type='str')
        if self.action in ("init", "install"):
            self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
        if self.action == "install":
            self.parser.add_option('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
                                   help="Force overwriting an existing role and it's dependencies")
    def init_parser(self):
        ''' create an options parser for the ansible-galaxy CLI '''
        super(GalaxyCLI, self).init_parser(
            usage="usage: %%prog [%s] [--help] [options] ..." % "|".join(sorted(self.VALID_ACTIONS)),
            epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]),
            desc="Perform various Role related operations.",
        )
        # common
        self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
        self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
                               help='Ignore SSL certificate validation errors.')
        # action-specific options are appended by set_action()
        self.set_action()
    def post_process_args(self, options, args):
        """Propagate the parsed CLI verbosity to the global display object."""
        options, args = super(GalaxyCLI, self).post_process_args(options, args)
        display.verbosity = options.verbosity
        return options, args
    def run(self):
        """Entry point: set up the Galaxy context and API client, then
        dispatch to the execute_* method for the selected action."""
        super(GalaxyCLI, self).run()
        self.galaxy = Galaxy()
        self.api = GalaxyAPI(self.galaxy)
        self.execute()
    @staticmethod
    def exit_without_ignore(rc=1):
        """
        Raise an AnsibleError unless --ignore-errors was specified.

        NOTE(review): despite the name and the ``rc`` parameter, this
        raises rather than exiting, and ``rc`` is never used.
        """
        if not context.CLIARGS['ignore_errors']:
            raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
    def execute_init(self):
        """
        creates the skeleton framework of a role that complies with the galaxy metadata format.
        """
        init_path = context.CLIARGS['init_path']
        force = context.CLIARGS['force']
        role_skeleton = context.CLIARGS['role_skeleton']
        role_name = context.CLIARGS['args'][0].strip() if context.CLIARGS['args'] else None
        if not role_name:
            raise AnsibleOptionsError("- no role name specified for init")
        role_path = os.path.join(init_path, role_name)
        if os.path.exists(role_path):
            if os.path.isfile(role_path):
                raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
            elif not force:
                raise AnsibleError("- the directory %s already exists."
                                   "you can use --force to re-initialize this directory,\n"
                                   "however it will reset any main.yml files that may have\n"
                                   "been modified there already." % role_path)
        # Values substituted into the skeleton's Jinja2 templates.
        inject_data = dict(
            role_name=role_name,
            author='your name',
            description='your description',
            company='your company (optional)',
            license='license (GPL-2.0-or-later, MIT, etc)',
            issue_tracker_url='http://example.com/issue/tracker',
            min_ansible_version='2.4',
            role_type=context.CLIARGS['role_type']
        )
        # create role directory
        if not os.path.exists(role_path):
            os.makedirs(role_path)
        if role_skeleton is not None:
            skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
        else:
            # No custom skeleton given: fall back to the bundled default.
            role_skeleton = self.galaxy.default_role_skeleton_path
            skeleton_ignore_expressions = ['^.*/.git_keep$']
        role_skeleton = os.path.expanduser(role_skeleton)
        skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
        template_env = Environment(loader=FileSystemLoader(role_skeleton))
        # Walk the skeleton: render .j2 files as templates (except under
        # templates/, which must stay as templates for the role itself) and
        # copy everything else verbatim, honoring the ignore patterns.
        for root, dirs, files in os.walk(role_skeleton, topdown=True):
            rel_root = os.path.relpath(root, role_skeleton)
            in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
            # Prune ignored directories in place so os.walk skips them.
            dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
            for f in files:
                filename, ext = os.path.splitext(f)
                if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
                    continue
                elif ext == ".j2" and not in_templates_dir:
                    src_template = os.path.join(rel_root, f)
                    # Rendered file drops the .j2 extension.
                    dest_file = os.path.join(role_path, rel_root, filename)
                    template_env.get_template(src_template).stream(inject_data).dump(dest_file)
                else:
                    f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
                    shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))
            for d in dirs:
                dir_path = os.path.join(role_path, rel_root, d)
                if not os.path.exists(dir_path):
                    os.makedirs(dir_path)
        display.display("- %s was created successfully" % role_name)
    def execute_info(self):
        """
        prints out detailed information about an installed role as well as info available from the galaxy API.
        """
        if not context.CLIARGS['args']:
            # the user needs to specify a role
            raise AnsibleOptionsError("- you must specify a user/role name")
        roles_path = context.CLIARGS['roles_path']
        data = ''
        # NOTE(review): data is overwritten on each iteration, so only the
        # last role's info reaches the pager below — confirm intended.
        for role in context.CLIARGS['args']:
            role_info = {'path': roles_path}
            gr = GalaxyRole(self.galaxy, role)
            install_info = gr.install_info
            if install_info:
                if 'version' in install_info:
                    # Rename so it cannot collide with the galaxy metadata
                    # 'version' key merged in below.
                    install_info['installed_version'] = install_info['version']
                    del install_info['version']
                role_info.update(install_info)
            remote_data = False
            if not context.CLIARGS['offline']:
                remote_data = self.api.lookup_role_by_name(role, False)
            if remote_data:
                role_info.update(remote_data)
            if gr.metadata:
                role_info.update(gr.metadata)
            req = RoleRequirement()
            role_spec = req.role_yaml_parse({'role': role})
            if role_spec:
                role_info.update(role_spec)
            data = self._display_role_info(role_info)
            # FIXME: This is broken in both 1.9 and 2.0 as
            # _display_role_info() always returns something
            if not data:
                data = u"\n- the role %s was not found" % role
        self.pager(data)
def execute_install(self):
    """
    uses the args list of roles to be installed, unless -f was specified. The list of roles
    can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.

    Roles come either from a requirements file (--role-file, YAML only)
    or from positional args. Dependencies of installed roles are queued
    onto the same work list unless --no-deps was given; --force-with-deps
    additionally re-installs already-present dependencies.
    """
    role_file = context.CLIARGS['role_file']

    if not context.CLIARGS['args'] and role_file is None:
        # the user needs to specify one of either --role-file or specify a single user/role name
        raise AnsibleOptionsError("- you must specify a user/role name or a roles file")

    no_deps = context.CLIARGS['no_deps']
    force_deps = context.CLIARGS['force_with_deps']
    if no_deps and force_deps:
        raise AnsibleOptionsError("You cannot both force dependencies and no dependencies")
    # forcing dependency re-install implies forcing the roles themselves
    force = context.CLIARGS['force'] or force_deps

    roles_left = []
    if role_file:
        try:
            f = open(role_file, 'r')
            if role_file.endswith('.yaml') or role_file.endswith('.yml'):
                try:
                    required_roles = yaml.safe_load(f.read())
                except Exception as e:
                    raise AnsibleError("Unable to load data from the requirements file (%s): %s" % (role_file, to_native(e)))

                if required_roles is None:
                    raise AnsibleError("No roles found in file: %s" % role_file)

                for role in required_roles:
                    if "include" not in role:
                        role = RoleRequirement.role_yaml_parse(role)
                        display.vvv("found role %s in yaml file" % str(role))
                        if "name" not in role and "scm" not in role:
                            raise AnsibleError("Must specify name or src for role")
                        roles_left.append(GalaxyRole(self.galaxy, **role))
                    else:
                        # an "include" entry pulls in a nested requirements file
                        with open(role["include"]) as f_include:
                            try:
                                roles_left += [
                                    GalaxyRole(self.galaxy, **r) for r in
                                    (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
                                ]
                            except Exception as e:
                                msg = "Unable to load data from the include requirements file: %s %s"
                                raise AnsibleError(msg % (role_file, e))
            else:
                # only YAML requirements files are supported here
                raise AnsibleError("Invalid role requirements file")
            f.close()
        except (IOError, OSError) as e:
            raise AnsibleError('Unable to open %s: %s' % (role_file, to_native(e)))
    else:
        # roles were specified directly, so we'll just go out grab them
        # (and their dependencies, unless the user doesn't want us to).
        for rname in context.CLIARGS['args']:
            role = RoleRequirement.role_yaml_parse(rname.strip())
            roles_left.append(GalaxyRole(self.galaxy, **role))

    # work list: dependencies discovered below are appended while iterating
    for role in roles_left:
        # only process roles in roles files when names matches if given
        if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
            display.vvv('Skipping role %s' % role.name)
            continue

        display.vvv('Processing role %s ' % role.name)

        # query the galaxy API for the role data
        if role.install_info is not None:
            if role.install_info['version'] != role.version or force:
                if force:
                    display.display('- changing role %s from %s to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    role.remove()
                else:
                    display.warning('- %s (%s) is already installed - use --force to change version to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    continue
            else:
                if not force:
                    display.display('- %s is already installed, skipping.' % str(role))
                    continue

        try:
            installed = role.install()
        except AnsibleError as e:
            # best-effort: report, honor --ignore-errors, move on
            display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
            self.exit_without_ignore()
            continue

        # install dependencies, if we want them
        if not no_deps and installed:
            if not role.metadata:
                display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
            else:
                role_dependencies = role.metadata.get('dependencies') or []
                for dep in role_dependencies:
                    display.debug('Installing dep %s' % dep)
                    dep_req = RoleRequirement()
                    dep_info = dep_req.role_yaml_parse(dep)
                    dep_role = GalaxyRole(self.galaxy, **dep_info)
                    if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                        # we know we can skip this, as it's not going to
                        # be found on galaxy.ansible.com
                        continue
                    if dep_role.install_info is None:
                        if dep_role not in roles_left:
                            display.display('- adding dependency: %s' % to_text(dep_role))
                            roles_left.append(dep_role)
                        else:
                            display.display('- dependency %s already pending installation.' % dep_role.name)
                    else:
                        if dep_role.install_info['version'] != dep_role.version:
                            if force_deps:
                                display.display('- changing dependant role %s from %s to %s' %
                                                (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
                                dep_role.remove()
                                roles_left.append(dep_role)
                            else:
                                display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
                                                (to_text(dep_role), role.name, dep_role.install_info['version']))
                        else:
                            if force_deps:
                                roles_left.append(dep_role)
                            else:
                                display.display('- dependency %s is already installed, skipping.' % dep_role.name)

        if not installed:
            display.warning("- %s was NOT installed successfully." % role.name)
            self.exit_without_ignore()

    return 0
def execute_remove(self):
    """Delete every role named on the command line from the local system.

    Reports per-role success or "not installed"; any other failure is
    wrapped in an AnsibleError. Returns 0 on completion.
    """
    names = context.CLIARGS['args']
    if not names:
        raise AnsibleOptionsError('- you must specify at least one role to remove.')

    for name in names:
        installed_role = GalaxyRole(self.galaxy, name)
        try:
            if installed_role.remove():
                display.display('- successfully removed %s' % name)
            else:
                display.display('- %s is not installed, skipping.' % name)
        except Exception as e:
            raise AnsibleError("Failed to remove role %s: %s" % (name, to_native(e)))

    return 0
def execute_list(self):
    """
    lists the roles installed on the local system or matches a single role passed as an argument.

    With one positional arg, shows just that role; with none, walks
    every configured roles_path entry and lists each role that has
    metadata. Returns 0; raises AnsibleOptionsError on bad input or
    when no configured path is usable.
    """
    if len(context.CLIARGS['args']) > 1:
        raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")

    def _display_role(gr):
        # one line per role: "- name, version" with a fallback label
        # when no version was recorded at install time
        install_info = gr.install_info
        version = None
        if install_info:
            version = install_info.get("version", None)
        if not version:
            version = "(unknown version)"
        display.display("- %s, %s" % (gr.name, version))

    if context.CLIARGS['args']:
        # show the requested role, if it exists
        name = context.CLIARGS['args'][0]
        gr = GalaxyRole(self.galaxy, name)
        if gr.metadata:
            display.display('# %s' % os.path.dirname(gr.path))
            _display_role(gr)
        else:
            display.display("- the role %s was not found" % name)
    else:
        # show all valid roles in the roles_path directory
        roles_path = context.CLIARGS['roles_path']
        path_found = False
        warnings = []
        for path in roles_path:
            role_path = os.path.expanduser(path)
            if not os.path.exists(role_path):
                # collect warnings; only emit them after listing so the
                # listing itself stays contiguous
                warnings.append("- the configured path %s does not exist." % role_path)
                continue
            elif not os.path.isdir(role_path):
                warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
                continue
            display.display('# %s' % role_path)
            path_files = os.listdir(role_path)
            path_found = True
            for path_file in path_files:
                gr = GalaxyRole(self.galaxy, path_file, path=path)
                # entries without metadata are not roles; skip silently
                if gr.metadata:
                    _display_role(gr)
        for w in warnings:
            display.warning(w)
        if not path_found:
            raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
    return 0
def execute_search(self):
    ''' searches for roles on the Ansible Galaxy server

    Positional args are joined with '+' into one search term; results
    are formatted into an aligned name/description table and paged.
    Returns True (even when nothing matched).
    '''
    page_size = 1000
    search = None

    if context.CLIARGS['args']:
        search = '+'.join(context.CLIARGS['args'])

    if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
        raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")

    response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
                                     tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)

    if response['count'] == 0:
        display.display("No roles match your search.", color=C.COLOR_ERROR)
        return True

    data = [u'']

    if response['count'] > page_size:
        data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
    else:
        data.append(u"Found %d roles matching your search:" % response['count'])

    # width of the widest "username.name" drives the column layout
    max_len = []
    for role in response['results']:
        max_len.append(len(role['username'] + '.' + role['name']))
    name_len = max(max_len)
    format_str = u" %%-%ds %%s" % name_len
    data.append(u'')
    data.append(format_str % (u"Name", u"Description"))
    data.append(format_str % (u"----", u"-----------"))
    for role in response['results']:
        data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))

    data = u'\n'.join(data)
    self.pager(data)

    return True
def execute_login(self):
    """
    verify user's identity via GitHub and retrieve an auth token from Ansible Galaxy.

    Token source priority: --token flag, then C.GALAXY_TOKEN config,
    then an interactive GitHub login. The resulting Galaxy token is
    persisted via GalaxyToken. Returns 0.
    """
    # Authenticate with github and retrieve a token
    if context.CLIARGS['token'] is None:
        if C.GALAXY_TOKEN:
            github_token = C.GALAXY_TOKEN
        else:
            login = GalaxyLogin(self.galaxy)
            github_token = login.create_github_token()
    else:
        github_token = context.CLIARGS['token']

    galaxy_response = self.api.authenticate(github_token)

    if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
        # Remove the token we created
        # NOTE(review): `login` only exists when the interactive branch
        # above ran; this guard relies on C.GALAXY_TOKEN being exactly
        # None (not merely falsy, e.g. "") — confirm that invariant.
        login.remove_github_token()

    # Store the Galaxy token
    token = GalaxyToken()
    token.set(galaxy_response['token'])

    display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
    return 0
def execute_import(self):
    """ used to import a role into Ansible Galaxy

    Either checks the status of an existing import (--check-status) or
    submits a new import task for github_user/github_repo. With --wait
    (or --check-status) it polls the task every 10s, printing new task
    messages color-coded by severity, until SUCCESS or FAILED.
    Returns 0.
    """
    # map Galaxy task-message severities to display colors
    colors = {
        'INFO': 'normal',
        'WARNING': C.COLOR_WARN,
        'ERROR': C.COLOR_ERROR,
        'SUCCESS': C.COLOR_OK,
        'FAILED': C.COLOR_ERROR,
    }

    if len(context.CLIARGS['args']) < 2:
        raise AnsibleError("Expected a github_username and github_repository. Use --help.")

    github_user = to_text(context.CLIARGS['args'][0], errors='surrogate_or_strict')
    github_repo = to_text(context.CLIARGS['args'][1], errors='surrogate_or_strict')

    if context.CLIARGS['check_status']:
        task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
    else:
        # Submit an import request
        task = self.api.create_import_task(github_user, github_repo,
                                           reference=context.CLIARGS['reference'],
                                           role_name=context.CLIARGS['role_name'])

        if len(task) > 1:
            # found multiple roles associated with github_user/github_repo
            display.display("WARNING: More than one Galaxy role associated with GitHub repo %s/%s." % (github_user, github_repo),
                            color='yellow')
            display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
            for t in task:
                display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
            display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
                            color=C.COLOR_CHANGED)
            # ambiguous import: bail out without waiting
            return 0
        # found a single role as expected
        display.display("Successfully submitted import request %d" % task[0]['id'])
        if not context.CLIARGS['wait']:
            display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
            display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))

    if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
        # Get the status of the import
        # msg_list tracks already-printed message ids so each poll only
        # shows new output
        msg_list = []
        finished = False
        while not finished:
            task = self.api.get_import_task(task_id=task[0]['id'])
            for msg in task[0]['summary_fields']['task_messages']:
                if msg['id'] not in msg_list:
                    display.display(msg['message_text'], color=colors[msg['message_type']])
                    msg_list.append(msg['id'])
            if task[0]['state'] in ['SUCCESS', 'FAILED']:
                finished = True
            else:
                time.sleep(10)

    return 0
def execute_setup(self):
    """ Setup an integration from GitHub or Travis for Ansible Galaxy roles

    Modes, selected by CLI flags:
      * --list: print the existing integration secrets and exit.
      * --remove ID: delete the secret with the given id.
      * otherwise: add a new integration from the four positional
        args: source github_user github_repo secret.

    :returns: 0 on success.
    :raises AnsibleError: when adding an integration with fewer than
        four positional arguments.
    """
    if context.CLIARGS['setup_list']:
        # List existing integration secrets
        secrets = self.api.list_secrets()
        if len(secrets) == 0:
            # None found
            display.display("No integrations found.")
            return 0
        display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
        display.display("---------- ---------- ----------", color=C.COLOR_OK)
        for secret in secrets:
            display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
                                                   secret['github_repo']), color=C.COLOR_OK)
        return 0

    if context.CLIARGS['remove_id']:
        # Remove a secret
        self.api.remove_secret(context.CLIARGS['remove_id'])
        # (fixed typo: message previously read "will not longer work")
        display.display("Secret removed. Integrations using this secret will no longer work.", color=C.COLOR_OK)
        return 0

    if len(context.CLIARGS['args']) < 4:
        raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")

    source = context.CLIARGS['args'][0]
    github_user = context.CLIARGS['args'][1]
    github_repo = context.CLIARGS['args'][2]
    secret = context.CLIARGS['args'][3]

    resp = self.api.add_secret(source, github_user, github_repo, secret)
    display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))

    return 0
def execute_delete(self):
    """ Delete a role from Ansible Galaxy.

    Takes two positional args (github_user, github_repo), asks the API
    to delete the matching role(s), and reports what was removed.
    Returns True.
    """
    args = context.CLIARGS['args']
    if len(args) < 2:
        raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")

    github_user = args[0]
    github_repo = args[1]
    resp = self.api.delete_role(github_user, github_repo)

    deleted = resp['deleted_roles']
    if len(deleted) > 1:
        display.display("Deleted the following roles:")
        display.display("ID User Name")
        display.display("------ --------------- ----------")
        for role in deleted:
            display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))

    display.display(resp['status'])
    return True
|
import re
import urllib
import base64
from boto.connection import AWSAuthConnection
from boto.exception import BotoServerError
from boto.regioninfo import RegionInfo
import boto
import boto.jsonresponse
from boto.ses import exceptions as ses_exceptions
class SESConnection(AWSAuthConnection):
    """Connection to the Amazon SES (Simple Email Service) query API.

    Handles request signing (via the 'ses' auth capability), request
    dispatch, XML response parsing and error-to-exception translation
    for the 2010-12-01 API version.
    """

    # Raised for SES errors that don't map to a more specific exception.
    ResponseError = BotoServerError
    # Used when the caller does not supply a region.
    DefaultRegionName = 'us-east-1'
    DefaultRegionEndpoint = 'email.us-east-1.amazonaws.com'
    # API version string sent with every request.
    APIVersion = '2010-12-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
             is_secure=True, port=None, proxy=None, proxy_port=None,
             proxy_user=None, proxy_pass=None, debug=0,
             https_connection_factory=None, region=None, path='/',
             security_token=None, validate_certs=True, profile_name=None):
    """Create an SES connection.

    All arguments are forwarded to ``AWSAuthConnection``. When
    ``region`` is not supplied, the class defaults (us-east-1) are
    used to build a ``RegionInfo``.
    """
    if not region:
        region = RegionInfo(self, self.DefaultRegionName,
                            self.DefaultRegionEndpoint)
    self.region = region
    super(SESConnection, self).__init__(self.region.endpoint,
                                        aws_access_key_id, aws_secret_access_key,
                                        is_secure, port, proxy, proxy_port,
                                        proxy_user, proxy_pass, debug,
                                        https_connection_factory, path,
                                        security_token=security_token,
                                        validate_certs=validate_certs,
                                        profile_name=profile_name)
def _required_auth_capability(self):
    # Tell boto's auth machinery to sign requests with the SES scheme.
    capability = ['ses']
    return capability
def _build_list_params(self, params, items, label):
    """Add an AWS API-compatible parameter list to a dictionary.

    A bare string is treated as a one-element list. Each item is stored
    under a 1-based indexed key, e.g. ``label.1``, ``label.2``.

    :type params: dict
    :param params: The parameter dictionary (mutated in place)

    :type items: list
    :param items: Items to be included in the list

    :type label: string
    :param label: The parameter list's name
    """
    # ``basestring`` exists only on Python 2; fall back to ``str`` so the
    # string check doesn't raise NameError if run under Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(items, string_types):
        items = [items]
    for i in range(1, len(items) + 1):
        params['%s.%d' % (label, i)] = items[i - 1]
def _make_request(self, action, params=None):
    """Make a call to the SES API.

    :type action: string
    :param action: The API method to use (e.g. SendRawEmail)

    :type params: dict
    :param params: Parameters that will be sent as POST data with the API
      call.

    :returns: a parsed ``boto.jsonresponse.Element`` on HTTP 200;
        otherwise delegates to ``_handle_error``, which always raises.
    """
    ct = 'application/x-www-form-urlencoded; charset=UTF-8'
    headers = {'Content-Type': ct}
    params = params or {}
    params['Action'] = action

    # Python 2 code path: encode unicode values so urlencode gets bytes.
    for k, v in params.items():
        if isinstance(v, unicode):  # UTF-8 encode only if it's Unicode
            params[k] = v.encode('utf-8')

    response = super(SESConnection, self).make_request(
        'POST',
        '/',
        headers=headers,
        data=urllib.urlencode(params)
    )
    body = response.read()
    if response.status == 200:
        # These element names are lists/items in the XML response; the
        # parser needs them called out to build proper list structures.
        list_markers = ('VerifiedEmailAddresses', 'Identities',
                        'DkimTokens', 'VerificationAttributes',
                        'SendDataPoints')
        item_markers = ('member', 'item', 'entry')

        e = boto.jsonresponse.Element(list_marker=list_markers,
                                      item_marker=item_markers)
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    else:
        # HTTP codes other than 200 are considered errors. Go through
        # some error handling to determine which exception gets raised,
        self._handle_error(response, body)
def _handle_error(self, response, body):
    """
    Handle raising the correct exception, depending on the error. Many
    errors share the same HTTP response code, meaning we have to get really
    kludgey and do string searches to figure out what went wrong.

    Always raises; never returns normally.
    """
    boto.log.error('%s %s' % (response.status, response.reason))
    boto.log.error('%s' % body)

    if "Address blacklisted." in body:
        # Delivery failures happened frequently enough with the recipient's
        # email address for Amazon to blacklist it. After a day or three,
        # they'll be automatically removed, and delivery can be attempted
        # again (if you write the code to do so in your application).
        ExceptionToRaise = ses_exceptions.SESAddressBlacklistedError
        exc_reason = "Address blacklisted."
    elif "Email address is not verified." in body:
        # This error happens when the "Reply-To" value passed to
        # send_email() hasn't been verified yet.
        ExceptionToRaise = ses_exceptions.SESAddressNotVerifiedError
        exc_reason = "Email address is not verified."
    elif "Daily message quota exceeded." in body:
        # Encountered when your account exceeds the maximum total number
        # of emails per 24 hours.
        ExceptionToRaise = ses_exceptions.SESDailyQuotaExceededError
        exc_reason = "Daily message quota exceeded."
    elif "Maximum sending rate exceeded." in body:
        # Your account has sent above its allowed requests a second rate.
        ExceptionToRaise = ses_exceptions.SESMaxSendingRateExceededError
        exc_reason = "Maximum sending rate exceeded."
    elif "Domain ends with dot." in body:
        # Recipient address ends with a dot/period. This is invalid.
        ExceptionToRaise = ses_exceptions.SESDomainEndsWithDotError
        exc_reason = "Domain ends with dot."
    elif "Local address contains control or whitespace" in body:
        # I think this pertains to the recipient address.
        ExceptionToRaise = ses_exceptions.SESLocalAddressCharacterError
        exc_reason = "Local address contains control or whitespace."
    elif "Illegal address" in body:
        # A clearly mal-formed address.
        ExceptionToRaise = ses_exceptions.SESIllegalAddressError
        exc_reason = "Illegal address"
    # The re.search is to distinguish from the
    # SESAddressNotVerifiedError error above.
    elif re.search('Identity.*is not verified', body):
        ExceptionToRaise = ses_exceptions.SESIdentityNotVerifiedError
        exc_reason = "Identity is not verified."
    elif "ownership not confirmed" in body:
        ExceptionToRaise = ses_exceptions.SESDomainNotConfirmedError
        exc_reason = "Domain ownership is not confirmed."
    else:
        # This is either a common AWS error, or one that we don't devote
        # its own exception to.
        ExceptionToRaise = self.ResponseError
        exc_reason = response.reason

    raise ExceptionToRaise(response.status, exc_reason, body)
def send_email(self, source, subject, body, to_addresses,
               cc_addresses=None, bcc_addresses=None,
               format='text', reply_addresses=None,
               return_path=None, text_body=None, html_body=None):
    """Composes an email message based on input data, and then immediately
    queues the message for sending.

    :type source: string
    :param source: The sender's email address.

    :type subject: string
    :param subject: The subject of the message: A short summary of the
                    content, which will appear in the recipient's inbox.

    :type body: string
    :param body: The message body.

    :type to_addresses: list of strings or string
    :param to_addresses: The To: field(s) of the message.

    :type cc_addresses: list of strings or string
    :param cc_addresses: The CC: field(s) of the message.

    :type bcc_addresses: list of strings or string
    :param bcc_addresses: The BCC: field(s) of the message.

    :type format: string
    :param format: The format of the message's body, must be either "text"
                   or "html".

    :type reply_addresses: list of strings or string
    :param reply_addresses: The reply-to email address(es) for the
                            message. If the recipient replies to the
                            message, each reply-to address will
                            receive the reply.

    :type return_path: string
    :param return_path: The email address to which bounce notifications are
                        to be forwarded. If the message cannot be delivered
                        to the recipient, then an error message will be
                        returned from the recipient's ISP; this message
                        will then be forwarded to the email address
                        specified by the ReturnPath parameter.

    :type text_body: string
    :param text_body: The text body to send with this email.

    :type html_body: string
    :param html_body: The html body to send with this email.

    :raises Warning: if both ``body`` and the format-matching
        ``text_body``/``html_body`` are supplied.
    :raises ValueError: if ``format`` is invalid or no body was given.
    """
    format = format.lower().strip()
    # ``body`` is shorthand: route it into text_body or html_body
    # according to ``format`` (any non-"text" format is treated as html
    # here; the explicit format validation happens below).
    if body is not None:
        if format == "text":
            if text_body is not None:
                raise Warning("You've passed in both a body and a "
                              "text_body; please choose one or the other.")
            text_body = body
        else:
            if html_body is not None:
                raise Warning("You've passed in both a body and an "
                              "html_body; please choose one or the other.")
            html_body = body

    params = {
        'Source': source,
        'Message.Subject.Data': subject,
    }
    if return_path:
        params['ReturnPath'] = return_path
    if html_body is not None:
        params['Message.Body.Html.Data'] = html_body
    if text_body is not None:
        params['Message.Body.Text.Data'] = text_body

    if(format not in ("text", "html")):
        raise ValueError("'format' argument must be 'text' or 'html'")

    if(not (html_body or text_body)):
        raise ValueError("No text or html body found for mail")

    self._build_list_params(params, to_addresses,
                            'Destination.ToAddresses.member')
    if cc_addresses:
        self._build_list_params(params, cc_addresses,
                                'Destination.CcAddresses.member')

    if bcc_addresses:
        self._build_list_params(params, bcc_addresses,
                                'Destination.BccAddresses.member')

    if reply_addresses:
        self._build_list_params(params, reply_addresses,
                                'ReplyToAddresses.member')

    return self._make_request('SendEmail', params)
def send_raw_email(self, raw_message, source=None, destinations=None):
    """Sends an email message, with header and content specified by the
    client. The SendRawEmail action is useful for sending multipart MIME
    emails, with attachments or inline content. The raw text of the message
    must comply with Internet email standards; otherwise, the message
    cannot be sent.

    :type source: string
    :param source: The sender's email address. Amazon's docs say:

      If you specify the Source parameter, then bounce notifications and
      complaints will be sent to this email address. This takes precedence
      over any Return-Path header that you might include in the raw text of
      the message.

    :type raw_message: string
    :param raw_message: The raw text of the message. The client is
      responsible for ensuring the following:

      - Message must contain a header and a body, separated by a blank line.
      - All required header fields must be present.
      - Each part of a multipart MIME message must be formatted properly.
      - MIME content types must be among those supported by Amazon SES.
        Refer to the Amazon SES Developer Guide for more details.
      - Content must be base64-encoded, if MIME requires it.

    :type destinations: list of strings or string
    :param destinations: A list of destinations for the message.
    """
    # Python 2 code path: API expects base64 of bytes, so encode
    # unicode text first.
    if isinstance(raw_message, unicode):
        raw_message = raw_message.encode('utf-8')

    params = {
        'RawMessage.Data': base64.b64encode(raw_message),
    }

    if source:
        params['Source'] = source

    if destinations:
        self._build_list_params(params, destinations,
                                'Destinations.member')

    return self._make_request('SendRawEmail', params)
def list_verified_email_addresses(self):
    """Return the email addresses whose ownership has been verified.

    :rtype: dict
    :returns: A ListVerifiedEmailAddressesResponse structure. Note that
        keys must be unicode strings.
    """
    action = 'ListVerifiedEmailAddresses'
    return self._make_request(action)
def get_send_quota(self):
    """Fetch the account's current SES sending limits.

    :rtype: dict
    :returns: A GetSendQuotaResponse structure. Note that keys must be
        unicode strings.
    """
    action = 'GetSendQuota'
    return self._make_request(action)
def get_send_statistics(self):
    """Fetch sending statistics for the last two weeks of activity.

    Each returned data point covers a 15-minute interval.

    :rtype: dict
    :returns: A GetSendStatisticsResponse structure. Note that keys must be
        unicode strings.
    """
    action = 'GetSendStatistics'
    return self._make_request(action)
def delete_verified_email_address(self, email_address):
    """Remove an address from the list of verified email addresses.

    :type email_address: string
    :param email_address: The email address to be removed from the list of
        verified addresses.

    :rtype: dict
    :returns: A DeleteVerifiedEmailAddressResponse structure. Note that
        keys must be unicode strings.
    """
    params = {'EmailAddress': email_address}
    return self._make_request('DeleteVerifiedEmailAddress', params)
def verify_email_address(self, email_address):
    """Start verification of an email address.

    SES sends a confirmation message to the given address.

    :type email_address: string
    :param email_address: The email address to be verified.

    :rtype: dict
    :returns: A VerifyEmailAddressResponse structure. Note that keys must
        be unicode strings.
    """
    params = {'EmailAddress': email_address}
    return self._make_request('VerifyEmailAddress', params)
def verify_domain_dkim(self, domain):
    """Start Easy DKIM verification for a domain.

    Returns the DKIM tokens that must be published as ``CNAME`` records
    (pointing at Amazon-hosted DKIM public keys) in the domain's DNS.
    The records must stay published for Easy DKIM signing to keep
    working; once present, SES can DKIM-sign mail from the domain.
    Signing is toggled with ``SetIdentityDkimEnabled``. See the `Amazon
    SES Developer Guide
    <http://docs.amazonwebservices.com/ses/latest/DeveloperGuide>`_
    for details.

    :type domain: string
    :param domain: The domain name.
    """
    params = {'Domain': domain}
    return self._make_request('VerifyDomainDkim', params)
def set_identity_dkim_enabled(self, identity, dkim_enabled):
    """Turn Easy DKIM signing on or off for a verified identity.

    Enabling it for a domain identity (e.g. ``example.com``) signs all
    mail sent from addresses under that domain; enabling it for a
    single email address signs only that address's mail. An address may
    only be enabled if its domain has completed Easy DKIM setup (via
    the console or ``VerifyDomainDkim``).

    :type identity: string
    :param identity: An email address or domain name.

    :type dkim_enabled: bool
    :param dkim_enabled: Specifies whether or not to enable DKIM signing.
    """
    flag = 'true' if dkim_enabled else 'false'
    params = {'Identity': identity, 'DkimEnabled': flag}
    return self._make_request('SetIdentityDkimEnabled', params)
def get_identity_dkim_attributes(self, identities):
    """Fetch DKIM attributes for a list of verified identities.

    :type identities: list
    :param identities: A list of verified identities (email addresses
        and/or domains).
    """
    query = {}
    self._build_list_params(query, identities, 'Identities.member')
    return self._make_request('GetIdentityDkimAttributes', query)
def list_identities(self):
    """List every identity (email address or domain) on the account,
    regardless of verification status.

    :rtype: dict
    :returns: A ListIdentitiesResponse structure. Note that
        keys must be unicode strings.
    """
    action = 'ListIdentities'
    return self._make_request(action)
def get_identity_verification_attributes(self, identities):
    """Fetch the verification status (and, for domains, the
    verification token) for each given identity.

    :type identities: list of strings or string
    :param identities: List of identities.

    :rtype: dict
    :returns: A GetIdentityVerificationAttributesResponse structure.
        Note that keys must be unicode strings.
    """
    query = {}
    self._build_list_params(query, identities, 'Identities.member')
    return self._make_request('GetIdentityVerificationAttributes', query)
def verify_domain_identity(self, domain):
    """Start verification of a domain identity.

    :type domain: string
    :param domain: The domain to be verified.

    :rtype: dict
    :returns: A VerifyDomainIdentityResponse structure. Note that
        keys must be unicode strings.
    """
    params = {'Domain': domain}
    return self._make_request('VerifyDomainIdentity', params)
def verify_email_identity(self, email_address):
    """Start verification of an email-address identity.

    SES sends a confirmation message to the given address.

    :type email_address: string
    :param email_address: The email address to be verified.

    :rtype: dict
    :returns: A VerifyEmailIdentityResponse structure. Note that keys must
        be unicode strings.
    """
    params = {'EmailAddress': email_address}
    return self._make_request('VerifyEmailIdentity', params)
def delete_identity(self, identity):
    """Remove an identity (email address or domain) from the list of
    verified identities.

    :type identity: string
    :param identity: The identity to be deleted.

    :rtype: dict
    :returns: A DeleteIdentityResponse structure. Note that keys must
        be unicode strings.
    """
    params = {'Identity': identity}
    return self._make_request('DeleteIdentity', params)
def set_identity_notification_topic(self, identity, notification_type, sns_topic=None):
    """Route bounce or complaint feedback for an identity to an SNS topic.

    Publishing to topics may only be disabled when feedback forwarding
    is enabled.

    :type identity: string
    :param identity: An email address or domain name.

    :type notification_type: string
    :param notification_type: The type of feedback notifications that will
        be published to the specified topic.
        Valid Values: Bounce | Complaint

    :type sns_topic: string or None
    :param sns_topic: The Amazon Resource Name (ARN) of the Amazon Simple
        Notification Service (Amazon SNS) topic.
    """
    request = {
        'Identity': identity,
        'NotificationType': notification_type,
    }
    if sns_topic:
        request['SnsTopic'] = sns_topic
    return self._make_request('SetIdentityNotificationTopic', request)
def set_identity_feedback_forwarding_enabled(self, identity, forwarding_enabled=True):
    """Turn SES feedback notification via email on or off.

    Feedback forwarding may only be disabled when both complaint and
    bounce topics are set.

    :type identity: string
    :param identity: An email address or domain name.

    :type forwarding_enabled: bool
    :param forwarding_enabled: Specifies whether or not to enable feedback forwarding.
    """
    flag = 'true' if forwarding_enabled else 'false'
    params = {'Identity': identity, 'ForwardingEnabled': flag}
    return self._make_request('SetIdentityFeedbackForwardingEnabled', params)
|
# License-header and Cargo.toml license-line whitelists used by Servo's
# license-check tooling. The trailing backslashes inside the triple-quoted
# strings are line continuations: each constant is a single-line string
# with no embedded newlines.

# Canonical MPL-2.0 header text expected in Servo source files.
MPL = """\
This Source Code Form is subject to the terms of the Mozilla Public \
License, v. 2.0. If a copy of the MPL was not distributed with this \
file, You can obtain one at http://mozilla.org/MPL/2.0/.\
"""

# Canonical Apache-2.0/MIT dual-license header text.
APACHE = """\
Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or \
http://www.apache.org/licenses/LICENSE-2.0> or the MIT license \
<LICENSE-MIT or http://opensource.org/licenses/MIT>, at your \
option. This file may not be copied, modified, or distributed \
except according to those terms.\
"""

# Accepted copyright-notice lines (either form is allowed).
COPYRIGHT = [
    "See the COPYRIGHT file at the top-level directory of this distribution",
    "See http://rust-lang.org/COPYRIGHT",
]

# `license = ...` lines accepted in Servo's own Cargo.toml files.
licenses_toml = [
    'license = "MPL-2.0"',
    'license = "MIT/Apache-2.0"',
]

# `license = ...` / `name = ...` lines accepted in dependency Cargo.toml
# files. Matching is on the exact literal line, which is why the many
# spelling variants of the same SPDX expression are all listed.
licenses_dep_toml = [
    # Licenses that are compatible with Servo's licensing
    'license = "Apache-2 / MIT"',
    'license = "Apache-2.0 / MIT"',
    'license = "Apache-2.0"',
    'license = "Apache-2.0/MIT"',
    'license = "BSD-2-Clause"',
    'license = "BSD-3-Clause"',
    'license = "BSD-3-Clause/MIT"',
    'license = "CC0-1.0"',
    'license = "ISC"',
    'license = "MIT / Apache-2.0"',
    'license = "MIT OR Apache-2.0"',
    'license = "MIT"',
    'license = "MIT/Apache-2.0"',
    'license = "MPL-2.0"',
    'license = "Unlicense/MIT"',
    'license = "zlib-acknowledgement"',
    'license-file = "LICENSE-MIT"',
    'license= "MIT / Apache-2.0"',

    # Whitelisted crates whose licensing has been checked manually
    'name = "device"',
    'name = "dylib"',
    'name = "ipc-channel"',
    'name = "mozjs_sys"',
    'name = "azure"',
    'name = "freetype"',
    'name = "js"',
    'name = "servo-freetype-sys"',
    'name = "webrender"',
    'name = "webrender_api"',
]
|
from translate.lang import factory
def test_punctranslate():
    """Tests that we can translate punctuation."""
    language = factory.getlanguage('ne')
    # (input, expected) pairs; Nepali uses the danda (।) as full stop,
    # separated from the text by a space.
    cases = [
        (u"", u""),
        (u"abc efg", u"abc efg"),
        (u"abc efg.", u"abc efg ।"),
        (u"(abc efg).", u"(abc efg) ।"),
        (u"abc efg...", u"abc efg..."),
        (u"abc efg?", u"abc efg ?"),
    ]
    for source, expected in cases:
        assert language.punctranslate(source) == expected
def test_sentences():
    """Tests basic functionality of sentence segmentation.

    Nepali text is split on the danda (।); segmentation must work both
    with and without a space before the terminator.
    """
    language = factory.getlanguage('ne')
    # Empty input yields no sentences.
    sentences = language.sentences(u"")
    assert sentences == []
    # Without spaces before the punctuation
    sentences = language.sentences(u"यसको भौगोलिक अक्षांश २६ डिग्री २२ मिनेट देखि ३० डिग्री २७ मिनेट उत्तर र ८० डिग्री ४ मिनेट देखि ८८ डिग्री १२ मिनेट पूर्वी देशान्तर सम्म फैलिएको छ। यसको कूल क्षेत्रफल १,४७,१८१ वर्ग कि.मि छ।\n")
    assert sentences == [u"यसको भौगोलिक अक्षांश २६ डिग्री २२ मिनेट देखि ३० डिग्री २७ मिनेट उत्तर र ८० डिग्री ४ मिनेट देखि ८८ डिग्री १२ मिनेट पूर्वी देशान्तर सम्म फैलिएको छ।", u"यसको कूल क्षेत्रफल १,४७,१८१ वर्ग कि.मि छ।"]
    # With spaces before the punctuation
    sentences = language.sentences(u"यसको भौगोलिक अक्षांश २६ डिग्री २२ मिनेट देखि ३० डिग्री २७ मिनेट उत्तर र ८० डिग्री ४ मिनेट देखि ८८ डिग्री १२ मिनेट पूर्वी देशान्तर सम्म फैलिएको छ । यसको कूल क्षेत्रफल १,४७,१८१ वर्ग कि.मि छ ।\n")
    assert sentences == [u"यसको भौगोलिक अक्षांश २६ डिग्री २२ मिनेट देखि ३० डिग्री २७ मिनेट उत्तर र ८० डिग्री ४ मिनेट देखि ८८ डिग्री १२ मिनेट पूर्वी देशान्तर सम्म फैलिएको छ ।", u"यसको कूल क्षेत्रफल १,४७,१८१ वर्ग कि.मि छ ।"]
|
"""Base class for UniFi clients."""
import logging
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from .unifi_entity_base import UniFiBase
LOGGER = logging.getLogger(__name__)
class UniFiClient(UniFiBase):
    """Base class for UniFi clients."""

    def __init__(self, client, controller) -> None:
        """Set up client."""
        super().__init__(client, controller)
        # Treat the client as wired until the controller has seen its MAC
        # among the wireless clients.
        self._is_wired = client.mac not in controller.wireless_clients

    @property
    def client(self):
        """Wrap item."""
        return self._item

    @property
    def is_wired(self):
        """Return if the client is wired.
        Allows disabling logic to keep track of clients affected by UniFi wired bug marking wireless devices as wired. This is useful when running a network not only containing UniFi APs.
        """
        # Once the controller reports the MAC as wireless, stop treating
        # the client as wired (works around the UniFi wired bug).
        if self._is_wired and self.client.mac in self.controller.wireless_clients:
            self._is_wired = False
        if not self.controller.option_ignore_wired_bug:
            return self._is_wired
        # Wired-bug tracking disabled: trust the controller's own flag.
        return self.client.is_wired

    @property
    def unique_id(self):
        """Return a unique identifier for this switch."""
        return "{}-{}".format(self.TYPE, self.client.mac)

    @property
    def name(self) -> str:
        """Return the name of the client."""
        client = self.client
        return client.name or client.hostname

    @property
    def available(self) -> bool:
        """Return if controller is available."""
        return self.controller.available

    @property
    def device_info(self) -> dict:
        """Return a client description for device registry."""
        connection = (CONNECTION_NETWORK_MAC, self.client.mac)
        return {"connections": {connection}}
|
# NOTE(review): IDE inspection fixture, not runnable Python — the <warning>
# tags mark the diagnostics the inspection is expected to produce. Kept
# verbatim; confirm the harness tolerates this leading comment.
class A(B):
    def __init__(self):
        <warning descr="Python version 2.7 does not support this syntax. super() should have arguments in Python 2">super()</warning>
        <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not have method cmp">cmp()</warning>
        <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not have method reduce">reduce()</warning>
        <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not have method buffer">buffer()</warning>
|
"""
This module's purpose is to enable us to present internals of objects
in well-defined way to operator. To do this we can define "views"
on some objects. View is a definition of how to present object
and relations to other objects which also have their views defined.
By using views we can avoid making all interesting internal values
public. They will stay private and only "view" will access them
(think friend-class from C++)
"""
import logging
from ryu.services.protocols.bgp.operator.views import fields
LOG = logging.getLogger('bgpspeaker.operator.views.base')
class RdyToFlattenCollection(object):
    """Marker base class: collections _flatten is allowed to expand."""
    pass
class RdyToFlattenList(list, RdyToFlattenCollection):
    """A list that _flatten will expand (plain lists are kept whole)."""
    pass
class RdyToFlattenDict(dict, RdyToFlattenCollection):
    """A dict whose values _flatten will expand (plain dicts are kept whole)."""
    pass
class OperatorAbstractView(object):
    """Abstract base class for operator views. It isn't meant to be
    instantiated.
    """
    def __init__(self, obj, filter_func=None):
        """Init
        :param obj: data model for view. In other words object we
            are creating view for. In case of ListView it should be
            a list and in case of DictView it should be a dict.
        :param filter_func: function to filter models
        """
        self._filter_func = filter_func
        # Field descriptors declared on the concrete subclass.
        self._fields = self._collect_fields()
        self._obj = obj
    @classmethod
    def _collect_fields(cls):
        # Gather every fields.Field descriptor defined on the class,
        # keyed by attribute name.
        names = [attr for attr in dir(cls)
                 if isinstance(getattr(cls, attr), fields.Field)]
        return dict([(name, getattr(cls, name)) for name in names])
    def combine_related(self, field_name):
        """Combines related views. In case of DetailView it just returns
        one-element list containing related view wrapped in
        CombinedViewsWrapper.
        In case of ListView and DictView it returns a list of related views
        for every element of model collection also wrapped
        in CombinedViewsWrapper.
        :param field_name: field name of related view
        :returns: vectorized form of related views. You can access them
            as if you had only one view and you will receive flattened list
            of responses from related views. Look at docstring of
            CombinedViewsWrapper
        """
        raise NotImplementedError()
    def c_rel(self, *args, **kwargs):
        """Shortcut for combine_related. Look above
        """
        return self.combine_related(*args, **kwargs)
    def get_field(self, field_name):
        """Get value of data field.
        :return: value of data-field of this view
        """
        raise NotImplementedError()
    def encode(self):
        """Representation of view which is using only python standard types.
        :return: dict representation of this views data. However it
            doesn't have to be a dict. In case of ListView it would
            return a list. It should return wrapped types
            for list - RdyToFlattenList, for dict - RdyToFlattenDict
        """
        raise NotImplementedError()
    @property
    def model(self):
        """Getter for data model being presented by this view. Every view is
        associated with some data model.
        :return: underlying data of this view
        """
        raise NotImplementedError()
    def apply_filter(self, filter_func):
        """Sets filter function to apply on model
        :param filter_func: function which takes the model and returns it
            filtered
        """
        self._filter_func = filter_func
    def clear_filter(self):
        # Remove any previously applied filter function.
        self._filter_func = None
class OperatorDetailView(OperatorAbstractView):
    """View over a single model object; encodes its data fields to a dict."""
    def combine_related(self, field_name):
        related_field = self._fields[field_name]
        return CombinedViewsWrapper([related_field.retrieve_and_wrap(self._obj)])
    def get_field(self, field_name):
        return self._fields[field_name].get(self._obj)
    def encode(self):
        # Only plain data fields are serialized; related views are not.
        return dict(
            (name, field.get(self._obj))
            for name, field in self._fields.items()
            if isinstance(field, fields.DataField))
    def rel(self, field_name):
        """Return the related view for a single field, unwrapped."""
        return self._fields[field_name].retrieve_and_wrap(self._obj)
    @property
    def model(self):
        return self._obj
class OperatorListView(OperatorAbstractView):
    """View over a list of model objects; bulk results come back wrapped
    in RdyToFlattenList so they can be flattened later."""
    def __init__(self, obj, filter_func=None):
        assert isinstance(obj, list)
        super(OperatorListView, self).__init__(RdyToFlattenList(obj),
                                               filter_func)
    def combine_related(self, field_name):
        related_field = self._fields[field_name]
        wrapped = [related_field.retrieve_and_wrap(item)
                   for item in self.model]
        return CombinedViewsWrapper(RdyToFlattenList(wrapped))
    def get_field(self, field_name):
        data_field = self._fields[field_name]
        return RdyToFlattenList(data_field.get(item) for item in self.model)
    def encode(self):
        # One plain dict of data fields per list element.
        return RdyToFlattenList(
            dict((name, field.get(item))
                 for name, field in self._fields.items()
                 if isinstance(field, fields.DataField))
            for item in self.model)
    @property
    def model(self):
        if self._filter_func is None:
            return self._obj
        return RdyToFlattenList(filter(self._filter_func, self._obj))
class OperatorDictView(OperatorAbstractView):
    """View over a dict of model objects, keyed arbitrarily."""
    def __init__(self, obj, filter_func=None):
        assert isinstance(obj, dict)
        super(OperatorDictView, self).__init__(RdyToFlattenDict(obj),
                                               filter_func)
    def combine_related(self, field_name):
        # Keys are dropped here; only the values' related views are combined.
        related_field = self._fields[field_name]
        wrapped = [related_field.retrieve_and_wrap(item)
                   for item in self.model.values()]
        return CombinedViewsWrapper(RdyToFlattenList(wrapped))
    def get_field(self, field_name):
        data_field = self._fields[field_name]
        return RdyToFlattenDict((key, data_field.get(item))
                                for key, item in self.model.items())
    def encode(self):
        # Outer dict keeps the model keys; each value becomes a plain dict
        # of its data fields.
        encoded = RdyToFlattenDict()
        for key, item in self.model.items():
            encoded[key] = dict(
                (name, field.get(item))
                for name, field in self._fields.items()
                if isinstance(field, fields.DataField))
        return encoded
    @property
    def model(self):
        if self._filter_func is None:
            return self._obj
        # Dict filters receive (key, value) pairs.
        filtered = RdyToFlattenDict()
        for key, value in self._obj.items():
            if self._filter_func(key, value):
                filtered[key] = value
        return filtered
class CombinedViewsWrapper(RdyToFlattenList):
    """List-like wrapper exposing several views behind the one-view API.

    Each call is forwarded to every wrapped view and the responses are
    returned flattened into a single list. Be aware that wrapping
    DictViews loses the information about their dict keys.
    """
    def __init__(self, obj):
        super(CombinedViewsWrapper, self).__init__(obj)
        self._obj = obj
    def combine_related(self, field_name):
        combined = [view.combine_related(field_name) for view in self._obj]
        return CombinedViewsWrapper(list(_flatten(combined)))
    def c_rel(self, *args, **kwargs):
        """Shortcut for combine_related."""
        return self.combine_related(*args, **kwargs)
    def encode(self):
        return list(_flatten([view.encode() for view in self._obj]))
    def get_field(self, field_name):
        return list(_flatten([view.get_field(field_name)
                              for view in self._obj]))
    @property
    def model(self):
        return list(_flatten([view.model for view in self._obj]))
    def apply_filter(self, filter_func):
        for view in self._obj:
            view.apply_filter(filter_func)
    def clear_filter(self):
        for view in self._obj:
            view.clear_filter()
def _flatten(l, max_level=10):
    """Generator yielding the leaves of a tree-like structure as a flat
    sequence.

    Only lists/dicts that subclass RdyToFlattenCollection are expanded
    (dicts contribute their values); regular lists and dicts are treated
    as single items. Recursion stops after *max_level* levels, past which
    the remaining structure is yielded whole.

    :param l: some iterable to be flattened
    :return: flattened iterator
    """
    if max_level < 0:
        yield l
        return
    items = l.values() if isinstance(l, dict) else l
    for item in items:
        if isinstance(item, RdyToFlattenCollection):
            for leaf in _flatten(item, max_level=max_level - 1):
                yield leaf
        else:
            yield item
def _create_collection_view(detail_view_class, name, encode=None,
                            view_class=None):
    # Dynamically build a collection-view class called `name` inheriting
    # from `view_class`, reusing the field descriptors declared on the
    # detail view; `encode`, if given, overrides the inherited method.
    assert issubclass(detail_view_class, OperatorDetailView)
    class_fields = detail_view_class._collect_fields()
    if encode is not None:
        class_fields.update({'encode': encode})
    return type(name, (view_class,), class_fields)
def create_dict_view_class(detail_view_class, name):
    """Build a DictView class rendering each value with *detail_view_class*."""
    encode = None
    if 'encode' in dir(detail_view_class):
        def encode(self):
            # Delegate per-item encoding to the detail view, keeping keys.
            return RdyToFlattenDict(
                (key, detail_view_class(item).encode())
                for key, item in self.model.items())
    return _create_collection_view(
        detail_view_class, name, encode, OperatorDictView
    )
def create_list_view_class(detail_view_class, name):
    """Build a ListView class rendering each element with *detail_view_class*."""
    encode = None
    if 'encode' in dir(detail_view_class):
        def encode(self):
            # Delegate per-item encoding to the detail view.
            return RdyToFlattenList(detail_view_class(item).encode()
                                    for item in self.model)
    return _create_collection_view(
        detail_view_class, name, encode, OperatorListView
    )
|
"""Test for the estimate_pi example."""
import logging
import unittest
from apache_beam.examples.complete import estimate_pi
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import BeamAssertException
def in_between(lower, upper):
  """Build a matcher asserting the single (_, _, estimate) result is in range."""
  def _in_between(actual):
    _, _, estimate = actual[0]
    out_of_range = estimate < lower or estimate > upper
    if out_of_range:
      raise BeamAssertException(
          'Failed assert: %f not in [%f, %f]' % (estimate, lower, upper))
  return _in_between
class EstimatePiTest(unittest.TestCase):
  """Runs the EstimatePi pipeline and sanity-checks the estimate's range."""
  def test_basics(self):
    with TestPipeline() as p:
      # EstimatePiTransform(5000) — presumably the per-shard trial count;
      # see the example module for the exact semantics.
      result = p | 'Estimate' >> estimate_pi.EstimatePiTransform(5000)
      # Note: Probabilistically speaking this test can fail with a probability
      # that is very small (VERY) given that we run at least 500 thousand
      # trials.
      assert_that(result, in_between(3.125, 3.155))
if __name__ == '__main__':
  # Surface pipeline progress when run directly.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
|
"""
Support for Proliphix NT10e Thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.proliphix/
"""
import voluptuous as vol
from homeassistant.components.climate import (
PRECISION_TENTHS, STATE_COOL, STATE_HEAT, STATE_IDLE,
ClimateDevice, PLATFORM_SCHEMA, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, TEMP_FAHRENHEIT, ATTR_TEMPERATURE)
import homeassistant.helpers.config_validation as cv
# PyPI package installed by Home Assistant for this platform.
REQUIREMENTS = ['proliphix==0.4.1']
# Extra state attribute key exposing the fan state.
ATTR_FAN = 'fan'
# The thermostat is reached over HTTP with basic credentials.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Proliphix thermostats."""
    # Imported lazily so the dependency is only needed when configured.
    import proliphix

    host = config.get(CONF_HOST)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    pdp = proliphix.PDP(host, username, password)
    add_devices([ProliphixThermostat(pdp)])
class ProliphixThermostat(ClimateDevice):
    """Representation of a Proliphix thermostat."""
    def __init__(self, pdp):
        """Initialize the thermostat from a connected PDP handle."""
        self._pdp = pdp
        self._pdp.update()
        self._name = self._pdp.name
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_TARGET_TEMPERATURE
    @property
    def should_poll(self):
        """Polling is required to refresh thermostat state."""
        return True
    def update(self):
        """Pull fresh data from the thermostat."""
        self._pdp.update()
    @property
    def name(self):
        """Return the name of the thermostat."""
        return self._name
    @property
    def precision(self):
        """Return the precision of the system.
        Proliphix temperature values are passed back and forth in the
        API as tenths of degrees F (i.e. 690 for 69 degrees).
        """
        return PRECISION_TENTHS
    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        return {ATTR_FAN: self._pdp.fan_state}
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_FAHRENHEIT
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._pdp.cur_temp
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._pdp.setback
    @property
    def current_operation(self):
        """Map the raw hvac_state code onto a Home Assistant state."""
        hvac_state = self._pdp.hvac_state
        if hvac_state in (1, 2):
            return STATE_IDLE
        if hvac_state == 3:
            return STATE_HEAT
        if hvac_state == 6:
            return STATE_COOL
        # Other codes are not mapped.
        return None
    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target = kwargs.get(ATTR_TEMPERATURE)
        if target is not None:
            self._pdp.setback = target
|
"""
:mod:`nova` -- Cloud IaaS Platform
===================================
.. automodule:: nova
:platform: Unix
:synopsis: Infrastructure-as-a-Service Cloud platform.
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@gmail.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
import gettext
# Install the _() translation function globally for the 'nova' text domain.
# NOTE(review): unicode=1 is a Python 2-only argument; gettext.install()
# removed it in Python 3.
gettext.install('nova', unicode=1)
|
import os
import time
from graphite.node import LeafNode, BranchNode
from graphite.intervals import Interval, IntervalSet
from graphite.carbonlink import CarbonLink
from graphite.logger import log
from django.conf import settings
# Optional storage backends: each reader class below advertises
# availability through its `supported` flag, so a missing library only
# disables the corresponding backend instead of breaking the import.
try:
  import whisper
except ImportError:
  whisper = False
try:
  import rrdtool
except ImportError:
  rrdtool = False
try:
  import gzip
except ImportError:
  gzip = False
class FetchInProgress(object):
  """Placeholder returned when a fetch completes asynchronously.

  Call waitForResults() to block until the supplied callback yields
  the actual data.
  """
  def __init__(self, wait_callback):
    self.wait_callback = wait_callback

  def waitForResults(self):
    return self.wait_callback()
class MultiReader(object):
  """Composite reader that fetches from several equivalent nodes and
  merges their series, preferring the finest-resolution data."""
  __slots__ = ('nodes',)

  def __init__(self, nodes):
    self.nodes = nodes

  def get_intervals(self):
    """Union of the intervals covered by all underlying nodes."""
    interval_sets = []
    for node in self.nodes:
      interval_sets.extend( node.intervals.intervals )
    return IntervalSet( sorted(interval_sets) )

  def fetch(self, startTime, endTime):
    """Fetch from every node and fold the results together with merge()."""
    from functools import reduce  # builtin on py2; explicit for py3
    # Start the fetch on each node
    results = [ n.fetch(startTime, endTime) for n in self.nodes ]
    # Wait for any asynchronous operations to complete
    for i, result in enumerate(results):
      if isinstance(result, FetchInProgress):
        try:
          results[i] = result.waitForResults()
        except Exception:  # was a bare except; keep best-effort semantics
          log.exception("Failed to complete subfetch")
          results[i] = None
    results = [r for r in results if r is not None]
    if not results:
      raise Exception("All sub-fetches failed")
    return reduce(self.merge, results)

  def merge(self, results1, results2):
    """Merge two (time_info, values) series; finer-step values win."""
    # Ensure results1 is finer than results2
    if results1[0][2] > results2[0][2]:
      results1, results2 = results2, results1
    time_info1, values1 = results1
    time_info2, values2 = results2
    start1, end1, step1 = time_info1
    start2, end2, step2 = time_info2
    step = step1                # finest step
    start = min(start1, start2) # earliest start
    end = max(end1, end2)       # latest end
    time_info = (start, end, step)
    values = []
    t = start
    while t < end:
      # Look for the finer precision value first if available.
      # BUG FIX: use floor division — on Python 3, '/' on ints yields a
      # float, which is not a valid list index.
      i1 = (t - start1) // step1
      if len(values1) > i1:
        v1 = values1[i1]
      else:
        v1 = None
      if v1 is None:
        i2 = (t - start2) // step2
        v2 = values2[i2] if len(values2) > i2 else None
        values.append(v2)
      else:
        values.append(v1)
      t += step
    return (time_info, values)
class CeresReader(object):
  """Reader for ceres tree nodes, overlaying carbon's in-memory cache."""
  __slots__ = ('ceres_node', 'real_metric_path')
  supported = True

  def __init__(self, ceres_node, real_metric_path):
    self.ceres_node = ceres_node
    self.real_metric_path = real_metric_path

  def get_intervals(self):
    """One Interval per on-disk slice of the ceres node."""
    intervals = []
    for info in self.ceres_node.slice_info:
      (start, end, step) = info
      intervals.append( Interval(start, end) )
    return IntervalSet(intervals)

  def fetch(self, startTime, endTime):
    """Return (time_info, values), patching in carbon's cached datapoints."""
    data = self.ceres_node.read(startTime, endTime)
    time_info = (data.startTime, data.endTime, data.timeStep)
    values = list(data.values)

    # Merge in data from carbon's cache
    try:
      cached_datapoints = CarbonLink.query(self.real_metric_path)
    except Exception:  # narrowed from a bare except
      log.exception("Failed CarbonLink query '%s'" % self.real_metric_path)
      cached_datapoints = []

    for (timestamp, value) in cached_datapoints:
      interval = timestamp - (timestamp % data.timeStep)
      # BUG FIX: floor division — '/' on Python 3 yields a float, which is
      # not a valid index. The explicit bounds check replaces the old
      # 'try/except: pass', which also let a negative index silently
      # overwrite points at the END of the list.
      i = int(interval - data.startTime) // data.timeStep
      if 0 <= i < len(values):
        values[i] = value

    return (time_info, values)
class WhisperReader(object):
  """Reader for whisper files, overlaying carbon's in-memory cache."""
  __slots__ = ('fs_path', 'real_metric_path')
  supported = bool(whisper)

  def __init__(self, fs_path, real_metric_path):
    self.fs_path = fs_path
    self.real_metric_path = real_metric_path

  def get_intervals(self):
    """Single interval spanning the file's max retention up to its mtime."""
    start = time.time() - whisper.info(self.fs_path)['maxRetention']
    end = max( os.stat(self.fs_path).st_mtime, start )
    return IntervalSet( [Interval(start, end)] )

  def fetch(self, startTime, endTime):
    """Return (time_info, values) or None, patching in cached datapoints."""
    data = whisper.fetch(self.fs_path, startTime, endTime)
    if not data:
      return None

    time_info, values = data
    (start,end,step) = time_info

    # Merge in data from carbon's cache
    try:
      cached_datapoints = CarbonLink.query(self.real_metric_path)
    except Exception:  # narrowed from a bare except
      log.exception("Failed CarbonLink query '%s'" % self.real_metric_path)
      cached_datapoints = []

    for (timestamp, value) in cached_datapoints:
      interval = timestamp - (timestamp % step)
      # BUG FIX: floor division — '/' on Python 3 yields a float, which is
      # not a valid index. The bounds check replaces the old
      # 'try/except: pass' and also stops negative indices from silently
      # overwriting points at the end of the list.
      i = int(interval - start) // step
      if 0 <= i < len(values):
        values[i] = value

    return (time_info, values)
class GzippedWhisperReader(WhisperReader):
  """WhisperReader variant for gzip-compressed whisper files."""
  supported = bool(whisper and gzip)

  def get_intervals(self):
    fh = gzip.GzipFile(self.fs_path, 'rb')
    try:
      # BUG FIX: writing `whisper.__readHeader(fh)` literally inside a
      # class body triggers private name mangling — Python rewrites it to
      # `whisper._GzippedWhisperReader__readHeader`, which raises
      # AttributeError. getattr() bypasses the mangling.
      info = getattr(whisper, '__readHeader')(fh)  # evil, but necessary.
    finally:
      fh.close()
    start = time.time() - info['maxRetention']
    end = max( os.stat(self.fs_path).st_mtime, start )
    return IntervalSet( [Interval(start, end)] )

  def fetch(self, startTime, endTime):
    fh = gzip.GzipFile(self.fs_path, 'rb')
    try:
      return whisper.file_fetch(fh, startTime, endTime)
    finally:
      fh.close()
class RRDReader:
  """Reader for a single datasource within an RRD file."""
  supported = bool(rrdtool)

  def __init__(self, fs_path, datasource_name):
    self.fs_path = fs_path
    self.datasource_name = datasource_name

  def get_intervals(self):
    start = time.time() - self.get_retention(self.fs_path)
    end = max( os.stat(self.fs_path).st_mtime, start )
    return IntervalSet( [Interval(start, end)] )

  def fetch(self, startTime, endTime):
    startString = time.strftime("%H:%M_%Y%m%d+%Ss", time.localtime(startTime))
    endString = time.strftime("%H:%M_%Y%m%d+%Ss", time.localtime(endTime))
    if settings.FLUSHRRDCACHED:
      rrdtool.flushcached(self.fs_path, '--daemon', settings.FLUSHRRDCACHED)
    (timeInfo, columns, rows) = rrdtool.fetch(self.fs_path,settings.RRD_CF,'-s' + startString,'-e' + endString)
    colIndex = list(columns).index(self.datasource_name)
    rows.pop() #chop off the latest value because RRD returns crazy last values sometimes
    values = (row[colIndex] for row in rows)
    return (timeInfo, values)

  @staticmethod
  def get_datasources(fs_path):
    info = rrdtool.info(fs_path)
    if 'ds' in info:
      return [datasource_name for datasource_name in info['ds']]
    else:
      ds_keys = [ key for key in info if key.startswith('ds[') ]
      datasources = set( key[3:].split(']')[0] for key in ds_keys )
      return list(datasources)

  @staticmethod
  def get_retention(fs_path):
    info = rrdtool.info(fs_path)
    if 'rra' in info:
      rras = info['rra']
    else:
      # Ugh, I like the old python-rrdtool api better..
      # BUG FIX: int(key[4]) read only one character, so 'rra[10]...' and
      # above were parsed as 1; parse the full bracketed index instead.
      rra_count = max([ int(key[4:].split(']')[0]) for key in info if key.startswith('rra[') ]) + 1
      # BUG FIX: [{}] * rra_count created rra_count references to ONE
      # shared dict, so every entry ended up with the last archive's
      # values and the retention calculation was wrong.
      rras = [{} for _ in range(rra_count)]
      for i in range(rra_count):
        rras[i]['pdp_per_row'] = info['rra[%d].pdp_per_row' % i]
        rras[i]['rows'] = info['rra[%d].rows' % i]
    # Retention equals the deepest archive: max(pdp_per_row * rows) * step.
    retention_points = 0
    for rra in rras:
      points = rra['pdp_per_row'] * rra['rows']
      if points > retention_points:
        retention_points = points
    return retention_points * info['step']
|
"""Test runner for typeshed.
Depends on mypy being installed.
Approach:
1. Parse sys.argv
2. Compute appropriate arguments for mypy
3. Stuff those arguments into sys.argv
4. Run mypy.main('')
5. Repeat steps 2-4 for other mypy runs (e.g. --py2)
"""
import os
import re
import sys
import argparse
# CLI definition; include/exclude patterns are unanchored regexps matched
# against the full path of each stub file.
parser = argparse.ArgumentParser(description="Test runner for typeshed. "
                                 "Patterns are unanchored regexps on the full path.")
parser.add_argument('-v', '--verbose', action='count', default=0, help="More output")
parser.add_argument('-n', '--dry-run', action='store_true', help="Don't actually run mypy")
parser.add_argument('-x', '--exclude', type=str, nargs='*', help="Exclude pattern")
parser.add_argument('-p', '--python-version', type=str, nargs='*',
                    help="These versions only (major[.minor])")
parser.add_argument('filter', type=str, nargs='*', help="Include pattern (default all)")
def log(args, *varargs):
    """Print *varargs* when -v/--verbose was given at least twice."""
    if args.verbose >= 2:
        print(*varargs)
def match(fn, args, blacklist):
    """Decide whether stub path *fn* should be type-checked.

    Precedence: the blacklist always wins; then --exclude patterns reject;
    then include patterns accept. With no patterns at all, everything is
    accepted; with only excludes, anything not excluded is accepted.
    """
    if blacklist.match(fn):
        log(args, fn, 'excluded by blacklist')  # BUG FIX: was 'exluded'
        return False
    if not args.filter and not args.exclude:
        log(args, fn, 'accept by default')
        return True
    if args.exclude:
        for f in args.exclude:
            if re.search(f, fn):
                log(args, fn, 'excluded by pattern', f)
                return False
    if args.filter:
        for f in args.filter:
            if re.search(f, fn):
                log(args, fn, 'accepted by pattern', f)
                return True
    if args.filter:
        log(args, fn, 'rejected (no pattern matches)')
        return False
    log(args, fn, 'accepted (no exclude pattern matches)')
    return True
def libpath(major, minor):
    """Return existing typeshed search dirs for Python major.minor,
    most specific version first, checking stdlib before third_party."""
    versions = ['%d.%d' % (major, m) for m in reversed(range(minor + 1))]
    versions += [str(major), '2and3']
    paths = []
    for version in versions:
        for top in ['stdlib', 'third_party']:
            candidate = os.path.join(top, version)
            if os.path.isdir(candidate):
                paths.append(candidate)
    return paths
def main():
    """Run mypy over the typeshed stubs for each selected Python version."""
    args = parser.parse_args()
    # Blacklist file: one unanchored path-suffix regex per line, '#' comments.
    with open(os.path.join(os.path.dirname(__file__), "mypy_blacklist.txt")) as f:
        blacklist = re.compile("(%s)$" % "|".join(
            re.findall(r"^\s*([^\s#]+)\s*(?:#.*)?$", f.read(), flags=re.M)))
    try:
        from mypy.main import main as mypy_main
    except ImportError:
        print("Cannot import mypy. Did you install it?")
        sys.exit(1)
    versions = [(3, 6), (3, 5), (3, 4), (3, 3), (3, 2), (2, 7)]
    if args.python_version:
        # Keep only versions whose 'major.minor' string starts with a
        # requested prefix (so '-p 3' selects all 3.x).
        versions = [v for v in versions
                    if any(('%d.%d' % v).startswith(av) for av in args.python_version)]
    if not versions:
        print("--- no versions selected ---")
        sys.exit(1)
    code = 0
    runs = 0
    for major, minor in versions:
        roots = libpath(major, minor)
        files = []
        # Track module names already collected so a more specific version
        # directory shadows the generic one.
        seen = {'__builtin__', 'builtins', 'typing'}  # Always ignore these.
        for root in roots:
            names = os.listdir(root)
            for name in names:
                full = os.path.join(root, name)
                mod, ext = os.path.splitext(name)
                if mod in seen or mod.startswith('.'):
                    continue
                if ext in ['.pyi', '.py']:
                    if match(full, args, blacklist):
                        seen.add(mod)
                        files.append(full)
                elif (os.path.isfile(os.path.join(full, '__init__.pyi')) or
                      os.path.isfile(os.path.join(full, '__init__.py'))):
                    # Package directory: walk it in sorted order for
                    # deterministic file lists.
                    for r, ds, fs in os.walk(full):
                        ds.sort()
                        fs.sort()
                        for f in fs:
                            m, x = os.path.splitext(f)
                            if x in ['.pyi', '.py']:
                                fn = os.path.join(r, f)
                                if match(fn, args, blacklist):
                                    seen.add(mod)
                                    files.append(fn)
        if files:
            runs += 1
            flags = ['--python-version', '%d.%d' % (major, minor)]
            flags.append('--strict-optional')
            # flags.append('--warn-unused-ignores')  # Fast parser and regular parser disagree.
            # mypy reads its arguments from sys.argv.
            sys.argv = ['mypy'] + flags + files
            if args.verbose:
                print("running", ' '.join(sys.argv))
            else:
                print("running mypy", ' '.join(flags), "# with", len(files), "files")
            try:
                if not args.dry_run:
                    mypy_main('')
            except SystemExit as err:
                # Remember the worst exit status across all runs.
                code = max(code, err.code)
    if code:
        print("--- exit status", code, "---")
        sys.exit(code)
    if not runs:
        print("--- nothing to do; exit 1 ---")
        sys.exit(1)
if __name__ == '__main__':
    main()
|
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the legacy UserProfile.muted_topics field. The dependency on
    # 0102 ('convert_muted_topic') suggests that migration moved the data
    # off this field first — verify against that migration before editing.
    dependencies = [
        ('zerver', '0102_convert_muted_topic'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='muted_topics',
        ),
    ]
|
from django import template
register = template.Library()
@register.inclusion_tag('rapidsms/templatetags/form.html')
def render_form(form):
    """Render *form* via the shared rapidsms form template."""
    return {"form": form}
|
from time import time
import argparse
import os
from pprint import pprint
import numpy as np
from threadpoolctl import threadpool_limits
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.ensemble._hist_gradient_boosting.utils import (
get_equivalent_estimator)
# Benchmark CLI: compares HistGradientBoosting{Classifier,Regressor}
# against optional LightGBM/XGBoost/CatBoost equivalents.
parser = argparse.ArgumentParser()
parser.add_argument('--n-leaf-nodes', type=int, default=31)
parser.add_argument('--n-trees', type=int, default=10)
parser.add_argument('--lightgbm', action="store_true", default=False,
                    help='also benchmark lightgbm')
parser.add_argument('--xgboost', action="store_true", default=False,
                    help='also benchmark xgboost')
parser.add_argument('--catboost', action="store_true", default=False,
                    help='also benchmark catboost')
parser.add_argument('--learning-rate', type=float, default=.1)
parser.add_argument('--problem', type=str, default='classification',
                    choices=['classification', 'regression'])
parser.add_argument('--loss', type=str, default='default')
parser.add_argument('--missing-fraction', type=float, default=0)
parser.add_argument('--n-classes', type=int, default=2)
parser.add_argument('--n-samples', type=int, default=int(1e6))
parser.add_argument('--n-features', type=int, default=100)
parser.add_argument('--max-bins', type=int, default=255)
parser.add_argument('--print-params', action="store_true", default=False)
parser.add_argument('--random-sample-weights', action="store_true",
                    default=False,
                    help="generate and use random sample weights")
parser.add_argument('--plot', action="store_true", default=False,
                    help='show a plot results')
parser.add_argument('--plot-filename', default=None,
                    help='filename to save the figure to disk')
args = parser.parse_args()
# Shorthand aliases used throughout the script.
n_samples = args.n_samples
n_leaf_nodes = args.n_leaf_nodes
n_trees = args.n_trees
lr = args.learning_rate
max_bins = args.max_bins
# Train and test splits are the same size (see the 50/50 split below).
print("Data size: %d samples train, %d samples test."
      % (n_samples, n_samples))
print(f"n_features: {args.n_features}")
def get_estimator_and_data():
    """Generate the synthetic dataset and pick the estimator class.

    Returns (X, y, Estimator). Twice args.n_samples rows are generated so
    that the 50/50 train/test split yields args.n_samples rows each.
    """
    if args.problem == 'classification':
        X, y = make_classification(args.n_samples * 2,
                                   n_features=args.n_features,
                                   n_classes=args.n_classes,
                                   n_clusters_per_class=1,
                                   n_informative=args.n_features // 2,
                                   random_state=0)
        return X, y, HistGradientBoostingClassifier
    elif args.problem == 'regression':
        # BUG FIX: args.n_samples_max does not exist (the CLI flag is
        # --n-samples), so the regression path raised AttributeError.
        X, y = make_regression(args.n_samples * 2,
                               n_features=args.n_features, random_state=0)
        return X, y, HistGradientBoostingRegressor
X, y, Estimator = get_estimator_and_data()
# Optionally knock out a random fraction of entries to exercise the
# missing-value code paths.
if args.missing_fraction:
    mask = np.random.binomial(1, args.missing_fraction, size=X.shape).astype(
        bool)
    X[mask] = np.nan
if args.random_sample_weights:
    sample_weight = np.random.rand(len(X)) * 10
else:
    sample_weight = None
# 50/50 split; sample weights, when present, are split alongside the data.
if sample_weight is not None:
    (X_train_, X_test_, y_train_, y_test_,
     sample_weight_train_, _) = train_test_split(
        X, y, sample_weight, test_size=0.5, random_state=0)
else:
    X_train_, X_test_, y_train_, y_test_ = train_test_split(
        X, y, test_size=0.5, random_state=0)
    sample_weight_train_ = None
# Reference estimator; early stopping is disabled so every library fits
# exactly n_trees iterations.
sklearn_est = Estimator(
    learning_rate=lr,
    max_iter=n_trees,
    max_bins=max_bins,
    max_leaf_nodes=n_leaf_nodes,
    early_stopping=False,
    random_state=0,
    verbose=0,
)
# Resolve the 'default' loss to an explicit name per problem type.
loss = args.loss
if args.problem == 'classification':
    if loss == 'default':
        # loss='auto' does not work with get_equivalent_estimator()
        loss = 'binary_crossentropy' if args.n_classes == 2 else \
            'categorical_crossentropy'
else:
    # regression
    if loss == 'default':
        loss = 'least_squares'
sklearn_est.set_params(loss=loss)
if args.print_params:
    print("scikit-learn")
    pprint(sklearn_est.get_params())
    for libname in ["lightgbm", "xgboost", "catboost"]:
        if getattr(args, libname):
            print(libname)
            est = get_equivalent_estimator(sklearn_est, lib=libname)
            pprint(est.get_params())
def one_run(n_threads, n_samples):
    """Fit and score every configured library with ``n_threads`` threads.

    Trains on the first ``n_samples`` rows of the train split, scores on the
    first ``n_samples`` rows of the test split, and prints each timing.

    Returns a 12-tuple of (score, fit_duration, score_duration) for sklearn,
    LightGBM, XGBoost and CatBoost, in that order; the entries of a library
    that is not enabled on the command line are all None.
    """
    X_train = X_train_[:n_samples]
    X_test = X_test_[:n_samples]
    y_train = y_train_[:n_samples]
    y_test = y_test_[:n_samples]
    if sample_weight is not None:
        sample_weight_train = sample_weight_train_[:n_samples]
    else:
        sample_weight_train = None
    assert X_train.shape[0] == n_samples
    assert X_test.shape[0] == n_samples

    def _bench(lib, thread_param, article_and_name):
        """Fit/score the `lib` equivalent of `est` and return its timings.

        Factors out the three previously copy-pasted per-library sections;
        `thread_param` is the library-specific thread-count parameter name.
        """
        print(f"Fitting {article_and_name} model...")
        other_est = get_equivalent_estimator(est, lib=lib)
        other_est.set_params(**{thread_param: n_threads})
        tic = time()
        other_est.fit(X_train, y_train, sample_weight=sample_weight_train)
        fit_duration = time() - tic
        tic = time()
        score = other_est.score(X_test, y_test)
        score_duration = time() - tic
        print("score: {:.4f}".format(score))
        print("fit duration: {:.3f}s,".format(fit_duration))
        print("score duration: {:.3f}s,".format(score_duration))
        return score, fit_duration, score_duration

    print("Fitting a sklearn model...")
    tic = time()
    est = sklearn.base.clone(sklearn_est)
    # Limit the OpenMP pool so sklearn actually uses n_threads threads.
    with threadpool_limits(n_threads, user_api="openmp"):
        est.fit(X_train, y_train, sample_weight=sample_weight_train)
    sklearn_fit_duration = time() - tic
    tic = time()
    sklearn_score = est.score(X_test, y_test)
    sklearn_score_duration = time() - tic
    print("score: {:.4f}".format(sklearn_score))
    print("fit duration: {:.3f}s,".format(sklearn_fit_duration))
    print("score duration: {:.3f}s,".format(sklearn_score_duration))

    lightgbm_res = (None, None, None)
    if args.lightgbm:
        lightgbm_res = _bench('lightgbm', 'num_threads', 'a LightGBM')

    xgb_res = (None, None, None)
    if args.xgboost:
        xgb_res = _bench('xgboost', 'nthread', 'an XGBoost')

    cat_res = (None, None, None)
    if args.catboost:
        cat_res = _bench('catboost', 'thread_count', 'a CatBoost')

    return (sklearn_score, sklearn_fit_duration, sklearn_score_duration,
            *lightgbm_res, *xgb_res, *cat_res)
# Thread counts to benchmark: every power of two up to 128 that is strictly
# below the machine's CPU count, followed by the CPU count itself.
max_threads = os.cpu_count()
n_threads_list = []
power = 1
while power < max_threads and power <= 2 ** 7:
    n_threads_list.append(power)
    power *= 2
n_threads_list.append(max_threads)

# One collector list per (library, metric) pair, filled by the loop below.
sklearn_scores, sklearn_fit_durations, sklearn_score_durations = [], [], []
lightgbm_scores, lightgbm_fit_durations, lightgbm_score_durations = [], [], []
xgb_scores, xgb_fit_durations, xgb_score_durations = [], [], []
cat_scores, cat_fit_durations, cat_score_durations = [], [], []
# The collector lists, in the exact order of one_run's 12-tuple result.
_metric_lists = [
    sklearn_scores, sklearn_fit_durations, sklearn_score_durations,
    lightgbm_scores, lightgbm_fit_durations, lightgbm_score_durations,
    xgb_scores, xgb_fit_durations, xgb_score_durations,
    cat_scores, cat_fit_durations, cat_score_durations,
]
for n_threads in n_threads_list:
    print(f"n_threads: {n_threads}")
    for metric_list, value in zip(_metric_lists, one_run(n_threads, n_samples)):
        metric_list.append(value)
# Render two plots (fit time, score time) against thread count, one curve per
# benchmarked library, then save and/or display them per the CLI flags.
if args.plot or args.plot_filename:
    import matplotlib.pyplot as plt
    import matplotlib
    fig, axs = plt.subplots(2, figsize=(12, 12))
    # axs[0] holds fit durations, axs[1] holds score durations.
    label = f"sklearn {sklearn.__version__}"
    axs[0].plot(n_threads_list, sklearn_fit_durations, label=label)
    axs[1].plot(n_threads_list, sklearn_score_durations, label=label)
    if args.lightgbm:
        import lightgbm
        label = f'LightGBM {lightgbm.__version__}'
        axs[0].plot(n_threads_list, lightgbm_fit_durations, label=label)
        axs[1].plot(n_threads_list, lightgbm_score_durations, label=label)
    if args.xgboost:
        import xgboost
        label = f'XGBoost {xgboost.__version__}'
        axs[0].plot(n_threads_list, xgb_fit_durations, label=label)
        axs[1].plot(n_threads_list, xgb_score_durations, label=label)
    if args.catboost:
        import catboost
        label = f'CatBoost {catboost.__version__}'
        axs[0].plot(n_threads_list, cat_fit_durations, label=label)
        axs[1].plot(n_threads_list, cat_score_durations, label=label)
    for ax in axs:
        # Thread counts are mostly powers of two, so a log x-axis spaces them
        # evenly; ScalarFormatter keeps tick labels as plain numbers.
        ax.set_xscale('log')
        ax.set_xlabel('n_threads')
        ax.set_ylabel('duration (s)')
        ax.set_ylim(0, None)
        ax.set_xticks(n_threads_list)
        ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax.legend(loc='best')
    axs[0].set_title('fit duration (s)')
    axs[1].set_title('score duration (s)')
    title = args.problem
    if args.problem == 'classification':
        title += ' n_classes = {}'.format(args.n_classes)
    fig.suptitle(title)
    plt.tight_layout()
    if args.plot_filename:
        plt.savefig(args.plot_filename)
    if args.plot:
        plt.show()
|
import time
from datetime import datetime
from math import floor
from django.conf import settings
from django.test import RequestFactory
from django.test.utils import override_settings
from django.utils.http import parse_http_date
from bedrock.base.urlresolvers import reverse
from mock import patch
from nose.tools import eq_, ok_
from bedrock.mozorg.tests import TestCase
from bedrock.tabzilla.middleware import TabzillaLocaleURLMiddleware
from bedrock.tabzilla.views import template_last_modified
@patch('bedrock.tabzilla.views.os.path.getmtime')
@patch('bedrock.tabzilla.views.loader.get_template')
class LastModifiedTests(TestCase):
    def test_youngest_file_wins(self, template_mock, mtime_mock):
        """The newest mtime among template and lang file becomes last-modified."""
        template = 'the_dude_is_a_template.html'
        modified_times = [1378762234.0, 1378762235.0]
        template_mock.return_value.filename = template
        mtime_mock.side_effect = modified_times
        last_modified = template_last_modified(template)({})
        self.assertEqual(last_modified,
                         datetime.fromtimestamp(max(modified_times)))
        # Both the template and its lang file must have been stat'ed.
        mtime_mock.assert_any_call(template)
        lang_file = '{0}/locale/en-US/tabzilla/tabzilla.lang'.format(settings.ROOT)
        mtime_mock.assert_any_call(lang_file)
class TabzillaViewTests(TestCase):
    def test_tabzilla_content_type(self):
        """The tabzilla response is served as text/javascript."""
        with self.activate('en-US'):
            response = self.client.get(reverse('tabzilla'))
        self.assertEqual(response['content-type'], 'text/javascript')

    def test_cache_headers(self):
        """The response carries a 12h Cache-Control and a matching Expires."""
        with self.activate('en-US'):
            response = self.client.get(reverse('tabzilla'))
        self.assertEqual(response['cache-control'], 'max-age=43200')  # 12h
        now = floor(time.time())
        expires = parse_http_date(response['expires'])
        # Allow 2 seconds of slack between response generation and this check.
        self.assertAlmostEqual(now + 43200, expires, delta=2)
@patch.object(settings, 'DEV_LANGUAGES', ['en-US', 'de'])
@patch.object(settings, 'PROD_LANGUAGES', ['en-US', 'de'])
class TabzillaRedirectTests(TestCase):
    """Locale- and CDN-redirect behavior for tabzilla URLs."""

    def _process_request(self, url):
        """Run `url` through the tabzilla locale middleware; return its response."""
        rf = RequestFactory()
        req = rf.get(url)
        return TabzillaLocaleURLMiddleware().process_request(req)

    @patch('bedrock.tabzilla.urls.default_collector')
    @patch('bedrock.tabzilla.urls.Packager')
    def test_tabzilla_css_redirect(self, packager_mock, collector_mock):
        """
        Tabzilla css redirect should use STATIC_URL setting and switch
        based on DEBUG setting.
        Bug 826866.
        """
        # Wire the mocked pipeline packager to report the configured output
        # file and a fixed compiled source path.
        packager = packager_mock.return_value
        package = packager.package_for.return_value
        package.output_filename = settings.PIPELINE_CSS['tabzilla']['output_filename']
        packager.compile.return_value = ['css/tabzilla/tabzilla.css']
        tabzilla_css_url = '/en-US/tabzilla/media/css/tabzilla.css'
        # DEBUG off -> redirect to the minified pipeline output.
        with override_settings(DEBUG=False):
            with self.activate('en-US'):
                response = self.client.get(tabzilla_css_url)
                eq_(response.status_code, 301)
                ok_(response['location'].endswith('/css/tabzilla-min.css'), response['location'])
        # DEBUG on -> redirect to the compiled (unminified) source file.
        with override_settings(DEBUG=True):
            with self.activate('en-US'):
                response = self.client.get(tabzilla_css_url)
                eq_(response.status_code, 301)
                ok_(response['location'].endswith('/css/tabzilla/tabzilla.css'), response['location'])

    @patch('bedrock.tabzilla.urls.default_collector')
    @patch('bedrock.tabzilla.urls.Packager')
    def test_tabzilla_css_less_processing(self, packager_mock, collector_mock):
        """
        The tabzilla.less file should be compiled by the redirect if
        settings.DEBUG is True.
        """
        compiler = packager_mock.return_value.compile
        tabzilla_css_url = '/en-US/tabzilla/media/css/tabzilla.css'
        with override_settings(DEBUG=False):
            with self.activate('en-US'):
                self.client.get(tabzilla_css_url)
                eq_(compiler.call_count, 0)
        with override_settings(DEBUG=True):
            with self.activate('en-US'):
                self.client.get(tabzilla_css_url)
                eq_(compiler.call_count, 1)

    @patch('bedrock.tabzilla.middleware.settings.CDN_BASE_URL', '//example.com')
    @patch('bedrock.tabzilla.middleware.settings.TEMPLATE_DEBUG', True)
    def test_no_cdn_redirect_middleware_template_debug(self):
        """
        Tabzilla should NOT redirect to a CDN when it redirects to a locale
        when TEMPLATE_DEBUG = True.
        """
        resp = self._process_request('/tabzilla/tabzilla.js')
        eq_(resp['location'], '/en-US/tabzilla/tabzilla.js')

    @patch('bedrock.tabzilla.middleware.settings.CDN_BASE_URL', '//example.com')
    @patch('bedrock.tabzilla.middleware.settings.TEMPLATE_DEBUG', False)
    def test_no_cdn_redirect_middleware_specified_locale(self):
        """
        Tabzilla should NOT redirect to a CDN when it doesn't need to redirect
        to a locale.
        """
        # None means the middleware let the request through untouched.
        resp = self._process_request('/en-US/tabzilla/tabzilla.js')
        ok_(resp is None)

    @patch('bedrock.tabzilla.middleware.settings.CDN_BASE_URL', '')
    @patch('bedrock.tabzilla.middleware.settings.TEMPLATE_DEBUG', True)
    def test_no_cdn_redirect_middleware_no_cdn(self):
        """
        Tabzilla should NOT redirect to a CDN when it redirects to a locale
        when no CDN is configured.
        """
        resp = self._process_request('/tabzilla/tabzilla.js')
        eq_(resp['location'], '/en-US/tabzilla/tabzilla.js')

    @patch('bedrock.tabzilla.middleware.settings.CDN_BASE_URL', '//example.com')
    @patch('bedrock.tabzilla.middleware.settings.TEMPLATE_DEBUG', False)
    def test_cdn_redirect_middleware(self):
        """
        Tabzilla should redirect to a CDN when it redirects to a locale
        """
        # NOTE(review): the scheme-relative '//example.com' setting is expected
        # to come back as an absolute http URL — presumably the middleware
        # prepends the scheme; confirm against the middleware implementation.
        resp = self._process_request('/tabzilla/tabzilla.js')
        eq_(resp['location'], 'http://example.com/en-US/tabzilla/tabzilla.js')

    @patch('bedrock.tabzilla.middleware.settings.CDN_BASE_URL', '//example.com')
    @patch('bedrock.tabzilla.middleware.settings.TEMPLATE_DEBUG', False)
    def test_no_cdn_redirect_middleware(self):
        """
        Middleware should NOT redirect to a CDN when it's not tabzilla
        """
        resp = self._process_request('/')
        eq_(resp['location'], '/en-US/')

    @override_settings(DEV=False)
    @patch('bedrock.tabzilla.middleware.settings.CDN_BASE_URL', '//example.com')
    @patch('bedrock.tabzilla.middleware.settings.TEMPLATE_DEBUG', False)
    @patch('lib.l10n_utils.template_is_active')
    def test_redirect_to_cdn_inactive_locale(self, lang_mock):
        """
        The view should redirect to the CDN when the locale is not active.
        """
        lang_mock.return_value = False
        resp = self.client.get('/de/tabzilla/tabzilla.js')
        eq_(resp['location'], 'http://example.com/en-US/tabzilla/tabzilla.js')

    @override_settings(DEV=False)
    @patch('bedrock.tabzilla.middleware.settings.CDN_BASE_URL', '//example.com')
    @patch('bedrock.tabzilla.middleware.settings.TEMPLATE_DEBUG', False)
    @patch('lib.l10n_utils.template_is_active')
    def test_no_redirect_to_cdn_active_locale(self, lang_mock):
        """
        The view should NOT redirect to the CDN when the locale is active.
        """
        lang_mock.return_value = True
        resp = self.client.get('/de/tabzilla/tabzilla.js')
        ok_(resp.status_code == 200)

    @override_settings(DEV=False)
    @patch('bedrock.tabzilla.middleware.settings.CDN_BASE_URL', '')
    @patch('bedrock.tabzilla.middleware.settings.TEMPLATE_DEBUG', False)
    @patch('lib.l10n_utils.template_is_active')
    def test_no_redirect_to_cdn_no_cdn(self, lang_mock):
        """
        The view should not redirect to the CDN when the CDN setting is empty.
        """
        lang_mock.return_value = False
        resp = self.client.get('/de/tabzilla/tabzilla.js')
        eq_(resp['location'], 'http://testserver/en-US/tabzilla/tabzilla.js')

    @override_settings(DEV=False)
    @patch('bedrock.tabzilla.middleware.settings.CDN_BASE_URL', '//example.com')
    @patch('bedrock.tabzilla.middleware.settings.TEMPLATE_DEBUG', True)
    @patch('lib.l10n_utils.template_is_active')
    def test_no_redirect_to_cdn_template_debug(self, lang_mock):
        """
        The view should not redirect to the CDN when TEMPLATE_DEBUG is True.
        """
        lang_mock.return_value = False
        resp = self.client.get('/de/tabzilla/tabzilla.js')
        eq_(resp['location'], 'http://testserver/en-US/tabzilla/tabzilla.js')
|
"""Insteon base entity."""
import logging
from pyinsteon import devices
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from .const import (
DOMAIN,
SIGNAL_ADD_DEFAULT_LINKS,
SIGNAL_LOAD_ALDB,
SIGNAL_PRINT_ALDB,
SIGNAL_REMOVE_ENTITY,
SIGNAL_SAVE_DEVICES,
STATE_NAME_LABEL_MAP,
)
from .utils import print_aldb_to_log
_LOGGER = logging.getLogger(__name__)
class InsteonEntity(Entity):
    """INSTEON abstract base entity."""

    def __init__(self, device, group):
        """Initialize the INSTEON binary sensor."""
        self._insteon_device_group = device.groups[group]
        self._insteon_device = device

    def __hash__(self):
        """Return the hash of the Insteon Entity."""
        return hash(self._insteon_device)

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def address(self):
        """Return the address of the node."""
        return str(self._insteon_device.address)

    @property
    def group(self):
        """Return the INSTEON group that the entity responds to."""
        return self._insteon_device_group.group

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        # Group 0x01 is the device's primary group; omit the suffix so the ID
        # stays stable for single-group devices.
        if self._insteon_device_group.group == 0x01:
            uid = self._insteon_device.id
        else:
            uid = f"{self._insteon_device.id}_{self._insteon_device_group.group}"
        return uid

    @property
    def name(self):
        """Return the name of the node (used for Entity_ID)."""
        # Set a base description
        description = self._insteon_device.description
        if description is None:
            description = "Unknown Device"
        # Get an extension label if there is one
        extension = self._get_label()
        if extension:
            extension = f" {extension}"
        return f"{description} {self._insteon_device.address}{extension}"

    @property
    def device_state_attributes(self):
        """Provide attributes for display on device card."""
        return {"insteon_address": self.address, "insteon_group": self.group}

    @property
    def device_info(self):
        """Return device information."""
        return {
            "identifiers": {(DOMAIN, str(self._insteon_device.address))},
            "name": f"{self._insteon_device.description} {self._insteon_device.address}",
            "model": f"{self._insteon_device.model} (0x{self._insteon_device.cat:02x}, 0x{self._insteon_device.subcat:02x})",
            "sw_version": f"{self._insteon_device.firmware:02x} Engine Version: {self._insteon_device.engine_version}",
            "manufacturer": "Smart Home",
            "via_device": (DOMAIN, str(devices.modem.address)),
        }

    @callback
    def async_entity_update(self, name, address, value, group):
        """Receive notification from transport that new data exists."""
        _LOGGER.debug(
            "Received update for device %s group %d value %s",
            address,
            group,
            value,
        )
        self.async_write_ha_state()

    async def async_added_to_hass(self):
        """Register INSTEON update events."""
        _LOGGER.debug(
            "Tracking updates for device %s group %d name %s",
            self.address,
            self.group,
            self._insteon_device_group.name,
        )
        self._insteon_device_group.subscribe(self.async_entity_update)
        # Register every dispatcher connection through async_on_remove so all
        # listeners are disconnected when the entity is removed. Previously
        # the print-ALDB and default-links listeners were connected without
        # async_on_remove and leaked after removal.
        load_signal = f"{self.entity_id}_{SIGNAL_LOAD_ALDB}"
        self.async_on_remove(
            async_dispatcher_connect(self.hass, load_signal, self._async_read_aldb)
        )
        print_signal = f"{self.entity_id}_{SIGNAL_PRINT_ALDB}"
        self.async_on_remove(
            async_dispatcher_connect(self.hass, print_signal, self._print_aldb)
        )
        default_links_signal = f"{self.entity_id}_{SIGNAL_ADD_DEFAULT_LINKS}"
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, default_links_signal, self._async_add_default_links
            )
        )
        remove_signal = f"{self._insteon_device.address.id}_{SIGNAL_REMOVE_ENTITY}"
        self.async_on_remove(
            async_dispatcher_connect(self.hass, remove_signal, self.async_remove)
        )

    async def async_will_remove_from_hass(self):
        """Unsubscribe to INSTEON update events."""
        _LOGGER.debug(
            "Remove tracking updates for device %s group %d name %s",
            self.address,
            self.group,
            self._insteon_device_group.name,
        )
        self._insteon_device_group.unsubscribe(self.async_entity_update)

    async def _async_read_aldb(self, reload):
        """Call device load process and print to log."""
        await self._insteon_device.aldb.async_load(refresh=reload)
        self._print_aldb()
        async_dispatcher_send(self.hass, SIGNAL_SAVE_DEVICES)

    def _print_aldb(self):
        """Print the device ALDB to the log file."""
        print_aldb_to_log(self._insteon_device.aldb)

    def _get_label(self):
        """Get the device label for grouped devices."""
        label = ""
        if len(self._insteon_device.groups) > 1:
            if self._insteon_device_group.name in STATE_NAME_LABEL_MAP:
                label = STATE_NAME_LABEL_MAP[self._insteon_device_group.name]
            else:
                label = f"Group {self.group:d}"
        return label

    async def _async_add_default_links(self):
        """Add default links between the device and the modem."""
        await self._insteon_device.async_add_default_links()
|
"""
Produces contiguous completed ranges of recurring tasks.
See RangeDaily and RangeHourly for basic usage.
Caveat - if gaps accumulate, their causes (e.g. missing dependencies) going
unmonitored/unmitigated, then this will eventually keep retrying the same gaps
over and over and make no progress to more recent times. (See 'task_limit' and
'reverse' parameters.)
TODO foolproof against that kind of misuse?
"""
import itertools
import logging
import warnings
import operator
import re
import time
from datetime import datetime, timedelta
from luigi import six
import luigi
from luigi.parameter import ParameterException
from luigi.target import FileSystemTarget
from luigi.task import Register, flatten_output
logger = logging.getLogger('luigi-interface')
class RangeEvent(luigi.Event): # Not sure if subclassing currently serves a purpose. Stringly typed, events are.
    """
    Events communicating useful metrics.
    COMPLETE_COUNT would normally be nondecreasing, and its derivative would
    describe performance (how many instances complete
    invocation-over-invocation).
    COMPLETE_FRACTION reaching 1 would be a telling event in case of a backfill
    with defined start and stop. Would not be strikingly useful for a typical
    recurring task without stop defined, fluctuating close to 1.
    DELAY is measured from the first found missing datehour till (current time
    + hours_forward), or till stop if it is defined. In hours for Hourly.
    TBD different units for other frequencies?
    TODO any different for reverse mode? From first missing till last missing?
    From last gap till stop?
    """
    # Number of task instances found complete in the considered range.
    COMPLETE_COUNT = "event.tools.range.complete.count"
    # COMPLETE_COUNT divided by the expected instance count (1 when none expected).
    COMPLETE_FRACTION = "event.tools.range.complete.fraction"
    # Number of job periods from the first missing instance to the range end.
    DELAY = "event.tools.range.delay"
class RangeBase(luigi.WrapperTask):
    """
    Produces a contiguous completed range of a recurring task.
    Made for the common use case where a task is parameterized by e.g.
    DateParameter, and assurance is needed that any gaps arising from downtime
    are eventually filled.
    Emits events that one can use to monitor gaps and delays.
    At least one of start and stop needs to be specified.
    (This is quite an abstract base class for subclasses with different
    datetime parameter class, e.g. DateParameter, DateHourParameter, ..., and
    different parameter naming, e.g. days_back/forward, hours_back/forward,
    ..., as well as different documentation wording, for good user experience.)
    """
    # TODO lift the single parameter constraint by passing unknown parameters through WrapperTask?
    of = luigi.TaskParameter(
        description="task name to be completed. The task must take a single datetime parameter")
    # The common parameters 'start' and 'stop' have type (e.g. DateParameter,
    # DateHourParameter) dependent on the concrete subclass, cumbersome to
    # define here generically without dark magic. Refer to the overrides.
    start = luigi.Parameter()
    stop = luigi.Parameter()
    reverse = luigi.BoolParameter(
        default=False,
        description="specifies the preferred order for catching up. False - work from the oldest missing outputs onward; True - from the newest backward")
    task_limit = luigi.IntParameter(
        default=50,
        description="how many of 'of' tasks to require. Guards against scheduling insane amounts of tasks in one go")
    # TODO overridable exclude_datetimes or something...
    now = luigi.IntParameter(
        default=None,
        description="set to override current time. In seconds since epoch")

    @property
    def of_cls(self):
        """The task class to require, resolving a (discouraged) string 'of' if given."""
        if isinstance(self.of, six.string_types):
            warnings.warn('When using Range programatically, dont pass "of" param as string!')
            return Register.get_task_cls(self.of)
        return self.of

    # a bunch of datetime arithmetic building blocks that need to be provided in subclasses
    def datetime_to_parameter(self, dt):
        raise NotImplementedError

    def parameter_to_datetime(self, p):
        raise NotImplementedError

    def moving_start(self, now):
        """
        Returns a datetime from which to ensure contiguousness in the case when
        start is None or unfeasibly far back.
        """
        raise NotImplementedError

    def moving_stop(self, now):
        """
        Returns a datetime till which to ensure contiguousness in the case when
        stop is None or unfeasibly far forward.
        """
        raise NotImplementedError

    def finite_datetimes(self, finite_start, finite_stop):
        """
        Returns the individual datetimes in interval [finite_start, finite_stop)
        for which task completeness should be required, as a sorted list.
        """
        raise NotImplementedError

    def _emit_metrics(self, missing_datetimes, finite_start, finite_stop):
        """
        For consistent metrics one should consider the entire range, but
        it is open (infinite) if stop or start is None.
        Hence make do with metrics respective to the finite simplification.
        """
        # Widen the finite window to include explicit start/stop when given,
        # so the metrics cover the whole user-requested range.
        datetimes = self.finite_datetimes(
            finite_start if self.start is None else min(finite_start, self.parameter_to_datetime(self.start)),
            finite_stop if self.stop is None else max(finite_stop, self.parameter_to_datetime(self.stop)))
        # Delay = periods from the first missing instance to the end of the
        # considered range; 0 when nothing is missing or expected.
        delay_in_jobs = len(datetimes) - datetimes.index(missing_datetimes[0]) if datetimes and missing_datetimes else 0
        self.trigger_event(RangeEvent.DELAY, self.of_cls.task_family, delay_in_jobs)
        expected_count = len(datetimes)
        complete_count = expected_count - len(missing_datetimes)
        self.trigger_event(RangeEvent.COMPLETE_COUNT, self.of_cls.task_family, complete_count)
        # Report fraction 1 for an empty range to avoid dividing by zero.
        self.trigger_event(RangeEvent.COMPLETE_FRACTION, self.of_cls.task_family, float(complete_count) / expected_count if expected_count else 1)

    def _format_datetime(self, dt):
        # Subclasses may override to serialize via their parameter class.
        return self.datetime_to_parameter(dt)

    def _format_range(self, datetimes):
        """Human-readable '[first, last]' rendering of a datetime list, for logs."""
        param_first = self._format_datetime(datetimes[0])
        param_last = self._format_datetime(datetimes[-1])
        return '[%s, %s]' % (param_first, param_last)

    def requires(self):
        # cache because we anticipate a fair amount of computation
        if hasattr(self, '_cached_requires'):
            return self._cached_requires
        if not self.start and not self.stop:
            raise ParameterException("At least one of start and stop needs to be specified")
        if not self.start and not self.reverse:
            raise ParameterException("Either start needs to be specified or reverse needs to be True")
        if self.start and self.stop and self.start > self.stop:
            raise ParameterException("Can't have start > stop")
        # TODO check overridden complete() and exists()
        # Clamp the (possibly open-ended) range to a feasible finite window
        # around 'now', as defined by the subclass's moving_start/moving_stop.
        now = datetime.utcfromtimestamp(time.time() if self.now is None else self.now)
        moving_start = self.moving_start(now)
        finite_start = moving_start if self.start is None else max(self.parameter_to_datetime(self.start), moving_start)
        moving_stop = self.moving_stop(now)
        finite_stop = moving_stop if self.stop is None else min(self.parameter_to_datetime(self.stop), moving_stop)
        datetimes = self.finite_datetimes(finite_start, finite_stop) if finite_start <= finite_stop else []
        task_cls = self.of_cls
        if datetimes:
            logger.debug('Actually checking if range %s of %s is complete',
                         self._format_range(datetimes), self.of_cls.task_family)
            missing_datetimes = sorted(self.missing_datetimes(task_cls, datetimes))
            logger.debug('Range %s lacked %d of expected %d %s instances',
                         self._format_range(datetimes), len(missing_datetimes), len(datetimes), self.of_cls.task_family)
        else:
            missing_datetimes = []
            logger.debug('Empty range. No %s instances expected', self.of_cls.task_family)
        self._emit_metrics(missing_datetimes, finite_start, finite_stop)
        # Cap the batch at task_limit, keeping the newest gaps when reverse.
        if self.reverse:
            required_datetimes = missing_datetimes[-self.task_limit:]
        else:
            required_datetimes = missing_datetimes[:self.task_limit]
        if required_datetimes:
            logger.debug('Requiring %d missing %s instances in range %s',
                         len(required_datetimes), self.of_cls.task_family, self._format_range(required_datetimes))
        if self.reverse:
            required_datetimes.reverse() # TODO priorities, so that within the batch tasks are ordered too
        self._cached_requires = [task_cls(self.datetime_to_parameter(d)) for d in required_datetimes]
        return self._cached_requires

    def missing_datetimes(self, task_cls, finite_datetimes):
        """
        Override in subclasses to do bulk checks.
        Returns a sorted list.
        This is a conservative base implementation that brutally checks completeness, instance by instance.
        Inadvisable as it may be slow.
        """
        return [d for d in finite_datetimes if not task_cls(self.datetime_to_parameter(d)).complete()]
class RangeDailyBase(RangeBase):
    """
    Produces a contiguous completed range of a daily recurring task.
    """
    start = luigi.DateParameter(
        default=None,
        description="beginning date, inclusive. Default: None - work backward forever (requires reverse=True)")
    stop = luigi.DateParameter(
        default=None,
        description="ending date, exclusive. Default: None - work forward forever")
    days_back = luigi.IntParameter(
        default=100,  # slightly more than three months
        description=("extent to which contiguousness is to be assured into "
                     "past, in days from current time. Prevents infinite loop "
                     "when start is none. If the dataset has limited retention"
                     " (i.e. old outputs get removed), this should be set "
                     "shorter to that, too, to prevent the oldest outputs "
                     "flapping. Increase freely if you intend to process old "
                     "dates - worker's memory is the limit"))
    days_forward = luigi.IntParameter(
        default=0,
        description="extent to which contiguousness is to be assured into future, in days from current time. Prevents infinite loop when stop is none")

    def datetime_to_parameter(self, dt):
        """Convert a datetime to the task's DateParameter value (a date)."""
        return dt.date()

    def parameter_to_datetime(self, p):
        """Convert a date parameter value back to a datetime at midnight."""
        return datetime(p.year, p.month, p.day)

    def moving_start(self, now):
        """Earliest feasible datetime: days_back before now."""
        return now - timedelta(days=self.days_back)

    def moving_stop(self, now):
        """Latest feasible datetime: days_forward after now."""
        return now + timedelta(days=self.days_forward)

    def finite_datetimes(self, finite_start, finite_stop):
        """
        Simply returns the points in time that correspond to turn of day.
        """
        # Round finite_start up to the next midnight (itself, if already one).
        day = datetime(finite_start.year, finite_start.month, finite_start.day)
        if day < finite_start:
            day += timedelta(days=1)
        turns_of_day = []
        while day < finite_stop:
            turns_of_day.append(day)
            day += timedelta(days=1)
        return turns_of_day
class RangeHourlyBase(RangeBase):
    """
    Produces a contiguous completed range of an hourly recurring task.
    """
    start = luigi.DateHourParameter(
        default=None,
        description="beginning datehour, inclusive. Default: None - work backward forever (requires reverse=True)")
    stop = luigi.DateHourParameter(
        default=None,
        description="ending datehour, exclusive. Default: None - work forward forever")
    hours_back = luigi.IntParameter(
        default=100 * 24,  # slightly more than three months
        description=("extent to which contiguousness is to be assured into "
                     "past, in hours from current time. Prevents infinite "
                     "loop when start is none. If the dataset has limited "
                     "retention (i.e. old outputs get removed), this should "
                     "be set shorter to that, too, to prevent the oldest "
                     "outputs flapping. Increase freely if you intend to "
                     "process old dates - worker's memory is the limit"))
    # TODO always entire interval for reprocessings (fixed start and stop)?
    hours_forward = luigi.IntParameter(
        default=0,
        description="extent to which contiguousness is to be assured into future, in hours from current time. Prevents infinite loop when stop is none")

    def datetime_to_parameter(self, dt):
        """The DateHourParameter value is the datetime itself."""
        return dt

    def parameter_to_datetime(self, p):
        """The parameter already is a datetime; pass it through."""
        return p

    def moving_start(self, now):
        """Earliest feasible datetime: hours_back before now."""
        return now - timedelta(hours=self.hours_back)

    def moving_stop(self, now):
        """Latest feasible datetime: hours_forward after now."""
        return now + timedelta(hours=self.hours_forward)

    def finite_datetimes(self, finite_start, finite_stop):
        """
        Simply returns the points in time that correspond to whole hours.
        """
        # Round finite_start up to the next whole hour (itself, if already one).
        hour = datetime(finite_start.year, finite_start.month,
                        finite_start.day, finite_start.hour)
        if hour < finite_start:
            hour += timedelta(hours=1)
        whole_hours = []
        while hour < finite_stop:
            whole_hours.append(hour)
            hour += timedelta(hours=1)
        return whole_hours

    def _format_datetime(self, dt):
        """Serialize a datetime the same way DateHourParameter renders it."""
        return luigi.DateHourParameter().serialize(dt)
def _constrain_glob(glob, paths, limit=5):
    """
    Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.
    Saves us minutes long listings for long dataset histories.
    Specifically, in this implementation the leftmost occurrences of "[0-9]"
    give rise to a few separate globs that each specialize the expression to
    digits that actually occur in paths.
    """

    def digit_set_wildcard(chars):
        """
        Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
        """
        chars = sorted(chars)
        # A contiguous run of digits collapses to a range like [1-5];
        # otherwise the digits are enumerated, e.g. [137].
        if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
            return '[%s-%s]' % (chars[0], chars[-1])
        else:
            return '[%s]' % ''.join(chars)

    # Invariant: the keys of `current` jointly cover all of `paths`, each key
    # mapped to exactly the paths it covers.
    current = {glob: paths}
    while True:
        # Only literal characters precede the leftmost "[0-9]", so `pos` is
        # also the corresponding character index into the covered paths, and
        # it is the same for every key (all keys have undergone the same
        # sequence of single-character substitutions).
        pos = list(current.keys())[0].find('[0-9]')
        if pos == -1:
            # no wildcard expressions left to specialize in the glob
            return list(current.keys())
        char_sets = {}
        for g, p in six.iteritems(current):
            # Digits that actually occur at this position in the covered paths.
            char_sets[g] = sorted(set(path[pos] for path in p))
        if sum(len(s) for s in char_sets.values()) > limit:
            # Specializing further would exceed `limit` globs: emit one glob
            # per current key with the observed digits folded into a set.
            return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current]
        for g, s in six.iteritems(char_sets):
            for c in s:
                # Split g into one more-specific glob per occurring digit.
                new_glob = g.replace('[0-9]', c, 1)
                new_paths = list(filter(lambda p: p[pos] == c, current[g]))
                current[new_glob] = new_paths
            del current[g]
def most_common(items):
    """
    Return the ``(item, count)`` pair of the most frequent item.

    Wanted functionality from Counters (new in Python 2.7).
    """
    counts = {}
    for element in items:
        counts[element] = counts.get(element, 0) + 1
    return max(six.iteritems(counts), key=lambda pair: pair[1])
def _get_per_location_glob(tasks, outputs, regexes):
    """
    Builds a glob listing existing output paths.
    Esoteric reverse engineering, but worth it given that (compared to an
    equivalent contiguousness guarantee by naive complete() checks)
    requests to the filesystem are cut by orders of magnitude, and users
    don't even have to retrofit existing tasks anyhow.
    """
    paths = [o.path for o in outputs]
    # naive, because some matches could be confused by numbers earlier
    # in path, e.g. /foo/fifa2000k/bar/2000-12-31/00
    matches = [r.search(p) for r, p in zip(regexes, paths)]
    # Every sample path must expose the datetime, or globbing can't work.
    for m, p, t in zip(matches, paths, tasks):
        if m is None:
            raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))
    n_groups = len(matches[0].groups())
    # the most common position of every group is likely
    # to be conclusive hit or miss
    positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)]
    glob = list(paths[0]) # FIXME sanity check that it's the same for all paths
    # Replace each datetime-group span with '[0-9]' wildcards; one list entry
    # per original character keeps later (start, end) indices valid.
    for start, end in positions:
        glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]
    # chop off the last path item
    # (wouldn't need to if `hadoop fs -ls -d` equivalent were available)
    return ''.join(glob).rsplit('/', 1)[0]
def _get_filesystems_and_globs(datetime_to_task, datetime_to_re):
    """
    Yields a (filesystem, glob) tuple per every output location of task.

    The task can have one or several FileSystemTarget outputs.
    For convenience, the task can be a luigi.WrapperTask,
    in which case outputs of all its dependencies are considered.

    :param datetime_to_task: callable mapping a datetime to a task instance.
    :param datetime_to_re: callable mapping a datetime to a regex string with
        capture groups around each datetime component in the path.
    """
    # probe some scattered datetimes unlikely to all occur in paths, other than by being sincere datetime parameter's representations
    # TODO limit to [self.start, self.stop) so messages are less confusing? Done trivially it can kill correctness
    sample_datetimes = [datetime(y, m, d, h) for y in range(2000, 2050, 10) for m in range(1, 4) for d in range(5, 8) for h in range(21, 24)]
    regexes = [re.compile(datetime_to_re(d)) for d in sample_datetimes]
    sample_tasks = [datetime_to_task(d) for d in sample_datetimes]
    sample_outputs = [flatten_output(t) for t in sample_tasks]
    # Every sampled datetime must produce the same number of outputs of the
    # supported type, otherwise a single glob per location cannot exist.
    for o, t in zip(sample_outputs, sample_tasks):
        if len(o) != len(sample_outputs[0]):
            raise NotImplementedError("Outputs must be consistent over time, sorry; was %r for %r and %r for %r" % (o, t, sample_outputs[0], sample_tasks[0]))
        # TODO fall back on requiring last couple of days? to avoid astonishing blocking when changes like that are deployed
        # erm, actually it's not hard to test entire hours_back..hours_forward and split into consistent subranges FIXME?
        for target in o:
            if not isinstance(target, FileSystemTarget):
                raise NotImplementedError("Output targets must be instances of FileSystemTarget; was %r for %r" % (target, t))
    for o in zip(*sample_outputs):  # transposed, so here we're iterating over logical outputs, not datetimes
        glob = _get_per_location_glob(sample_tasks, o, regexes)
        yield o[0].fs, glob
def _list_existing(filesystem, glob, paths):
    """
    Get all the paths that do in fact exist. Returns a set of all existing paths.

    Takes a luigi.target.FileSystem object, a str which represents a glob and
    a list of strings representing paths.
    """
    constrained_globs = _constrain_glob(glob, paths)
    started_at = time.time()
    found = []
    for pattern in sorted(constrained_globs):
        logger.debug('Listing %s', pattern)
        if filesystem.exists(pattern):
            found.extend(filesystem.listdir(pattern))
    logger.debug('%d %s listings took %f s to return %d items',
                 len(constrained_globs), filesystem.__class__.__name__,
                 time.time() - started_at, len(found))
    return set(found)
def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re):
    """
    Efficiently determines missing datetimes by filesystem listing.

    The current implementation works for the common case of a task writing
    output to a FileSystemTarget whose path is built using strftime with format
    like '...%Y...%m...%d...%H...', without custom complete() or exists().

    (Eventually Luigi could have ranges of completion as first-class citizens.
    Then this listing business could be factored away/be provided for
    explicitly in target API or some kind of a history server.)

    :param datetimes: candidate datetimes to check.
    :param datetime_to_task: callable mapping a datetime to a task instance.
    :param datetime_to_re: callable mapping a datetime to a regex string.
    :return: list of the datetimes whose outputs are not all present.
    """
    filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re)
    paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes]
    listing = set()
    for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)):  # transposed, so here we're iterating over logical outputs, not datetimes
        listing |= _list_existing(f, g, p)
    # quickly learn everything that's missing
    missing_datetimes = []
    for d, p in zip(datetimes, paths_by_datetime):
        # a datetime is complete only if ALL of its output paths were listed
        if not set(p) <= listing:
            missing_datetimes.append(d)
    return missing_datetimes
class RangeDaily(RangeDailyBase):
    """Efficiently produces a contiguous completed range of a daily recurring
    task that takes a single DateParameter.

    Falls back to infer it from output filesystem listing to facilitate the
    common case usage.

    Convenient to use even from command line, like:

    .. code-block:: console

        luigi --module your.module RangeDaily --of YourActualTask --start 2014-01-01
    """

    def missing_datetimes(self, task_cls, finite_datetimes):
        """Return the datetimes from *finite_datetimes* whose outputs are missing."""
        try:
            # Materialize the parameter map into a list before handing it to
            # bulk_complete: implementations may iterate it more than once or
            # need len(), which a Python 3 map generator doesn't support.
            # This mirrors what RangeHourly.missing_datetimes already does.
            return set(finite_datetimes) - set(map(self.parameter_to_datetime, task_cls.bulk_complete(list(map(self.datetime_to_parameter, finite_datetimes)))))
        except NotImplementedError:
            return infer_bulk_complete_from_fs(
                finite_datetimes,
                lambda d: task_cls(self.datetime_to_parameter(d)),
                lambda d: d.strftime('(%Y).*(%m).*(%d)'))
class RangeHourly(RangeHourlyBase):
    """Efficiently produces a contiguous completed range of an hourly recurring
    task that takes a single DateHourParameter.

    Benefits from bulk_complete information to efficiently cover gaps.

    Falls back to infer it from output filesystem listing to facilitate the
    common case usage.

    Convenient to use even from command line, like:

    .. code-block:: console

        luigi --module your.module RangeHourly --of YourActualTask --start 2014-01-01T00
    """

    def missing_datetimes(self, task_cls, finite_datetimes):
        """Return the datetimes from *finite_datetimes* whose outputs are missing."""
        try:
            parameters = list(map(self.datetime_to_parameter, finite_datetimes))
            completed = set(map(self.parameter_to_datetime,
                                task_cls.bulk_complete(parameters)))
            return set(finite_datetimes) - completed
        except NotImplementedError:
            return infer_bulk_complete_from_fs(
                finite_datetimes,
                lambda d: task_cls(self.datetime_to_parameter(d)),
                lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'))
|
from __future__ import print_function
import sys, optparse, time
import logging
from proton import *
# Python 3 removed the ``long`` builtin; alias it to ``int`` there so the
# rest of the script can keep using long() for millisecond timestamps.
# Catch only NameError -- a bare except would also hide unrelated failures.
try:
    long()
except NameError:
    long = int
usage = """
Usage: msgr-send [OPTIONS]
-a <addr>[,<addr>]* \tThe target address [amqp[s]://domain[/name]]
-c # \tNumber of messages to send before exiting [0=forever]
-b # \tSize of message body in bytes [1024]
-p # \tSend batches of # messages (wait for replies before sending next batch if -R) [1024]
-w # \t# outgoing window size [0]
-e # \t# seconds to report statistics, 0 = end of test [0]
-R \tWait for a reply to each sent message
-t # \tInactivity timeout in seconds, -1 = no timeout [-1]
-W # \tIncoming window size [0]
-B # \tArgument to Messenger::recv(n) [-1]
-N <name> \tSet the container name to <name>
-V \tEnable debug logging"""
def parse_options(argv):
    """Parse the msgr-send command line; returns (options, leftover args)."""
    parser = optparse.OptionParser(usage=usage)
    # Registration order matches the usage text above; each entry is
    # (flag, add_option keyword arguments).
    specs = [
        ("-a", dict(dest="targets", action="append", type="string")),
        ("-c", dict(dest="msg_count", type="int", default=0)),
        ("-b", dict(dest="msg_size", type="int", default=1024)),
        ("-p", dict(dest="send_batch", type="int", default=1024)),
        ("-w", dict(dest="outgoing_window", type="int")),
        ("-e", dict(dest="report_interval", type="int", default=0)),
        ("-R", dict(dest="get_replies", action="store_true")),
        ("-t", dict(dest="timeout", type="int", default=-1)),
        ("-W", dict(dest="incoming_window", type="int")),
        ("-B", dict(dest="recv_count", type="int", default=-1)),
        ("-N", dict(dest="name", type="string")),
        ("-V", dict(dest="verbose", action="store_true")),
    ]
    for flag, kwargs in specs:
        parser.add_option(flag, **kwargs)
    return parser.parse_args(args=argv)
class Statistics(object):
    """Accumulates reply-latency samples (milliseconds) and reports throughput."""

    def __init__(self):
        # start_time is set by start(); latencies accumulate in milliseconds.
        self.start_time = 0.0
        self.latency_samples = 0
        self.latency_total = 0.0
        self.latency_min = None
        self.latency_max = None

    def start(self):
        """Record the moment the test began."""
        self.start_time = time.time()

    def msg_received(self, msg):
        """Fold one reply's latency (now minus its creation_time, ms) into the stats."""
        ts = msg.creation_time
        if not ts:
            return
        delta = long(time.time() * 1000) - ts
        if delta > 0.0:
            self.latency_total += delta
            self.latency_samples += 1
            if self.latency_samples == 1:
                self.latency_min = self.latency_max = delta
            else:
                self.latency_min = min(self.latency_min, delta)
                self.latency_max = max(self.latency_max, delta)

    def report(self, sent, received):
        """Print message counts, throughput, and the latency summary (if any)."""
        secs = time.time() - self.start_time
        print("Messages sent: %d recv: %d" % (sent, received))
        print("Total time: %f sec" % secs)
        if secs:
            print("Throughput: %f msgs/sec" % (sent / secs))
        if self.latency_samples:
            print("Latency (sec): %f min %f max %f avg" % (self.latency_min / 1000.0,
                                                           self.latency_max / 1000.0,
                                                           (self.latency_total / self.latency_samples) / 1000.0))
def process_replies(messenger, message, stats, max_count, log):
    """
    Drain the messenger's incoming queue into *message*, feeding each
    reply to *stats*. Return the # of reply messages received.
    """
    log.debug("Calling pn_messenger_recv(%d)", max_count)
    messenger.recv(max_count)
    log.debug("Messages on incoming queue: %d", messenger.incoming)
    reply_count = 0
    while messenger.incoming > 0:
        messenger.get(message)
        reply_count += 1
        # TODO: header decoding?
        stats.msg_received(message)
    return reply_count
def main(argv=None):
    """Send messages round-robin to the configured targets, optionally
    waiting for replies, and print throughput/latency statistics."""
    opts = parse_options(argv)[0]
    if opts.targets is None:
        opts.targets = ["amqp://0.0.0.0"]
    stats = Statistics()
    sent = 0
    received = 0
    target_index = 0
    log = logging.getLogger("msgr-send")
    log.addHandler(logging.StreamHandler())
    if opts.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)
    # One reusable outgoing message; reply_to "~" requests replies back to us.
    message = Message()
    message.reply_to = "~"
    message.body = "X" * opts.msg_size
    reply_message = Message()
    messenger = Messenger( opts.name )
    if opts.outgoing_window is not None:
        messenger.outgoing_window = opts.outgoing_window
    # Messenger timeout is in milliseconds; the CLI takes seconds.
    if opts.timeout > 0:
        opts.timeout *= 1000
    messenger.timeout = opts.timeout
    messenger.start()
    # unpack targets that were specified using comma-separated list
    #
    targets = []
    for x in opts.targets:
        z = x.split(",")
        for y in z:
            if y:
                targets.append(y)
    stats.start()
    while opts.msg_count == 0 or sent < opts.msg_count:
        # send a message; targets are cycled round-robin
        message.address = targets[target_index]
        if target_index == len(targets) - 1:
            target_index = 0
        else:
            target_index += 1
        message.correlation_id = sent
        # creation_time (ms since epoch) is what Statistics uses for latency
        message.creation_time = long(time.time() * 1000)
        messenger.put( message )
        sent += 1
        # flush a batch once enough messages are queued
        if opts.send_batch and (messenger.outgoing >= opts.send_batch):
            if opts.get_replies:
                while received < sent:
                    # this will also transmit any pending sent messages
                    received += process_replies( messenger, reply_message,
                                                 stats, opts.recv_count, log )
            else:
                log.debug("Calling pn_messenger_send()")
                messenger.send()
    log.debug("Messages received=%d sent=%d", received, sent)
    if opts.get_replies:
        # wait for the last of the replies
        while received < sent:
            count = process_replies( messenger, reply_message, stats,
                                     opts.recv_count, log )
            received += count
            log.debug("Messages received=%d sent=%d", received, sent)
    elif messenger.outgoing > 0:
        # no replies expected; just flush whatever is still queued
        log.debug("Calling pn_messenger_send()")
        messenger.send()
    messenger.stop()
    stats.report( sent, received )
    return 0
# Script entry point: process exit status is main()'s return value (0 = success).
if __name__ == "__main__":
    sys.exit(main())
|
"""
.. module: lemur.authorities.service
:platform: Unix
:synopsis: This module contains all of the services level functions used to
administer authorities in Lemur
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from flask import g
from flask import current_app
from lemur import database
from lemur.authorities.models import Authority
from lemur.roles import service as role_service
from lemur.notifications import service as notification_service
from lemur.roles.models import Role
from lemur.certificates.models import Certificate
from lemur.plugins.base import plugins
def update(authority_id, description=None, owner=None, active=None, roles=None):
    """
    Update an authority with new values.

    :param authority_id: id of the authority to update
    :param description: new human readable description
    :param owner: new owner email
    :param active: new active flag; ``None`` leaves the flag unchanged
    :param roles: roles that are allowed to use this authority
    :rtype : Authority
    :return: the updated authority
    """
    authority = get(authority_id)

    if roles:
        authority = database.update_list(authority, 'roles', Role, roles)

    # Compare against None instead of truthiness so callers can explicitly
    # deactivate an authority: the previous ``if active:`` silently ignored
    # an ``active=False`` argument.
    if active is not None:
        authority.active = active

    authority.description = description
    authority.owner = owner
    return database.update(authority)
def create(kwargs):
    """
    Create a new authority.

    Asks the configured issuer plugin to create the CA, persists the root
    certificate, attaches default expiration notifications and any roles the
    issuer returns, and links the new authority to the current user.

    :param kwargs: request payload; expected keys include 'pluginName',
        'ownerEmail', 'caName' and 'caDescription' (schema enforced upstream).
    :rtype : Authority
    :return: the newly created authority
    """
    issuer = plugins.get(kwargs.get('pluginName'))
    kwargs['creator'] = g.current_user.email
    cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)
    # persist the root certificate of the new CA alongside the authority
    cert = Certificate(cert_body, chain=intermediate)
    cert.owner = kwargs['ownerEmail']
    cert.description = "This is the ROOT certificate for the {0} certificate authority".format(kwargs.get('caName'))
    cert.user = g.current_user
    cert.notifications = notification_service.create_default_expiration_notifications(
        'DEFAULT_SECURITY',
        current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')
    )
    # we create and attach any roles that the issuer gives us
    role_objs = []
    for r in issuer_roles:
        role = role_service.create(
            r['name'],
            password=r['password'],
            description="{0} auto generated role".format(kwargs.get('pluginName')),
            username=r['username'])
        # the user creating the authority should be able to administer it
        # NOTE(review): only the role literally named 'admin' is attached to
        # the creating user here -- confirm this matches the issuers' contract.
        if role.username == 'admin':
            g.current_user.roles.append(role)
        role_objs.append(role)
    authority = Authority(
        kwargs.get('caName'),
        kwargs['ownerEmail'],
        kwargs['pluginName'],
        cert_body,
        description=kwargs['caDescription'],
        chain=intermediate,
        roles=role_objs
    )
    database.update(cert)
    authority = database.create(authority)
    # the creator is always granted access to the new authority
    g.current_user.authorities.append(authority)
    return authority
def get_all():
    """
    Get all authorities that are currently in Lemur.

    :rtype : List
    :return: every Authority row, unfiltered
    """
    authority_query = database.session_query(Authority)
    return database.find_all(authority_query, Authority, {}).all()
def get(authority_id):
    """
    Retrieves an authority given its ID.

    :rtype : Authority
    :param authority_id: primary key of the authority
    :return: the matching Authority, or whatever database.get returns when absent
    """
    return database.get(Authority, authority_id)
def get_by_name(authority_name):
    """
    Retrieves an authority given its name.

    :param authority_name: value of the Authority 'name' column to match
    :rtype : Authority
    :return: the matching Authority, or whatever database.get returns when absent
    """
    return database.get(Authority, authority_name, field='name')
def get_authority_role(ca_name):
    """
    Attempts to get the authority role for a given ca uses current_user
    as a basis for accomplishing that.

    :param ca_name: name of the certificate authority
    :return: a Role usable with the CA, or None if the user has none
    """
    if g.current_user.is_admin:
        # TODO we should pick admin ca roles for admin
        return get_by_name(ca_name).roles[0]
    for role in g.current_user.roles:
        if role.authority and role.authority.name == ca_name:
            return role
def render(args):
    """
    Helper that helps us render the REST Api responses.

    :param args: dict of request parameters; 'sort_by', 'sort_dir', 'page',
        'count' and 'filter' are popped off, the rest is passed to find_all.
    :return: paginated query result
    """
    query = database.session_query(Authority)
    sort_by = args.pop('sort_by')
    sort_dir = args.pop('sort_dir')
    page = args.pop('page')
    count = args.pop('count')
    filt = args.pop('filter')
    if filt:
        # filter arrives as 'field;value'
        terms = filt.split(';')
        if 'active' in filt:  # this is really weird but strcmp seems to not work here??
            # NOTE(review): terms[1] is the raw string value compared against the
            # boolean 'active' column -- relies on the DB layer's coercion; verify.
            query = query.filter(Authority.active == terms[1])
        else:
            query = database.filter(query, Authority, terms)
    # we make sure that a user can only use an authority they either own or are a member of - admins can see all
    if not g.current_user.is_admin:
        authority_ids = []
        for role in g.current_user.roles:
            if role.authority:
                authority_ids.append(role.authority.id)
        query = query.filter(Authority.id.in_(authority_ids))
    query = database.find_all(query, Authority, args)
    if sort_by and sort_dir:
        query = database.sort(query, Authority, sort_by, sort_dir)
    return database.paginate(query, page, count)
|
"""Tests for record_input_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class RecordInputOpTest(test.TestCase):
  """Exercises data_flow_ops.RecordInput against generated TFRecord files."""

  def generateTestData(self, prefix, n, m):
    """Write `n` TFRecord files of `m` records each into the temp dir.

    Record (i, j) contains the zero-padded 10-digit global index i*m+j,
    so every record across all files is unique.
    """
    for i in range(n):
      f = os.path.join(self.get_temp_dir(), prefix + "." + str(i))
      w = tf_record.TFRecordWriter(f)
      for j in range(m):
        w.write("{0:0{width}}".format(i * m + j, width=10).encode("utf-8"))
      w.close()

  def testRecordInputSimple(self):
    """A single one-record file should yield exactly that record."""
    with self.test_session() as sess:
      self.generateTestData("basic", 1, 1)
      yield_op = data_flow_ops.RecordInput(
          file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
          parallelism=1,
          buffer_size=1,
          batch_size=1,
          name="record_input").get_yield_op()
      self.assertEqual(sess.run(yield_op), b"0000000000")

  def testRecordInputEpochs(self):
    """Each epoch must yield every record exactly once (no duplicates)."""
    files = 100
    records_per_file = 100
    batches = 2
    with self.test_session() as sess:
      self.generateTestData("basic", files, records_per_file)
      records = data_flow_ops.RecordInput(
          file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
          parallelism=2,
          buffer_size=2000,
          batch_size=1,
          shift_ratio=0.33,
          seed=10,
          name="record_input",
          batches=batches)
      yield_op = records.get_yield_op()
      # cycle over 3 epochs and make sure we never duplicate
      for _ in range(3):
        epoch_set = set()
        for _ in range(int(files * records_per_file / batches)):
          op_list = sess.run(yield_op)
          # Was `assertTrue(len(op_list) is batches)`: identity comparison of
          # ints only worked via CPython's small-int caching. Compare values,
          # which also produces a useful failure message.
          self.assertEqual(len(op_list), batches)
          for r in op_list:
            self.assertNotIn(r[0], epoch_set)
            epoch_set.add(r[0])

  def testDoesNotDeadlock(self):
    # Iterate multiple times to cause deadlock if there is a chance it can occur
    for _ in range(30):
      with self.test_session() as sess:
        self.generateTestData("basic", 1, 1)
        records = data_flow_ops.RecordInput(
            file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
            parallelism=1,
            buffer_size=100,
            batch_size=1,
            name="record_input")
        yield_op = records.get_yield_op()
        for _ in range(50):
          sess.run(yield_op)
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
|
import copy
import mock
from oslo_config import cfg
import webob
from nova.api.openstack.compute.legacy_v2.contrib import os_tenant_networks \
as networks
from nova.api.openstack.compute import tenant_networks \
as networks_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
# Two plain tenant networks returned by the fake network API for any tenant.
NETWORKS = [
    {
        "id": 1,
        "cidr": "10.20.105.0/24",
        "label": "new net 1"
    },
    {
        "id": 2,
        "cidr": "10.20.105.0/24",
        "label": "new net 2"
    }
]
# Extra network only returned when use_neutron_default_nets applies
# (see fake_network_api_get_all below).
DEFAULT_NETWORK = {
    "id": 3,
    "cidr": "10.20.105.0/24",
    "label": "default"
}
NETWORKS_WITH_DEFAULT_NET = copy.deepcopy(NETWORKS)
NETWORKS_WITH_DEFAULT_NET.append(DEFAULT_NETWORK)
# Tenant id that the fake get_all treats as seeing the default network.
DEFAULT_TENANT_ID = 1
def fake_network_api_get_all(context):
    """Stub for the network API's get_all: the default tenant also sees the
    default network; every other tenant sees only the plain networks."""
    if context.project_id != DEFAULT_TENANT_ID:
        return NETWORKS
    return NETWORKS_WITH_DEFAULT_NET
class TenantNetworksTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-tenant-networks controller.

    The subclass TenantNetworksTestV2 reruns these against the legacy v2
    controller by overriding ``ctrlr`` and ``validation_error``.
    """
    # controller class under test; overridden by the v2 subclass
    ctrlr = networks_v21.TenantNetworkController
    # exception type expected for schema validation failures
    validation_error = exception.ValidationError

    def setUp(self):
        super(TenantNetworksTestV21, self).setUp()
        self.controller = self.ctrlr()
        self.flags(enable_network_quota=True)
        self.req = fakes.HTTPRequest.blank('')
        # remembered so tearDown can restore the config option
        self.original_value = CONF.use_neutron_default_nets

    def tearDown(self):
        super(TenantNetworksTestV21, self).tearDown()
        CONF.set_override("use_neutron_default_nets", self.original_value)

    def _fake_network_api_create(self, context, **kwargs):
        # stands in for network API create(); verifies the tenant was threaded
        # through and returns the canned network list
        self.assertEqual(context.project_id, kwargs['project_id'])
        return NETWORKS

    # mock args arrive bottom-up: delete, disassociate, rollback, reserve
    @mock.patch('nova.quota.QUOTAS.reserve')
    @mock.patch('nova.quota.QUOTAS.rollback')
    @mock.patch('nova.network.api.API.disassociate')
    @mock.patch('nova.network.api.API.delete')
    def _test_network_delete_exception(self, delete_ex, disassociate_ex, expex,
                                       delete_mock, disassociate_mock,
                                       rollback_mock, reserve_mock):
        # Common driver: make delete/disassociate raise and check that the
        # expected HTTP error surfaces and the quota reservation is rolled back.
        ctxt = self.req.environ['nova.context']
        reserve_mock.return_value = 'rv'
        # NOTE(review): delete_mock is a Mock and therefore always truthy, so
        # this guard always runs; it was presumably meant to test delete_ex.
        # Assigning side_effect=None is harmless, so behavior is unaffected.
        if delete_mock:
            delete_mock.side_effect = delete_ex
        if disassociate_ex:
            disassociate_mock.side_effect = disassociate_ex
        self.assertRaises(expex, self.controller.delete, self.req, 1)
        disassociate_mock.assert_called_once_with(ctxt, 1)
        # delete is only reached when disassociate succeeded
        if not disassociate_ex:
            delete_mock.assert_called_once_with(ctxt, 1)
        rollback_mock.assert_called_once_with(ctxt, 'rv')
        reserve_mock.assert_called_once_with(ctxt, networks=-1)

    def test_network_delete_exception_network_not_found(self):
        ex = exception.NetworkNotFound(network_id=1)
        expex = webob.exc.HTTPNotFound
        self._test_network_delete_exception(None, ex, expex)

    def test_network_delete_exception_policy_failed(self):
        ex = exception.PolicyNotAuthorized(action='dummy')
        expex = webob.exc.HTTPForbidden
        self._test_network_delete_exception(ex, None, expex)

    def test_network_delete_exception_network_in_use(self):
        ex = exception.NetworkInUse(network_id=1)
        expex = webob.exc.HTTPConflict
        self._test_network_delete_exception(ex, None, expex)

    @mock.patch('nova.quota.QUOTAS.reserve')
    @mock.patch('nova.quota.QUOTAS.commit')
    @mock.patch('nova.network.api.API.delete')
    @mock.patch('nova.network.api.API.disassociate')
    def test_network_delete(self, disassociate_mock, delete_mock, commit_mock,
                            reserve_mock):
        # Happy path: 202 response, network disassociated then deleted,
        # quota reservation committed.
        ctxt = self.req.environ['nova.context']
        reserve_mock.return_value = 'rv'
        res = self.controller.delete(self.req, 1)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller, networks_v21.TenantNetworkController):
            status_int = self.controller.delete.wsgi_code
        else:
            status_int = res.status_int
        self.assertEqual(202, status_int)
        disassociate_mock.assert_called_once_with(ctxt, 1)
        delete_mock.assert_called_once_with(ctxt, 1)
        commit_mock.assert_called_once_with(ctxt, 'rv')
        reserve_mock.assert_called_once_with(ctxt, networks=-1)

    @mock.patch('nova.network.api.API.get')
    def test_network_show(self, get_mock):
        get_mock.return_value = NETWORKS[0]
        res = self.controller.show(self.req, 1)
        self.assertEqual(NETWORKS[0], res['network'])

    @mock.patch('nova.network.api.API.get')
    def test_network_show_not_found(self, get_mock):
        ctxt = self.req.environ['nova.context']
        get_mock.side_effect = exception.NetworkNotFound(network_id=1)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, self.req, 1)
        get_mock.assert_called_once_with(ctxt, 1)

    @mock.patch('nova.network.api.API.get_all')
    def _test_network_index(self, get_all_mock, default_net=True):
        # With default nets enabled, the request runs as the default tenant
        # and the default network appears in the listing.
        CONF.set_override("use_neutron_default_nets", default_net)
        get_all_mock.side_effect = fake_network_api_get_all
        expected = NETWORKS
        if default_net is True:
            self.req.environ['nova.context'].project_id = DEFAULT_TENANT_ID
            expected = NETWORKS_WITH_DEFAULT_NET
        res = self.controller.index(self.req)
        self.assertEqual(expected, res['networks'])

    def test_network_index_with_default_net(self):
        self._test_network_index()

    def test_network_index_without_default_net(self):
        self._test_network_index(default_net=False)

    @mock.patch('nova.quota.QUOTAS.reserve')
    @mock.patch('nova.quota.QUOTAS.commit')
    @mock.patch('nova.network.api.API.create')
    def test_network_create(self, create_mock, commit_mock, reserve_mock):
        # Happy path: created network echoed back and quota committed.
        ctxt = self.req.environ['nova.context']
        reserve_mock.return_value = 'rv'
        create_mock.side_effect = self._fake_network_api_create
        body = copy.deepcopy(NETWORKS[0])
        del body['id']
        body = {'network': body}
        res = self.controller.create(self.req, body=body)
        self.assertEqual(NETWORKS[0], res['network'])
        commit_mock.assert_called_once_with(ctxt, 'rv')
        reserve_mock.assert_called_once_with(ctxt, networks=1)

    @mock.patch('nova.quota.QUOTAS.reserve')
    def test_network_create_quota_error(self, reserve_mock):
        ctxt = self.req.environ['nova.context']
        reserve_mock.side_effect = exception.OverQuota(overs='fake')
        body = {'network': {"cidr": "10.20.105.0/24",
                            "label": "new net 1"}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=body)
        reserve_mock.assert_called_once_with(ctxt, networks=1)

    @mock.patch('nova.quota.QUOTAS.reserve')
    @mock.patch('nova.quota.QUOTAS.rollback')
    @mock.patch('nova.network.api.API.create')
    def _test_network_create_exception(self, ex, expex, create_mock,
                                       rollback_mock, reserve_mock):
        # Common driver: create() raises `ex`, controller maps it to `expex`.
        ctxt = self.req.environ['nova.context']
        reserve_mock.return_value = 'rv'
        create_mock.side_effect = ex
        body = {'network': {"cidr": "10.20.105.0/24",
                            "label": "new net 1"}}
        self.assertRaises(expex, self.controller.create, self.req, body=body)
        reserve_mock.assert_called_once_with(ctxt, networks=1)

    def test_network_create_exception_policy_failed(self):
        ex = exception.PolicyNotAuthorized(action='dummy')
        expex = webob.exc.HTTPForbidden
        self._test_network_create_exception(ex, expex)

    def test_network_create_exception_conflictcidr(self):
        ex = exception.CidrConflict(cidr='dummy', other='dummy')
        expex = webob.exc.HTTPConflict
        self._test_network_create_exception(ex, expex)

    def test_network_create_exception_service_unavailable(self):
        ex = Exception
        expex = webob.exc.HTTPServiceUnavailable
        self._test_network_create_exception(ex, expex)

    def test_network_create_empty_body(self):
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body={})

    def test_network_create_without_cidr(self):
        body = {'network': {"label": "new net 1"}}
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req, body=body)

    def test_network_create_bad_format_cidr(self):
        body = {'network': {"cidr": "123",
                            "label": "new net 1"}}
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req, body=body)

    def test_network_create_empty_network(self):
        body = {'network': {}}
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req, body=body)

    def test_network_create_without_label(self):
        body = {'network': {"cidr": "10.20.105.0/24"}}
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req, body=body)
class TenantNetworksTestV2(TenantNetworksTestV21):
    """Reruns the v2.1 suite against the legacy v2 controller."""
    # legacy controller and its (HTTP-level) validation failure type
    ctrlr = networks.NetworkController
    validation_error = webob.exc.HTTPBadRequest

    def setUp(self):
        super(TenantNetworksTestV2, self).setUp()
        # the legacy API is exercised with an admin context
        self.req = fakes.HTTPRequest.blank('', use_admin_context=True)

    def test_network_create_empty_body(self):
        # v2 reports an empty body as 422 (v2.1 uses schema validation errors)
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, self.req, {})
class TenantNetworksEnforcementV21(test.NoDBTestCase):
    """Checks that every os-tenant-networks action is rejected when the
    policy rule forbids it for the requesting project."""

    def setUp(self):
        super(TenantNetworksEnforcementV21, self).setUp()
        self.controller = networks_v21.TenantNetworkController()
        self.req = fakes.HTTPRequest.blank('')

    def _assert_policy_failed(self, action, *args, **kwargs):
        # Shared driver for the four action tests below: forbid the rule for
        # this project, invoke the controller action, and verify both the
        # exception type and its message.  Behavior is identical to the
        # previously duplicated per-test bodies.
        rule_name = 'os_compute_api:os-tenant-networks'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            action, self.req, *args, **kwargs)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_create_policy_failed(self):
        self._assert_policy_failed(
            self.controller.create,
            body={'network': {'label': 'test',
                              'cidr': '10.0.0.0/32'}})

    def test_index_policy_failed(self):
        self._assert_policy_failed(self.controller.index)

    def test_delete_policy_failed(self):
        self._assert_policy_failed(self.controller.delete, fakes.FAKE_UUID)

    def test_show_policy_failed(self):
        self._assert_policy_failed(self.controller.show, fakes.FAKE_UUID)
|
"""Ragged operations for working with string Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util.tf_export import tf_export
@tf_export("strings.unicode_encode")
def unicode_encode(input,
                   output_encoding,
                   errors="replace",
                   replacement_char=65533,
                   name=None):
  r"""Encodes each sequence of Unicode code points in `input` into a string.

  `result[i1...iN]` is the string formed by concatenating the Unicode
  codepoints `input[i1...iN, :]`, encoded using `output_encoding`.

  Args:
    input: An `N+1` dimensional potentially ragged integer tensor with shape
      `[D1...DN, num_chars]`.
    output_encoding: Unicode encoding that should be used to encode each
      codepoint sequence. Can be `"UTF-8"`, `"UTF-16-BE"`, or `"UTF-32-BE"`.
    errors: Specifies the response when an invalid codepoint is encountered
      (optional). One of:
            * `'replace'`: Replace invalid codepoint with the
              `replacement_char`. (default)
            * `'ignore'`: Skip invalid codepoints.
            * `'strict'`: Raise an exception for any invalid codepoint.
    replacement_char: The replacement character codepoint to be used in place of
      any invalid input when `errors='replace'`. Any valid unicode codepoint may
      be used. The default value is the default unicode replacement character
      which is U+FFFD (decimal 65533).
    name: A name for the operation (optional).

  Returns:
    A `N` dimensional `string` tensor with shape `[D1...DN]`.

  #### Example:
    ```python
      >>> input = [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
      >>> unicode_encode(input, 'UTF8')
      ['G\xc3\xb6\xc3\xb6dnight', '\xf0\x9f\x98\x8a']
    ```
  """
  with ops.name_scope(name, "UnicodeEncode", [input]):
    input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(input)
    if input_tensor.shape.ndims is None:
      raise ValueError("Rank of input_tensor must be statically known.")
    if ragged_tensor.is_ragged(input_tensor):
      if input_tensor.flat_values.shape.ndims > 1:
        # If the flat_values of our ragged tensor is multi-dimensional, we can
        # process it separately and our output will have the same nested splits
        # as our input.
        return input_tensor.with_flat_values(
            unicode_encode(input_tensor.flat_values, output_encoding, errors,
                           replacement_char))
      elif input_tensor.ragged_rank > 1:
        # Recursively process the values of the ragged tensor.
        return input_tensor.with_values(
            unicode_encode(input_tensor.values, output_encoding, errors,
                           replacement_char))
      else:
        # Our ragged tensor is of the correct shape (rank 1 flat_values tensor
        # with ragged_rank of 1) so we can process it as normal.
        return gen_string_ops.unicode_encode(
            input_values=input_tensor.values,
            input_splits=input_tensor.row_splits,
            output_encoding=output_encoding,
            errors=errors,
            replacement_char=replacement_char)
    else:
      if input_tensor.shape.ndims == 2:
        # The input tensor is of the correct 2-D shape, it's just not ragged.
        return unicode_encode(
            ragged_conversion_ops.from_tensor(input_tensor), output_encoding,
            errors, replacement_char)
      elif input_tensor.shape.ndims > 2:
        # We need to initially flatten the input tensor to 2-D, and then can
        # reshape the output of our processed flattened tensor.
        flat_input_tensor = array_ops.reshape(
            input_tensor,
            array_ops.stack([-1, array_ops.shape(input_tensor)[-1]]))
        flat_output_tensor = unicode_encode(flat_input_tensor, output_encoding,
                                            errors, replacement_char)
        return array_ops.reshape(flat_output_tensor, input_tensor.shape[:-1])
      elif input_tensor.shape.ndims == 0:
        raise ValueError("input_tensor's rank must be at least 1.")
      else:
        # Our input tensor is rank 1, so we create a ragged tensor with an added
        # dimension to create the correct input shape & type, and then remove
        # the additional dimension from the output and return the string scalar.
        ragged_input_tensor = ragged_tensor.RaggedTensor.from_row_splits(
            input_tensor,
            array_ops.stack(
                [0, array_ops.shape(input_tensor, out_type=dtypes.int64)[0]]))
        output_tensor = unicode_encode(ragged_input_tensor, output_encoding,
                                       errors, replacement_char)
        return array_ops.reshape(output_tensor, [])
@tf_export("strings.unicode_decode")
def unicode_decode(input,
                   input_encoding,
                   errors="replace",
                   replacement_char=0xFFFD,
                   replace_control_characters=False,
                   name=None):
  r"""Decodes each string in `input` into a sequence of Unicode code points.

  `result[i1...iN, j]` is the Unicode codepoint for the `j`th character in
  `input[i1...iN]`, when decoded using `input_encoding`.

  Args:
    input: An `N` dimensional potentially ragged `string` tensor with shape
      `[D1...DN]`. `N` must be statically known.
    input_encoding: String name for the unicode encoding that should be used to
      decode each string.
    errors: Specifies the response when an input string can't be converted
      using the indicated encoding. One of:
      * `'strict'`: Raise an exception for any illegal substrings.
      * `'replace'`: Replace illegal substrings with `replacement_char`.
      * `'ignore'`: Skip illegal substrings.
    replacement_char: The replacement codepoint to be used in place of invalid
      substrings in `input` when `errors='replace'`; and in place of C0 control
      characters in `input` when `replace_control_characters=True`.
    replace_control_characters: Whether to replace the C0 control characters
      `(U+0000 - U+001F)` with the `replacement_char`.
    name: A name for the operation (optional).

  Returns:
    A `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`.
    The returned tensor is a `tf.Tensor` if `input` is a scalar, or a
    `tf.RaggedTensor` otherwise.

  #### Example:
    ```python
    >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
    >>> tf.strings.unicode_decode(input, 'UTF-8').tolist()
    [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
    ```
  """
  with ops.name_scope(name, "UnicodeDecode", [input]):
    # Delegates to the shared decode helper; with_offsets=False means only
    # the codepoints tensor is returned (no byte-offset tensor).
    return _unicode_decode(input, input_encoding, errors, replacement_char,
                           replace_control_characters, with_offsets=False)
@tf_export("strings.unicode_decode_with_offsets")
def unicode_decode_with_offsets(input,
                                input_encoding,
                                errors="replace",
                                replacement_char=0xFFFD,
                                replace_control_characters=False,
                                name=None):
  r"""Decodes each string into a sequence of code points with start offsets.

  This op is similar to `tf.strings.decode(...)`, but it also returns the
  start offset for each character in its respective string. This information
  can be used to align the characters with the original byte sequence.

  Returns a tuple `(codepoints, start_offsets)` where:

  * `codepoints[i1...iN, j]` is the Unicode codepoint for the `j`th character
    in `input[i1...iN]`, when decoded using `input_encoding`.
  * `start_offsets[i1...iN, j]` is the start byte offset for the `j`th
    character in `input[i1...iN]`, when decoded using `input_encoding`.

  Args:
    input: An `N` dimensional potentially ragged `string` tensor with shape
      `[D1...DN]`. `N` must be statically known.
    input_encoding: Name of the unicode encoding used to decode each string.
    errors: How to respond when an input string cannot be converted using the
      indicated encoding:
      * `'strict'`: Raise an exception for any illegal substrings.
      * `'replace'`: Replace illegal substrings with `replacement_char`.
      * `'ignore'`: Skip illegal substrings.
    replacement_char: Codepoint substituted for invalid substrings when
      `errors='replace'`, and for C0 control characters when
      `replace_control_characters=True`.
    replace_control_characters: Whether to replace the C0 control characters
      `(U+0000 - U+001F)` with the `replacement_char`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`:
    * `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`.
    * `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`.
    Both are `tf.Tensor`s if `input` is a scalar, otherwise `tf.RaggedTensor`s.

  #### Example:
    ```python
    >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
    >>> result = tf.strings.unicode_decode_with_offsets(input, 'UTF-8')
    >>> result[0].tolist()  # codepoints
    [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
    >>> result[1].tolist()  # offsets
    [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
    ```
  """
  with ops.name_scope(name, "UnicodeDecodeWithOffsets", [input]):
    # Same decode path as `unicode_decode`, but requesting byte offsets too.
    return _unicode_decode(input, input_encoding, errors, replacement_char,
                           replace_control_characters, with_offsets=True)
@tf_export("strings.unicode_split")
def unicode_split(input,
                  input_encoding,
                  errors="replace",
                  replacement_char=0xFFFD,
                  name=None):
  r"""Splits each string in `input` into a sequence of Unicode code points.

  `result[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its
  `j`th character, when decoded using `input_encoding`.

  Args:
    input: An `N` dimensional potentially ragged `string` tensor with shape
      `[D1...DN]`. `N` must be statically known.
    input_encoding: Name of the unicode encoding used to decode each string.
    errors: How to respond when an input string cannot be converted using the
      indicated encoding:
      * `'strict'`: Raise an exception for any illegal substrings.
      * `'replace'`: Replace illegal substrings with `replacement_char`.
      * `'ignore'`: Skip illegal substrings.
    replacement_char: Codepoint substituted for invalid substrings in `input`
      when `errors='replace'`.
    name: A name for the operation (optional).

  Returns:
    A `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`;
    a `tf.Tensor` if `input` is a scalar, otherwise a `tf.RaggedTensor`.

  #### Example:
    ```python
    >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
    >>> tf.strings.unicode_split(input, 'UTF-8').tolist()
    [['G', '\xc3\xb6', '\xc3\xb6', 'd', 'n', 'i', 'g', 'h', 't'],
     ['\xf0\x9f\x98\x8a']]
    ```
  """
  with ops.name_scope(name, "UnicodeSplit", [input]):
    # Decode to codepoints, then re-encode each codepoint individually so
    # every character becomes its own (sub)string.
    code_points = _unicode_decode(input, input_encoding, errors,
                                  replacement_char, False, with_offsets=False)
    return unicode_encode(
        ragged_array_ops.expand_dims(code_points, -1),
        output_encoding=input_encoding,
        errors=errors,
        replacement_char=replacement_char)
@tf_export("strings.unicode_split_with_offsets")
def unicode_split_with_offsets(input,
                               input_encoding,
                               errors="replace",
                               replacement_char=0xFFFD,
                               name=None):
  r"""Splits each string into a sequence of code points with start offsets.

  This op is similar to `tf.strings.decode(...)`, but it also returns the
  start offset for each character in its respective string. This information
  can be used to align the characters with the original byte sequence.

  Returns a tuple `(chars, start_offsets)` where:

  * `chars[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its
    `j`th character, when decoded using `input_encoding`.
  * `start_offsets[i1...iN, j]` is the start byte offset for the `j`th
    character in `input[i1...iN]`, when decoded using `input_encoding`.

  Args:
    input: An `N` dimensional potentially ragged `string` tensor with shape
      `[D1...DN]`. `N` must be statically known.
    input_encoding: Name of the unicode encoding used to decode each string.
    errors: How to respond when an input string cannot be converted using the
      indicated encoding:
      * `'strict'`: Raise an exception for any illegal substrings.
      * `'replace'`: Replace illegal substrings with `replacement_char`.
      * `'ignore'`: Skip illegal substrings.
    replacement_char: Codepoint substituted for invalid substrings in `input`
      when `errors='replace'`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`:
    * `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`.
    * `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`.
    Both are `tf.Tensor`s if `input` is a scalar, otherwise `tf.RaggedTensor`s.

  #### Example:
    ```python
    >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
    >>> result = tf.strings.unicode_split_with_offsets(input, 'UTF-8')
    >>> result[0].tolist()  # character substrings
    [['G', '\xc3\xb6', '\xc3\xb6', 'd', 'n', 'i', 'g', 'h', 't'],
     ['\xf0\x9f\x98\x8a']]
    >>> result[1].tolist()  # offsets
    [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
    ```
  """
  with ops.name_scope(name, "UnicodeSplitWithOffsets", [input]):
    # Decode with offsets, then re-encode each codepoint individually.
    code_points, byte_starts = _unicode_decode(
        input, input_encoding, errors, replacement_char, False,
        with_offsets=True)
    character_strings = unicode_encode(
        ragged_array_ops.expand_dims(code_points, -1),
        output_encoding=input_encoding,
        errors=errors,
        replacement_char=replacement_char)
    return character_strings, byte_starts
def _unicode_decode(input, input_encoding, errors, replacement_char,
                    replace_control_characters, with_offsets):
  """Shared implementation for unicode_decode / unicode_split and friends.

  Args:
    input: An N-dimensional (possibly ragged) string tensor; its rank must be
      statically known.
    input_encoding: Name of the unicode encoding of the input strings.
    errors: 'strict' | 'replace' | 'ignore' handling for illegal substrings.
    replacement_char: Codepoint used for `errors='replace'` (and for C0
      control characters when `replace_control_characters` is True).
    replace_control_characters: Whether C0 control characters are replaced.
    with_offsets: If True, also return per-character byte start offsets.

  Returns:
    `codepoints` or `(codepoints, offsets)` depending on `with_offsets`,
    mirroring the raggedness/rank of `input` plus one character dimension.

  Raises:
    ValueError: If the rank of `input` is not statically known.
  """
  input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name="input")
  input_ndims = input.shape.ndims
  if input_ndims is None:
    raise ValueError("Rank of `input` must be statically known.")
  if input_ndims > 1:
    # Convert to a ragged tensor with ragged_rank = input_ndims - 1.
    if not ragged_tensor.is_ragged(input):
      input = ragged_conversion_ops.from_tensor(
          input, ragged_rank=input_ndims - 1)
    elif input.ragged_rank < input_ndims - 1:
      # BUG FIX: flat_values has rank `input_ndims - input.ragged_rank`, so
      # making it fully ragged needs `rank - 1` ragged dimensions, i.e.
      # `input_ndims - input.ragged_rank - 1` (the previous `+ 1` requested
      # more ragged dimensions than flat_values has).
      input = input.with_flat_values(
          ragged_conversion_ops.from_tensor(
              input.flat_values,
              ragged_rank=input_ndims - input.ragged_rank - 1))
  # Reshape the input to a flat vector, and apply the gen_string_ops op.
  if ragged_tensor.is_ragged(input):
    flat_input = array_ops.reshape(input.flat_values, [-1])
  else:
    flat_input = array_ops.reshape(input, [-1])
  if with_offsets:
    decode_op = gen_string_ops.unicode_decode_with_offsets
  else:
    decode_op = gen_string_ops.unicode_decode
  flat_result = decode_op(
      input=flat_input,
      input_encoding=input_encoding,
      errors=errors,
      replacement_char=replacement_char,
      replace_control_characters=replace_control_characters)
  if input_ndims == 0:
    # Scalar input: return plain (non-ragged) result tensors.
    codepoints = flat_result.char_values
    if with_offsets:
      offsets = flat_result.char_to_byte_starts
  else:
    # Re-attach the character dimension, then restore the outer structure.
    codepoints = ragged_tensor.RaggedTensor.from_row_splits(
        flat_result.char_values, flat_result.row_splits)
    if input_ndims > 1:
      codepoints = input.with_flat_values(codepoints)
    if with_offsets:
      offsets = ragged_tensor.RaggedTensor.from_row_splits(
          flat_result.char_to_byte_starts, flat_result.row_splits)
      if input_ndims > 1:
        offsets = input.with_flat_values(offsets)
  if with_offsets:
    return codepoints, offsets
  else:
    return codepoints
|
from rosbridge_library.internal.ros_loader import get_service_class
from rosbridge_library.internal import message_conversion
from rosbridge_library.capability import Capability
import rospy
import time
class AdvertisedServiceHandler():
    """Proxies a ROS service whose implementation lives in an external
    rosbridge client: incoming service requests are forwarded over the
    protocol, and this handler blocks until the matching response arrives."""

    # Counter used to build unique request IDs.
    # NOTE(review): `self.id_counter += 1` in next_id rebinds the name on the
    # instance, so each handler effectively counts independently from 1; IDs
    # remain unique because they also embed the service name.
    id_counter = 1
    # Responses delivered by the external client, keyed by request ID.
    # NOTE(review): as a class attribute this dict is shared across all
    # handler instances — presumably populated elsewhere when a service
    # response message arrives; confirm against the caller.
    responses = {}

    def __init__(self, service_name, service_type, protocol):
        self.service_name = service_name
        self.service_type = service_type
        self.protocol = protocol
        # setup the service
        self.service_handle = rospy.Service(service_name, get_service_class(service_type), self.handle_request)

    def next_id(self):
        """Return the next request sequence number for this handler."""
        id = self.id_counter
        self.id_counter += 1
        return id

    def handle_request(self, req):
        """ROS service callback: forward *req* to the external client and
        block until its response appears in `responses`."""
        # generate a unique ID
        request_id = "service_request:" + self.service_name + ":" + str(self.next_id())
        # build a request to send to the external client
        request_message = {
            "op": "call_service",
            "id": request_id,
            "service": self.service_name,
            "args": message_conversion.extract_values(req)
        }
        self.protocol.send(request_message)
        # wait for a response
        # NOTE(review): busy-wait — time.sleep(0) only yields the GIL so the
        # receiving thread can insert the response; there is no timeout, so a
        # client that never answers blocks this callback forever.
        while request_id not in self.responses.keys():
            time.sleep(0)
        resp = self.responses[request_id]
        del self.responses[request_id]
        return resp
class AdvertiseService(Capability):
    """Capability letting an external client advertise a ROS service that it
    will serve over the rosbridge protocol."""

    advertise_service_msg_fields = [(True, "service", (str, unicode)), (True, "type", (str, unicode))]

    def __init__(self, protocol):
        """Register the advertise_service operation with the protocol."""
        Capability.__init__(self, protocol)
        protocol.register_operation("advertise_service", self.advertise_service)

    def advertise_service(self, message):
        """Handle an incoming advertise_service message from a client."""
        service_name = message["service"]

        # Tear down any previous advertiser for the same service name.
        if service_name in self.protocol.external_service_list.keys():
            self.protocol.log("warn", "Duplicate service advertised. Overwriting %s." % service_name)
            self.protocol.external_service_list[service_name].service_handle.shutdown("Duplicate advertiser.")
            del self.protocol.external_service_list[service_name]

        # Create the handler (which registers the ROS service) and remember it.
        handler = AdvertisedServiceHandler(service_name, message["type"], self.protocol)
        self.protocol.external_service_list[service_name] = handler
        self.protocol.log("info", "Advertised service %s." % service_name)
|
"""Token-related utilities"""
from __future__ import absolute_import, print_function
from collections import namedtuple
from io import StringIO
from keyword import iskeyword
from . import tokenize2
from .py3compat import cast_unicode_py2
Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
def generate_tokens(readline):
    """wrap generate_tokens to catch EOF errors"""
    try:
        for tok in tokenize2.generate_tokens(readline):
            yield tok
    except tokenize2.TokenError:
        # Incomplete input (EOF in the middle of a token): stop silently
        # instead of propagating the error.
        return
def line_at_cursor(cell, cursor_pos=0):
    """Return the line in a cell at a given cursor position

    Used for calling line-based APIs that don't support multi-line input, yet.

    Parameters
    ----------
    cell: str
        multiline block of text
    cursor_pos: integer
        the cursor position

    Returns
    -------
    (line, offset): (text, integer)
        The line with the current cursor, and the character offset of the
        start of the line.
    """
    start = 0
    for candidate in cell.splitlines(True):
        end = start + len(candidate)
        if end >= cursor_pos:
            # The cursor falls within this line.
            return (candidate, start)
        start = end
    # Cursor past the end of the text (or empty text): no line.
    return ("", start)
def token_at_cursor(cell, cursor_pos=0):
    """Get the token at a given cursor

    Used for introspection.

    Function calls are prioritized, so the token for the callable will be
    returned if the cursor is anywhere inside the call.

    Parameters
    ----------
    cell : unicode
        A block of Python code
    cursor_pos : int
        The location of the cursor in the block where the token should be found

    Returns
    -------
    str : the (possibly dotted) name at the cursor, preferring the enclosing
    call's callable; empty string when nothing suitable is found.
    """
    cell = cast_unicode_py2(cell)
    names = []          # candidate (dotted) names seen so far
    tokens = []         # all tokens consumed so far (used for '.' joining)
    call_names = []     # stack of callables for currently-open '(' calls
    offsets = {1: 0}  # lines start at 1
    for tup in generate_tokens(StringIO(cell).readline):
        tok = Token(*tup)
        # token, text, start, end, line = tup
        start_line, start_col = tok.start
        end_line, end_col = tok.end
        if end_line + 1 not in offsets:
            # keep track of offsets for each line
            lines = tok.line.splitlines(True)
            for lineno, line in zip(range(start_line + 1, end_line + 2), lines):
                if lineno not in offsets:
                    offsets[lineno] = offsets[lineno-1] + len(line)
        offset = offsets[start_line]
        # allow '|foo' to find 'foo' at the beginning of a line
        boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
        if offset + start_col >= boundary:
            # current token starts after the cursor,
            # don't consume it
            break
        if tok.token == tokenize2.NAME and not iskeyword(tok.text):
            # Join attribute accesses into a single dotted name.
            if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
                names[-1] = "%s.%s" % (names[-1], tok.text)
            else:
                names.append(tok.text)
        elif tok.token == tokenize2.OP:
            if tok.text == '=' and names:
                # don't inspect the lhs of an assignment
                names.pop(-1)
            if tok.text == '(' and names:
                # if we are inside a function call, inspect the function
                call_names.append(names[-1])
            elif tok.text == ')' and call_names:
                call_names.pop(-1)
        tokens.append(tok)
        if offsets[end_line] + end_col > cursor_pos:
            # we found the cursor, stop reading
            break
    # Prefer the innermost enclosing call's callable over the nearest name.
    if call_names:
        return call_names[-1]
    elif names:
        return names[-1]
    else:
        return ''
|
import ctypes
from . import copy_ctypes_list
from .arm64_const import *
class Arm64OpMem(ctypes.Structure):
    """Memory operand: base register, index register and displacement
    (mirrors capstone's cs_arm64_op.mem C struct)."""
    # Field order and types must match the C struct layout exactly.
    _fields_ = (
        ('base', ctypes.c_uint),
        ('index', ctypes.c_uint),
        ('disp', ctypes.c_int32),
    )
class Arm64OpShift(ctypes.Structure):
    """Shift applied to an operand: shift type and amount (mirrors the
    shift member of capstone's cs_arm64_op C struct)."""
    # Field order and types must match the C struct layout exactly.
    _fields_ = (
        ('type', ctypes.c_uint),
        ('value', ctypes.c_uint),
    )
class Arm64OpValue(ctypes.Union):
    """Union of the possible operand payloads (mirrors the value union of
    capstone's cs_arm64_op). Only the member selected by the operand's
    `type` field is meaningful."""
    # Member order and types must match the C union exactly.
    _fields_ = (
        ('reg', ctypes.c_uint),
        ('imm', ctypes.c_int64),
        ('fp', ctypes.c_double),
        ('mem', Arm64OpMem),
        ('pstate', ctypes.c_int),
        ('sys', ctypes.c_uint),
        ('prefetch', ctypes.c_int),
        ('barrier', ctypes.c_int),
    )
class Arm64Op(ctypes.Structure):
    """A single AArch64 instruction operand (mirrors capstone's cs_arm64_op)."""
    # Field order and types must match the C struct layout exactly.
    _fields_ = (
        ('vector_index', ctypes.c_int),
        ('vas', ctypes.c_int),
        ('vess', ctypes.c_int),
        ('shift', Arm64OpShift),
        ('ext', ctypes.c_uint),
        ('type', ctypes.c_uint),
        ('value', Arm64OpValue),
    )

    # Convenience accessors into the `value` union. Which member is valid is
    # determined by the operand's `type` field; reading the wrong member
    # reinterprets the union bytes.
    @property
    def imm(self):
        return self.value.imm
    @property
    def reg(self):
        return self.value.reg
    @property
    def fp(self):
        return self.value.fp
    @property
    def mem(self):
        return self.value.mem
    @property
    def pstate(self):
        return self.value.pstate
    @property
    def sys(self):
        return self.value.sys
    @property
    def prefetch(self):
        return self.value.prefetch
    @property
    def barrier(self):
        return self.value.barrier
class CsArm64(ctypes.Structure):
    """Instruction detail for AArch64 (mirrors capstone's cs_arm64)."""
    # Field order and types must match the C struct layout exactly.
    # `operands` is a fixed-capacity array; only the first `op_count`
    # entries are valid.
    _fields_ = (
        ('cc', ctypes.c_uint),
        ('update_flags', ctypes.c_bool),
        ('writeback', ctypes.c_bool),
        ('op_count', ctypes.c_uint8),
        ('operands', Arm64Op * 8),
    )
def get_arch_info(a):
    """Return (cc, update_flags, writeback, operands) extracted from a
    CsArm64 detail record; only the valid operands are copied."""
    valid_operands = copy_ctypes_list(a.operands[:a.op_count])
    return (a.cc, a.update_flags, a.writeback, valid_operands)
|
import mock
import unittest
from boto.ec2.address import Address
class AddressTest(unittest.TestCase):
    """Unit tests for boto.ec2.address.Address using a mocked connection."""

    def setUp(self):
        self.address = Address()
        self.address.connection = mock.Mock()
        self.address.public_ip = "192.168.1.1"

    def check_that_attribute_has_been_set(self, name, value, attribute):
        """Feed one SAX endElement event and verify the mapped attribute."""
        self.address.endElement(name, value, None)
        self.assertEqual(getattr(self.address, attribute), value)

    def test_endElement_sets_correct_attributes_with_values(self):
        cases = [
            ("publicIp", "192.168.1.1", "public_ip"),
            ("instanceId", 1, "instance_id"),
            ("domain", "some domain", "domain"),
            ("allocationId", 1, "allocation_id"),
            ("associationId", 1, "association_id"),
            # Unknown element names are set verbatim as attributes.
            ("somethingRandom", "somethingRandom", "somethingRandom"),
        ]
        for element, value, attribute in cases:
            self.check_that_attribute_has_been_set(element, value, attribute)

    def test_release_calls_connection_release_address_with_correct_args(self):
        self.address.release()
        self.address.connection.release_address.assert_called_with("192.168.1.1")

    def test_associate_calls_connection_associate_address_with_correct_args(self):
        self.address.associate(1)
        self.address.connection.associate_address.assert_called_with(1, "192.168.1.1")

    def test_disassociate_calls_connection_disassociate_address_with_correct_args(self):
        self.address.disassociate()
        self.address.connection.disassociate_address.assert_called_with("192.168.1.1")
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
equal,
create_connection,
ovirt_full_argument_spec,
)
# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'status': 'preview',
                    'supported_by': 'community',
                    'version': '1.0'}
# Module documentation rendered by `ansible-doc`.
# Fixes user-facing typos: "the the", "manage" -> "manages",
# "splitted" -> "split".
DOCUMENTATION = '''
---
module: ovirt_mac_pools
short_description: Module to manage MAC pools in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
    - "This module manages MAC pools in oVirt."
options:
    name:
        description:
            - "Name of the MAC pool to manage."
        required: true
    description:
        description:
            - "Description of the MAC pool."
    state:
        description:
            - "Should the mac pool be present or absent."
        choices: ['present', 'absent']
        default: present
    allow_duplicates:
        description:
            - "If (true) allow a MAC address to be used multiple times in a pool."
            - "Default value is set by oVirt engine to I(false)."
    ranges:
        description:
            - "List of MAC ranges. The from and to should be split by a comma."
            - "For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
- ovirt_mac_pools:
name: mymacpool
allow_duplicates: false
ranges:
- 00:1a:4a:16:01:51,00:1a:4a:16:01:61
- 00:1a:4a:16:02:51,00:1a:4a:16:02:61
- ovirt_mac_pools:
state: absent
name: mymacpool
'''
RETURN = '''
id:
description: ID of the MAC pool which is managed
returned: On success if MAC pool is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
template:
description: "Dictionary of all the MAC pool attributes. MAC pool attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/mac_pool."
returned: On success if MAC pool is found.
'''
class MACPoolModule(BaseModule):
    """Maps module parameters onto oVirt MacPool entities and diffs them."""

    def build_entity(self):
        """Construct the MacPool API object from the module parameters."""
        params = self._module.params
        mac_ranges = []
        for range_spec in params['ranges']:
            # Each range is given as "from,to".
            bounds = range_spec.split(',')
            mac_ranges.append(otypes.Range(from_=bounds[0], to=bounds[1]))
        return otypes.MacPool(
            name=params['name'],
            allow_duplicates=params['allow_duplicates'],
            description=params['description'],
            ranges=mac_ranges,
        )

    def _compare_ranges(self, entity):
        """Return True when the requested ranges match the entity's ranges
        (or when no ranges were requested at all)."""
        wanted = self._module.params['ranges']
        if wanted is None:
            return True
        existing = sorted(
            '%s,%s' % (mac_range.from_, mac_range.to)
            for mac_range in entity.ranges
        )
        return equal(sorted(wanted), existing)

    def update_check(self, entity):
        """Return True when the existing entity already matches the params."""
        params = self._module.params
        return (
            self._compare_ranges(entity) and
            equal(params['allow_duplicates'], entity.allow_duplicates) and
            equal(params['description'], entity.description)
        )
def main():
    """Entry point: create or remove an oVirt MAC pool per module params."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, required=True),
        allow_duplicates=dict(default=None, type='bool'),
        description=dict(default=None),
        ranges=dict(default=None, type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)

    # BUG FIX: initialize before the try block — if create_connection()
    # raises, the finally clause would otherwise hit a NameError on
    # `connection`, masking the real error.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        mac_pools_service = connection.system_service().mac_pools_service()
        mac_pools_module = MACPoolModule(
            connection=connection,
            module=module,
            service=mac_pools_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = mac_pools_module.create()
        elif state == 'absent':
            ret = mac_pools_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close the connection if it was actually established.
        if connection is not None:
            connection.close(logout=False)
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
|
"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0
"""
import operator
from .plugin.plugin_base import SkipTest
from sqlalchemy.util import decorator
from . import config
from sqlalchemy import util
from ..util import compat
import inspect
import contextlib
from .compat import get_url_driver_name, get_url_backend_name
def skip_if(predicate, reason=None):
    """Return a rule that skips the test when *predicate* holds."""
    rule = compound()
    rule.skips.add(_as_predicate(predicate, reason))
    return rule
def fails_if(predicate, reason=None):
    """Return a rule that expects the test to fail when *predicate* holds."""
    rule = compound()
    rule.fails.add(_as_predicate(predicate, reason))
    return rule
class compound(object):
    """Composable rule holding skip predicates, expected-failure predicates
    and tags.

    Instances are combined with ``+``/``add`` and used as decorators: the
    wrapped test is routed through :meth:`_do`, which raises ``SkipTest``
    for matching skip rules and enforces expected failures.
    """
    def __init__(self):
        # Predicates under which the test is expected to fail.
        self.fails = set()
        # Predicates under which the test is skipped outright.
        self.skips = set()
        # String tags used for include/exclude filtering.
        self.tags = set()
    def __add__(self, other):
        return self.add(other)
    def add(self, *others):
        """Return a new compound merging this rule with *others*."""
        copy = compound()
        copy.fails.update(self.fails)
        copy.skips.update(self.skips)
        copy.tags.update(self.tags)
        for other in others:
            copy.fails.update(other.fails)
            copy.skips.update(other.skips)
            copy.tags.update(other.tags)
        return copy
    def not_(self):
        """Return a new compound with every predicate inverted."""
        copy = compound()
        copy.fails.update(NotPredicate(fail) for fail in self.fails)
        copy.skips.update(NotPredicate(skip) for skip in self.skips)
        copy.tags.update(self.tags)
        return copy
    @property
    def enabled(self):
        # Convenience wrapper around the currently active config.
        return self.enabled_for_config(config._current)
    def enabled_for_config(self, config):
        """True when no skip/fail predicate matches *config*."""
        for predicate in self.skips.union(self.fails):
            if predicate(config):
                return False
        else:
            return True
    def matching_config_reasons(self, config):
        """List the string form of every predicate matching *config*."""
        return [
            predicate._as_string(config) for predicate
            in self.skips.union(self.fails)
            if predicate(config)
        ]
    def include_test(self, include_tags, exclude_tags):
        """Tag filter: excluded tags veto; include list (if any) gates."""
        return bool(
            not self.tags.intersection(exclude_tags) and
            (not include_tags or self.tags.intersection(include_tags))
        )
    def _extend(self, other):
        # Merge another compound's rules into this one in place.
        self.skips.update(other.skips)
        self.fails.update(other.fails)
        self.tags.update(other.tags)
    def __call__(self, fn):
        """Decorator entry point for test functions."""
        if hasattr(fn, '_sa_exclusion_extend'):
            # Already decorated: fold our rules into its existing rule set.
            fn._sa_exclusion_extend._extend(self)
            return fn
        @decorator
        def decorate(fn, *args, **kw):
            return self._do(config._current, fn, *args, **kw)
        decorated = decorate(fn)
        decorated._sa_exclusion_extend = self
        return decorated
    @contextlib.contextmanager
    def fail_if(self):
        """Context manager treating skips and fails alike as expected
        failures for the enclosed block."""
        all_fails = compound()
        all_fails.fails.update(self.skips.union(self.fails))
        try:
            yield
        except Exception as ex:
            all_fails._expect_failure(config._current, ex)
        else:
            all_fails._expect_success(config._current)
    def _do(self, config, fn, *args, **kw):
        # Skip rules take precedence over expected-failure rules.
        for skip in self.skips:
            if skip(config):
                msg = "'%s' : %s" % (
                    fn.__name__,
                    skip._as_string(config)
                )
                raise SkipTest(msg)
        try:
            return_value = fn(*args, **kw)
        except Exception as ex:
            self._expect_failure(config, ex, name=fn.__name__)
        else:
            self._expect_success(config, name=fn.__name__)
            return return_value
    def _expect_failure(self, config, ex, name='block'):
        # Swallow the exception only when some fail predicate matches;
        # otherwise re-raise preserving the original cause.
        for fail in self.fails:
            if fail(config):
                print(("%s failed as expected (%s): %s " % (
                    name, fail._as_string(config), str(ex))))
                break
        else:
            compat.raise_from_cause(ex)
    def _expect_success(self, config, name='block'):
        # Succeeding is an error when every fail predicate matched.
        if not self.fails:
            return
        for fail in self.fails:
            if not fail(config):
                break
        else:
            raise AssertionError(
                "Unexpected success for '%s' (%s)" %
                (
                    name,
                    " and ".join(
                        fail._as_string(config)
                        for fail in self.fails
                    )
                )
            )
def requires_tag(tagname):
    """Shorthand for ``tags([tagname])``."""
    return tags([tagname])
def tags(tagnames):
    """Return a rule carrying the given filter tags."""
    rule = compound()
    rule.tags.update(tagnames)
    return rule
def only_if(predicate, reason=None):
    """Skip the test unless *predicate* holds."""
    return skip_if(NotPredicate(_as_predicate(predicate)), reason)
def succeeds_if(predicate, reason=None):
    """Expect failure unless *predicate* holds."""
    return fails_if(NotPredicate(_as_predicate(predicate)), reason)
class Predicate(object):
    """Base class for config predicates used by skip/fail rules."""

    @classmethod
    def as_predicate(cls, predicate, description=None):
        """Coerce *predicate* (compound / Predicate / list / tuple / string /
        callable) into a Predicate instance."""
        if isinstance(predicate, compound):
            return cls.as_predicate(predicate.fails.union(predicate.skips))
        elif isinstance(predicate, Predicate):
            if description and predicate.description is None:
                predicate.description = description
            return predicate
        elif isinstance(predicate, (list, set)):
            return OrPredicate(
                [cls.as_predicate(pred) for pred in predicate],
                description)
        elif isinstance(predicate, tuple):
            return SpecPredicate(*predicate)
        elif isinstance(predicate, compat.string_types):
            # String form: "db", "db op", or "db op version" (e.g.
            # "mysql >= 5.5").
            tokens = predicate.split(" ", 2)
            op = spec = None
            db = tokens.pop(0)
            if tokens:
                op = tokens.pop(0)
            if tokens:
                spec = tuple(int(d) for d in tokens.pop(0).split("."))
            return SpecPredicate(db, op, spec, description=description)
        elif util.callable(predicate):
            return LambdaPredicate(predicate, description)
        else:
            assert False, "unknown predicate type: %s" % predicate

    def _format_description(self, config, negate=False):
        """Interpolate the description template against *config*."""
        bool_ = self(config)
        if negate:
            # BUG FIX: negate the evaluated result. This previously read
            # `bool_ = not negate`, which always yielded False inside this
            # branch regardless of what the predicate evaluated to.
            bool_ = not bool_
        return self.description % {
            "driver": get_url_driver_name(config.db.url),
            "database": get_url_backend_name(config.db.url),
            "doesnt_support": "doesn't support" if bool_ else "does support",
            "does_support": "does support" if bool_ else "doesn't support"
        }

    def _as_string(self, config=None, negate=False):
        raise NotImplementedError()
class BooleanPredicate(Predicate):
    """Predicate with a fixed truth value, regardless of config."""
    def __init__(self, value, description=None):
        self.value = value
        self.description = description or "boolean %s" % value
    def __call__(self, config):
        return self.value
    def _as_string(self, config, negate=False):
        return self._format_description(config, negate=negate)
class SpecPredicate(Predicate):
    """Predicate matching a database backend, optionally constrained by a
    server version spec, e.g. ``SpecPredicate("mysql", ">=", (5, 5))`` or
    just ``"postgresql+psycopg2"``."""

    def __init__(self, db, op=None, spec=None, description=None):
        self.db = db
        self.op = op
        self.spec = spec
        self.description = description

    # Comparison operators understood in string specs like "mysql >= 5.5".
    _ops = {
        '<': operator.lt,
        '>': operator.gt,
        '==': operator.eq,
        '!=': operator.ne,
        '<=': operator.le,
        '>=': operator.ge,
        'in': operator.contains,
        'between': lambda val, pair: val >= pair[0] and val <= pair[1],
    }

    def __call__(self, config):
        engine = config.db
        # `db` may be "dialect+driver" or just "dialect".
        if "+" in self.db:
            dialect, driver = self.db.split('+')
        else:
            dialect, driver = self.db, None
        if dialect and engine.name != dialect:
            return False
        if driver is not None and engine.driver != driver:
            return False
        if self.op is not None:
            assert driver is None, "DBAPI version specs not supported yet"
            version = _server_version(engine)
            # `op` is either a callable comparator or a key into _ops.
            # (Replaces the fragile `and/or` idiom; callables are always
            # truthy, so behavior is identical but the intent is explicit.)
            oper = self.op if hasattr(self.op, '__call__') \
                else self._ops[self.op]
            return oper(version, self.spec)
        else:
            return True

    def _as_string(self, config, negate=False):
        if self.description is not None:
            return self._format_description(config)
        elif self.op is None:
            if negate:
                return "not %s" % self.db
            else:
                return "%s" % self.db
        else:
            if negate:
                return "not %s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
            else:
                return "%s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
class LambdaPredicate(Predicate):
    """Predicate wrapping an arbitrary callable that takes the config."""
    def __init__(self, lambda_, description=None, args=None, kw=None):
        # NOTE(review): inspect.getargspec is deprecated (and removed in
        # Python 3.11); acceptable for the Python 2 era this file targets.
        spec = inspect.getargspec(lambda_)
        if not spec[0]:
            # Zero-argument callables are adapted to accept the config.
            self.lambda_ = lambda db: lambda_()
        else:
            self.lambda_ = lambda_
        self.args = args or ()
        self.kw = kw or {}
        if description:
            self.description = description
        elif lambda_.__doc__:
            self.description = lambda_.__doc__
        else:
            self.description = "custom function"
    def __call__(self, config):
        return self.lambda_(config)
    def _as_string(self, config, negate=False):
        return self._format_description(config)
class NotPredicate(Predicate):
    """Inverts the result of a wrapped predicate."""

    def __init__(self, predicate, description=None):
        self.predicate = predicate
        self.description = description

    def __call__(self, config):
        return not self.predicate(config)

    def _as_string(self, config, negate=False):
        # Flip the negation flag when rendering the wrapped predicate.
        if not self.description:
            return self.predicate._as_string(config, not negate)
        return self._format_description(config, not negate)
class OrPredicate(Predicate):
    """Predicate that is true when any member predicate is true."""

    def __init__(self, predicates, description=None):
        self.predicates = predicates
        self.description = description

    def __call__(self, config):
        return any(pred(config) for pred in self.predicates)

    def _eval_str(self, config, negate=False):
        # De Morgan: a negated OR reads naturally as an AND of negations.
        joiner = " and " if negate else " or "
        parts = [p._as_string(config, negate=negate)
                 for p in self.predicates]
        return joiner.join(parts)

    def _negation_str(self, config):
        if self.description is None:
            return self._eval_str(config, negate=True)
        return "Not " + self._format_description(config)

    def _as_string(self, config, negate=False):
        if negate:
            return self._negation_str(config)
        if self.description is not None:
            return self._format_description(config)
        return self._eval_str(config)
_as_predicate = Predicate.as_predicate
def _is_excluded(db, op, spec):
    """True when the current config matches the given backend/version spec."""
    pred = SpecPredicate(db, op, spec)
    return pred(config._current)
def _server_version(engine):
    """Return a server_version_info tuple."""
    # Connecting forces dialect initialization so the attribute is populated.
    connection = engine.connect()
    version = getattr(engine.dialect, 'server_version_info', ())
    connection.close()
    return version
def db_spec(*dbs):
    """Build a predicate matching any of the given database specs."""
    preds = [Predicate.as_predicate(db) for db in dbs]
    return OrPredicate(preds)
def open():
    """Rule that never skips: the test always executes."""
    always_false = BooleanPredicate(False, "mark as execute")
    return skip_if(always_false)
def closed():
    """Rule that always skips the test."""
    always_true = BooleanPredicate(True, "marked as skip")
    return skip_if(always_true)
def fails(msg=None):
    """Rule that unconditionally expects the test to fail."""
    reason = msg or "expected to fail"
    return fails_if(BooleanPredicate(True, reason))
@decorator
def future(fn, *arg):
    """Mark a test as expected to fail until the feature it covers lands."""
    return fails_if(LambdaPredicate(fn), "Future feature")
def fails_on(db, reason=None):
    """Expect failure on the named database backend."""
    backend = SpecPredicate(db)
    return fails_if(backend, reason)
def fails_on_everything_except(*dbs):
    """Expect failure on every backend except the named ones."""
    matching = [Predicate.as_predicate(db) for db in dbs]
    return succeeds_if(OrPredicate(matching))
def skip(db, reason=None):
    """Skip the test on the named database backend."""
    backend = SpecPredicate(db)
    return skip_if(backend, reason)
def only_on(dbs, reason=None):
    """Run the test only on the named database backend(s)."""
    # NOTE: *reason* is accepted but unused, matching the original signature.
    specs = [SpecPredicate(db) for db in util.to_list(dbs)]
    return only_if(OrPredicate(specs))
def exclude(db, op, spec, reason=None):
    """Skip the test on the named backend when its version matches the spec."""
    versioned = SpecPredicate(db, op, spec)
    return skip_if(versioned, reason)
def against(config, *queries):
    """Evaluate whether *config* matches any of the given query specs."""
    assert queries, "no queries sent!"
    preds = [Predicate.as_predicate(q) for q in queries]
    return OrPredicate(preds)(config)
|
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import errno, socket, select, os
from Cookie import SimpleCookie
from contextlib import closing
from urlparse import parse_qs
import repr as reprlib
from email.utils import formatdate
from operator import itemgetter
from future_builtins import map
from urllib import quote as urlquote
from binascii import hexlify, unhexlify
from calibre import prints
from calibre.constants import iswindows
from calibre.utils.config_base import tweaks
from calibre.utils.localization import get_translator
from calibre.utils.socket_inheritance import set_socket_inherit
from calibre.utils.logging import ThreadSafeLog
from calibre.utils.shared_file import share_open, raise_winerror
# HTTP protocol version identifiers used when formatting responses.
HTTP1 = 'HTTP/1.0'
HTTP11 = 'HTTP/1.1'
# Preferred socket send-buffer size.
DESIRED_SEND_BUFFER_SIZE = 16 * 1024 # windows 7 uses an 8KB sndbuf
def http_date(timeval=None):
    """Return *timeval* (seconds since the epoch; default: now) formatted as
    an RFC 7231 HTTP date string in GMT, coerced to the native str type."""
    stamp = formatdate(timeval=timeval, usegmt=True)
    return type('')(stamp)
class MultiDict(dict): # {{{
    """A dict that keeps every value ever set for a key (Python 2 idioms).

    Internally each key maps to a list of values; plain indexing returns the
    most recently added value.
    """

    def __setitem__(self, key, val):
        # Append rather than replace, preserving earlier values.
        vals = dict.get(self, key, [])
        vals.append(val)
        dict.__setitem__(self, key, vals)

    def __getitem__(self, key):
        # Plain lookup returns the most recently added value.
        return dict.__getitem__(self, key)[-1]

    @staticmethod
    def create_from_query_string(qs):
        """Build a MultiDict from a raw query string, decoding keys and
        values as UTF-8."""
        ans = MultiDict()
        for k, v in parse_qs(qs, keep_blank_values=True).iteritems():
            dict.__setitem__(ans, k.decode('utf-8'), [x.decode('utf-8') for x in v])
        return ans

    def update_from_listdict(self, ld):
        """Merge a mapping of key -> list-of-values into this dict."""
        for key, values in ld.iteritems():
            for val in values:
                self[key] = val

    def items(self, duplicates=True):
        """Yield (key, value) pairs; all values per key when *duplicates*,
        otherwise only the most recent one."""
        for k, v in dict.iteritems(self):
            if duplicates:
                for x in v:
                    yield k, x
            else:
                yield k, v[-1]
    iteritems = items

    def values(self, duplicates=True):
        """Yield values; all values per key when *duplicates*, otherwise
        only the most recent one."""
        for v in dict.itervalues(self):
            if duplicates:
                for x in v:
                    yield x
            else:
                yield v[-1]
    itervalues = values

    def set(self, key, val, replace_all=False):
        """Add *val* for *key*; optionally discard all previous values."""
        if replace_all:
            dict.__setitem__(self, key, [val])
        else:
            self[key] = val

    def get(self, key, default=None, all=False):
        """Return the latest value, or the full list when *all* is True
        (empty list when the key is missing)."""
        if all:
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                return []
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def pop(self, key, default=None, all=False):
        """Remove *key*, returning the latest value or the full list."""
        ans = dict.pop(self, key, default)
        if ans is default:
            return [] if all else default
        return ans if all else ans[-1]

    def __repr__(self):
        return '{' + ', '.join('%s: %s' % (reprlib.repr(k), reprlib.repr(v)) for k, v in self.iteritems()) + '}'
    __str__ = __unicode__ = __repr__

    def pretty(self, leading_whitespace=''):
        """Multi-line human-readable rendering, sorted by key."""
        return leading_whitespace + ('\n' + leading_whitespace).join(
            '%s: %s' % (k, (repr(v) if isinstance(v, bytes) else v)) for k, v in sorted(self.items(), key=itemgetter(0)))
def error_codes(*errnames):
    ''' Return error numbers for error names, ignoring non-existent names '''
    numbers = (getattr(errno, name, None) for name in errnames)
    return {num for num in numbers if num is not None}
# Errors meaning a blocking call was interrupted by a signal and should be retried
socket_errors_eintr = error_codes("EINTR", "WSAEINTR")

socket_errors_socket_closed = error_codes( # errors indicating a disconnected connection
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ENOTCONN", "WSAENOTCONN",
    "ESHUTDOWN", "WSAESHUTDOWN",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
)

# Errors meaning a non-blocking operation would have blocked (not fatal)
socket_errors_nonblocking = error_codes(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
def start_cork(sock):
    'Enable TCP_CORK on *sock*; a no-op on platforms without that option.'
    cork = getattr(socket, 'TCP_CORK', None)
    if cork is not None:
        sock.setsockopt(socket.IPPROTO_TCP, cork, 1)
def stop_cork(sock):
    'Disable TCP_CORK on *sock*, flushing any corked data; a no-op on platforms without it.'
    cork = getattr(socket, 'TCP_CORK', None)
    if cork is not None:
        sock.setsockopt(socket.IPPROTO_TCP, cork, 0)
def create_sock_pair(port=0):
    '''Create socket pair. Works also on windows by using an ephemeral TCP port.

    Returns (client_sock, srv_sock), both connected to each other and marked
    non-inheritable by child processes.
    '''
    if hasattr(socket, 'socketpair'):
        # POSIX: use the native socketpair() call
        client_sock, srv_sock = socket.socketpair()
        set_socket_inherit(client_sock, False), set_socket_inherit(srv_sock, False)
        return client_sock, srv_sock

    # Create a non-blocking temporary server socket
    temp_srv_sock = socket.socket()
    set_socket_inherit(temp_srv_sock, False)
    temp_srv_sock.setblocking(False)
    temp_srv_sock.bind(('127.0.0.1', port))
    port = temp_srv_sock.getsockname()[1]
    temp_srv_sock.listen(1)
    with closing(temp_srv_sock):
        # Create non-blocking client socket
        client_sock = socket.socket()
        client_sock.setblocking(False)
        set_socket_inherit(client_sock, False)
        try:
            client_sock.connect(('127.0.0.1', port))
        except socket.error as err:
            # EWOULDBLOCK is not an error, as the socket is non-blocking
            if err.errno not in socket_errors_nonblocking:
                raise

        # Use select to wait for connect() to succeed.
        timeout = 1
        readable = select.select([temp_srv_sock], [], [], timeout)[0]
        if temp_srv_sock not in readable:
            raise Exception('Client socket not connected in {} second(s)'.format(timeout))
        srv_sock = temp_srv_sock.accept()[0]
        set_socket_inherit(srv_sock, False)
        # Restore blocking mode on the client end before handing it out
        client_sock.setblocking(True)
    return client_sock, srv_sock
def parse_http_list(header_val):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Neither commas nor quotes count if they are escaped.
    Only double-quotes count, not single-quotes.
    """
    # Pick byte/text versions of the three special characters to match the input
    if isinstance(header_val, bytes):
        slash, dquote, comma = b'\\",'
        empty = b''
    else:
        slash, dquote, comma = '\\",'
        empty = ''

    current = empty
    in_quotes = False
    pending_escape = False
    for ch in header_val:
        if pending_escape:
            # The previous character was a backslash inside quotes: take this
            # character literally (the backslash itself is dropped)
            current += ch
            pending_escape = False
        elif in_quotes:
            if ch == slash:
                pending_escape = True
            else:
                if ch == dquote:
                    in_quotes = False
                current += ch
        elif ch == comma:
            # Unquoted comma: element boundary
            yield current.strip()
            current = empty
        else:
            if ch == dquote:
                in_quotes = True
            current += ch

    if current:
        yield current.strip()
def parse_http_dict(header_val):
    'Parse an HTTP comma separated header with items of the form a=1, b="xxx" into a dictionary'
    if not header_val:
        return {}
    sep, dquote = b'="' if isinstance(header_val, bytes) else '="'
    result = {}
    for token in parse_http_list(header_val):
        key, _, val = token.partition(sep)
        if not key:
            continue
        # Strip surrounding double quotes, if present on both ends
        if val.startswith(dquote) and val.endswith(dquote):
            val = val[1:-1]
        result[key] = val
    return result
def sort_q_values(header_val):
    'Get sorted items from an HTTP header of type: a;q=0.5, b;q=0.7...'
    if not header_val:
        return []

    def parse_item(raw):
        # Split 'name;q=0.5' into the name and its quality weight (default 1.0)
        name, _, params = raw.partition(';')
        pname, _, pval = params.partition('=')
        weight = 1.0
        if pname == 'q' and pval:
            try:
                weight = max(0.0, min(1.0, float(pval.strip())))
            except Exception:
                pass
        return name.strip(), weight

    # Stable sort keeps the header order for items with equal weights
    ranked = sorted(map(parse_item, parse_http_list(header_val)), key=itemgetter(1), reverse=True)
    return tuple(map(itemgetter(0), ranked))
def eintr_retry_call(func, *args, **kwargs):
    'Call func(*args, **kwargs), transparently retrying if it is interrupted by EINTR.'
    while True:
        try:
            return func(*args, **kwargs)
        except EnvironmentError as err:
            if getattr(err, 'errno', None) not in socket_errors_eintr:
                raise
            # Interrupted by a signal: loop around and retry the call
def get_translator_for_lang(cache, bcp_47_code):
    'Return the translator for *bcp_47_code*, memoizing the result in *cache*.'
    if bcp_47_code in cache:
        return cache[bcp_47_code]
    translator = get_translator(bcp_47_code)
    cache[bcp_47_code] = translator
    return translator
def encode_path(*components):
    'Encode the path specified as a list of path components using URL encoding'
    quoted_parts = [urlquote(component.encode('utf-8'), '').decode('ascii') for component in components]
    return '/' + '/'.join(quoted_parts)
def encode_name(name):
    'Encode a name (arbitrary string) as URL safe characters. See decode_name() also.'
    raw = name.encode('utf-8') if isinstance(name, unicode) else name
    return hexlify(raw)
def decode_name(name):
    'Inverse of encode_name(): turn a hex-encoded name back into a unicode string.'
    raw = unhexlify(name)
    return raw.decode('utf-8')
class Cookie(SimpleCookie):

    '''SimpleCookie subclass that coerces cookie keys to bytes, since the
    Python 2.x cookie machinery cannot handle unicode keys.'''

    # Overrides the name-mangled private BaseCookie.__set() method
    def _BaseCookie__set(self, key, real_value, coded_value):
        if not isinstance(key, bytes):
            key = key.encode('ascii')  # Python 2.x cannot handle unicode keys
        return SimpleCookie._BaseCookie__set(self, key, real_value, coded_value)
def custom_fields_to_display(db):
    'Return the frozenset of ignorable field keys the content server should display, per the tweaks.'
    all_keys = set(db.field_metadata.ignorable_field_keys())
    shown = set(tweaks['content_server_will_display'])
    hidden = set(tweaks['content_server_wont_display'])
    # A '*' wildcard in either tweak means "every key"
    if '*' in shown:
        shown = all_keys
    if '*' in hidden:
        hidden = all_keys
    return frozenset(all_keys & (shown - hidden))
class ServerLog(ThreadSafeLog):
    '''Log used by the server: exception tracebacks are recorded at WARN level.'''
    exception_traceback_level = ThreadSafeLog.WARN
class RotatingStream(object):

    '''A log output stream that rotates its underlying file once it grows past
    max_size bytes, keeping up to *history* numbered old copies.'''

    def __init__(self, filename, max_size=None, history=5):
        self.filename, self.history, self.max_size = filename, history, max_size
        if iswindows:
            # Long-path prefix so paths > MAX_PATH work on windows
            self.filename = '\\\\?\\' + os.path.abspath(self.filename)
        self.set_output()

    def set_output(self):
        '''(Re)open the log file for appending.'''
        self.stream = share_open(self.filename, 'ab', -1 if iswindows else 1)  # line buffered
        try:
            self.current_pos = self.stream.tell()
        except EnvironmentError:
            # Happens if filename is /dev/stdout for example
            self.current_pos = 0
            self.max_size = None  # disable rotation for non-seekable outputs

    def flush(self):
        self.stream.flush()

    def prints(self, level, *args, **kwargs):
        '''Write a log line via calibre's prints(), tracking bytes written and rotating if needed.'''
        kwargs['safe_encode'] = True
        kwargs['file'] = self.stream
        self.current_pos += prints(*args, **kwargs)
        if iswindows:
            # For some reason line buffering does not work on windows
            end = kwargs.get('end', b'\n')
            if b'\n' in end:
                self.flush()
        self.rollover()

    def rename(self, src, dest):
        '''Rename src -> dest, replacing dest; a missing src is silently ignored.'''
        try:
            if iswindows:
                import win32file, pywintypes
                try:
                    win32file.MoveFileEx(src, dest, win32file.MOVEFILE_REPLACE_EXISTING|win32file.MOVEFILE_WRITE_THROUGH)
                except pywintypes.error as e:
                    raise_winerror(e)
            else:
                os.rename(src, dest)
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:  # the source of the rename does not exist
                raise

    def rollover(self):
        '''Rotate log files (file -> file.1 -> file.2 ...) once max_size is exceeded.'''
        if self.max_size is None or self.current_pos <= self.max_size:
            return
        self.stream.close()
        # Shift existing numbered backups up by one, oldest first
        for i in xrange(self.history - 1, 0, -1):
            src, dest = '%s.%d' % (self.filename, i), '%s.%d' % (self.filename, i+1)
            self.rename(src, dest)
        self.rename(self.filename, '%s.%d' % (self.filename, 1))
        self.set_output()
class RotatingLog(ServerLog):

    'Server log that writes to a single size-limited, self-rotating file.'

    def __init__(self, filename, max_size=None, history=5):
        ServerLog.__init__(self)
        # All output goes through one rotating file stream
        self.outputs = [RotatingStream(filename, max_size, history)]

    def flush(self):
        for output in self.outputs:
            output.flush()
class HandleInterrupt(object): # {{{

    # On windows socket functions like accept(), recv(), send() are not
    # interrupted by a Ctrl-C in the console. So to make Ctrl-C work we have to
    # use this special context manager. See the echo server example at the
    # bottom of this file for how to use it.

    def __init__(self, action):
        # action: callable invoked once when Ctrl-C is pressed
        if not iswindows:
            return  # Interrupts work fine on POSIX
        self.action = action
        from ctypes import WINFUNCTYPE, windll
        from ctypes.wintypes import BOOL, DWORD
        kernel32 = windll.LoadLibrary('kernel32')
        # <http://msdn.microsoft.com/en-us/library/ms686016.aspx>
        PHANDLER_ROUTINE = WINFUNCTYPE(BOOL, DWORD)
        self.SetConsoleCtrlHandler = kernel32.SetConsoleCtrlHandler
        self.SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, BOOL)
        self.SetConsoleCtrlHandler.restype = BOOL

        @PHANDLER_ROUTINE
        def handle(event):
            if event == 0:  # CTRL_C_EVENT
                if self.action is not None:
                    self.action()
                    self.action = None  # only fire the action once
                # Typical C implementations would return 1 to indicate that
                # the event was processed and other control handlers in the
                # stack should not be executed.  However, that would
                # prevent the Python interpreter's handler from translating
                # CTRL-C to a `KeyboardInterrupt` exception, so we pretend
                # that we didn't handle it.
            return 0
        # Keep a reference so the ctypes callback is not garbage collected
        self.handle = handle

    def __enter__(self):
        if iswindows:
            if self.SetConsoleCtrlHandler(self.handle, 1) == 0:
                raise WindowsError()

    def __exit__(self, *args):
        if iswindows:
            if self.SetConsoleCtrlHandler(self.handle, 0) == 0:
                raise WindowsError()
class Accumulator(object): # {{{

    'Optimized replacement for BytesIO when the usage pattern is many writes followed by a single getvalue()'

    def __init__(self):
        self._buf = []
        self.total_length = 0

    def append(self, b):
        '''Queue the bytes object *b* for inclusion in the next getvalue().'''
        self.total_length += len(b)
        self._buf.append(b)

    def getvalue(self):
        '''Return all accumulated bytes as one object, resetting the accumulator.'''
        joined = b''.join(self._buf)
        self._buf, self.total_length = [], 0
        return joined
|
from math import fabs, ceil
import traceback
import re
from CodernityDB.database import RecordNotFound
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, ss
from couchpotato.core.helpers.variable import mergeDicts, getExt, tryInt, splitString, tryFloat
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.quality.index import QualityIndex
# Module level logger for this plugin
log = CPLog(__name__)
class QualityPlugin(Plugin):

    '''Detect the quality (720p, brrip, dvdrip, cam, ...) of a movie release
    from its file name(s) and size, and manage the quality records stored in
    the database.'''

    _database = {
        'quality': QualityIndex
    }

    # Static definition of every known quality, ordered best-first.
    # size/median_size are in MB; 'alternative' and 'tags' are name fragments
    # used for matching (a tuple means all of its parts must be present);
    # 'allow' lists other qualities that may legitimately co-occur in a name.
    qualities = [
        {'identifier': 'bd50', 'hd': True, 'allow_3d': True, 'size': (20000, 60000), 'median_size': 40000, 'label': 'BR-Disk', 'alternative': ['bd25', ('br', 'disk')], 'allow': ['1080p'], 'ext':['iso', 'img'], 'tags': ['bdmv', 'certificate', ('complete', 'bluray'), 'avc', 'mvc']},
        {'identifier': '1080p', 'hd': True, 'allow_3d': True, 'size': (4000, 20000), 'median_size': 10000, 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts', 'ts'], 'tags': ['m2ts', 'x264', 'h264']},
        {'identifier': '720p', 'hd': True, 'allow_3d': True, 'size': (3000, 10000), 'median_size': 5500, 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264']},
        {'identifier': 'brrip', 'hd': True, 'allow_3d': True, 'size': (700, 7000), 'median_size': 2000, 'label': 'BR-Rip', 'alternative': ['bdrip', ('br', 'rip'), 'hdtv', 'hdrip'], 'allow': ['720p', '1080p'], 'ext':['mp4', 'avi'], 'tags': ['webdl', ('web', 'dl')]},
        {'identifier': 'dvdr', 'size': (3000, 10000), 'median_size': 4500, 'label': 'DVD-R', 'alternative': ['br2dvd', ('dvd', 'r')], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']},
        {'identifier': 'dvdrip', 'size': (600, 2400), 'median_size': 1500, 'label': 'DVD-Rip', 'width': 720, 'alternative': [('dvd', 'rip')], 'allow': [], 'ext':['avi'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
        {'identifier': 'scr', 'size': (600, 1600), 'median_size': 700, 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr', 'webrip', ('web', 'rip')], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': []},
        {'identifier': 'r5', 'size': (600, 1000), 'median_size': 700, 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr', '720p', '1080p'], 'ext':[]},
        {'identifier': 'tc', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': ['720p', '1080p'], 'ext':[]},
        {'identifier': 'ts', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': ['720p', '1080p'], 'ext':[]},
        {'identifier': 'cam', 'size': (600, 1000), 'median_size': 700, 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': ['720p', '1080p'], 'ext':[]}
    ]

    # Qualities typically released before the retail version
    pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']

    # Name fragments that indicate a 3D release, grouped by 3D format
    threed_tags = {
        'sbs': [('half', 'sbs'), 'hsbs', ('full', 'sbs'), 'fsbs'],
        'ou': [('half', 'ou'), 'hou', ('full', 'ou'), 'fou'],
        '3d': ['2d3d', '3d2d','3d','sbs','3dbd','hsbs'],
    }

    cached_qualities = None  # cache for all(), invalidated by saveSize()
    cached_order = None  # identifier -> position in self.qualities

    def __init__(self):
        '''Register event listeners and API endpoints for quality handling.'''
        addEvent('quality.all', self.all)
        addEvent('quality.single', self.single)
        addEvent('quality.guess', self.guess)
        addEvent('quality.pre_releases', self.preReleases)
        addEvent('quality.order', self.getOrder)
        addEvent('quality.ishigher', self.isHigher)
        addEvent('quality.isfinish', self.isFinish)
        addEvent('quality.fill', self.fill)

        addApiView('quality.size.save', self.saveSize)
        addApiView('quality.list', self.allView, docs = {
            'desc': 'List all available qualities',
            'return': {'type': 'object', 'example': """{
            'success': True,
            'list': array, qualities
}"""}
        })

        addEvent('app.initialize', self.fill, priority = 10)
        addEvent('app.test', self.doTest)

        self.order = []
        self.addOrder()

    def addOrder(self):
        '''Rebuild self.order: the list of quality identifiers, best first.'''
        self.order = []
        for q in self.qualities:
            self.order.append(q.get('identifier'))

    def getOrder(self):
        return self.order

    def preReleases(self):
        return self.pre_releases

    def allView(self, **kwargs):
        '''API view: return every available quality.'''
        return {
            'success': True,
            'list': self.all()
        }

    def all(self):
        '''Return every quality merged with its database record (cached after first full read).'''
        if self.cached_qualities:
            return self.cached_qualities

        db = get_db()

        temp = []
        for quality in self.qualities:
            quality_doc = db.get('quality', quality.get('identifier'), with_doc = True)['doc']
            q = mergeDicts(quality, quality_doc)
            temp.append(q)

        # Only cache when every quality was found in the database
        if len(temp) == len(self.qualities):
            self.cached_qualities = temp

        return temp

    def single(self, identifier = ''):
        '''Return one quality: its static definition merged with its database document.'''
        db = get_db()
        quality_dict = {}
        quality = db.get('quality', identifier, with_doc = True)['doc']
        if quality:
            quality_dict = mergeDicts(self.getQuality(quality['identifier']), quality)

        return quality_dict

    def getQuality(self, identifier):
        '''Return the static quality definition for *identifier*, or None.'''
        for q in self.qualities:
            if identifier == q.get('identifier'):
                return q

    def saveSize(self, **kwargs):
        '''API view: update one size boundary (e.g. size_min/size_max) of a quality.'''
        try:
            db = get_db()
            quality = db.get('quality', kwargs.get('identifier'), with_doc = True)

            if quality:
                quality['doc'][kwargs.get('value_type')] = tryInt(kwargs.get('value'))
                db.update(quality['doc'])

            # Force the next all() call to re-read from the database
            self.cached_qualities = None

            return {
                'success': True
            }
        except:
            log.error('Failed: %s', traceback.format_exc())

        return {
            'success': False
        }

    def fill(self):
        '''Insert any missing quality records (plus a default profile per quality) into the database.'''
        try:
            db = get_db()

            order = 0
            for q in self.qualities:

                existing = None
                try:
                    existing = db.get('quality', q.get('identifier'))
                except RecordNotFound:
                    pass

                if not existing:
                    db.insert({
                        '_t': 'quality',
                        'order': order,
                        'identifier': q.get('identifier'),
                        'size_min': tryInt(q.get('size')[0]),
                        'size_max': tryInt(q.get('size')[1]),
                    })

                    log.info('Creating profile: %s', q.get('label'))
                    db.insert({
                        '_t': 'profile',
                        'order': order + 20, # Make sure it goes behind other profiles
                        'core': True,
                        'qualities': [q.get('identifier')],
                        'label': toUnicode(q.get('label')),
                        'finish': [True],
                        'wait_for': [0],
                    })

                order += 1

            return True
        except:
            log.error('Failed: %s', traceback.format_exc())

        return False

    def guess(self, files, extra = None, size = None, use_cache = True):
        '''Guess the quality of a release from its file names, size (MB) and optional metadata.

        Returns the best-scoring quality dict (with an 'is_3d' flag set), or
        None when nothing scored above zero.
        '''
        if not extra: extra = {}

        # Create hash for cache
        cache_key = str([f.replace('.' + getExt(f), '') if len(getExt(f)) < 4 else f for f in files])
        if use_cache:
            cached = self.getCache(cache_key)
            if cached and len(extra) == 0:
                return cached

        qualities = self.all()

        # Start with 0
        score = {}
        for quality in qualities:
            score[quality.get('identifier')] = {
                'score': 0,
                '3d': {}
            }

        # Use metadata titles as extra check
        if extra and extra.get('titles'):
            files.extend(extra.get('titles'))

        for cur_file in files:
            words = re.split('\W+', cur_file.lower())
            name_year = fireEvent('scanner.name_year', cur_file, file_name = cur_file, single = True)

            # Strip the movie title words before looking for 3D tags, so a
            # title containing e.g. "sbs" doesn't falsely flag 3D
            threed_words = words
            if name_year and name_year.get('name'):
                split_name = splitString(name_year.get('name'), ' ')
                threed_words = [x for x in words if x not in split_name]

            for quality in qualities:
                contains_score = self.containsTagScore(quality, words, cur_file)
                threedscore = self.contains3D(quality, threed_words, cur_file) if quality.get('allow_3d') else (0, None)

                self.calcScore(score, quality, contains_score, threedscore, penalty = contains_score)

        size_scores = []
        for quality in qualities:

            # Evaluate score based on size
            size_score = self.guessSizeScore(quality, size = size)
            loose_score = self.guessLooseScore(quality, extra = extra)

            if size_score > 0:
                size_scores.append(quality)

            self.calcScore(score, quality, size_score + loose_score)

        # Add additional size score if only 1 size validated
        if len(size_scores) == 1:
            self.calcScore(score, size_scores[0], 8)
        del size_scores

        # Return nothing if all scores are <= 0
        has_non_zero = 0
        for s in score:
            if score[s]['score'] > 0:
                has_non_zero += 1

        if not has_non_zero:
            return None

        heighest_quality = max(score, key = lambda p: score[p]['score'])
        if heighest_quality:
            for quality in qualities:
                if quality.get('identifier') == heighest_quality:
                    quality['is_3d'] = False
                    if score[heighest_quality].get('3d'):
                        quality['is_3d'] = True
                    return self.setCache(cache_key, quality)

        return None

    def containsTagScore(self, quality, words, cur_file = ''):
        '''Score how strongly *words* (from a file name) match this quality's
        identifier, label, alternatives, tags and file extension.'''
        cur_file = ss(cur_file)
        score = 0.0

        # The last "word" of the split file name is treated as the extension
        extension = words[-1]
        words = words[:-1]

        points = {
            'identifier': 20,
            'label': 20,
            'alternative': 20,
            'tags': 11,
            'ext': 5,
        }

        scored_on = []

        # Check alt and tags
        for tag_type in ['identifier', 'alternative', 'tags', 'label']:
            qualities = quality.get(tag_type, [])
            qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities
            for alt in qualities:
                # Tuple tags only match when every part is present
                if isinstance(alt, tuple):
                    if len(set(words) & set(alt)) == len(alt):
                        log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
                        score += points.get(tag_type)

                if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words and ss(alt.lower()) not in scored_on:
                    log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
                    score += points.get(tag_type)

                    # Don't score twice on same tag
                    scored_on.append(ss(alt).lower())

        # Check extention
        for ext in quality.get('ext', []):
            if ext == extension:
                log.debug('Found %s with .%s extension in %s', (quality['identifier'], ext, cur_file))
                score += points['ext']

        return score

    def contains3D(self, quality, words, cur_file = ''):
        '''Return (1, format_key) if any 3D tag occurs in *words*, else (0, None).'''
        cur_file = ss(cur_file)

        for key in self.threed_tags:
            tags = self.threed_tags.get(key, [])

            for tag in tags:
                if isinstance(tag, tuple):
                    if len(set(words) & set(tag)) == len(tag):
                        log.debug('Found %s in %s', (tag, cur_file))
                        return 1, key
                elif tag in words:
                    log.debug('Found %s in %s', (tag, cur_file))
                    return 1, key

        return 0, None

    def guessLooseScore(self, quality, extra = None):
        '''Score a quality using resolution metadata provided in *extra*.'''
        score = 0

        if extra:

            # Check width resolution, range 20
            if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20):
                log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0)))
                score += 10

            # Check height resolution, range 20
            if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20):
                log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0)))
                score += 5

            if quality.get('identifier') == 'dvdrip' and 480 <= extra.get('resolution_width', 0) <= 720:
                log.debug('Add point for correct dvdrip resolutions')
                score += 1

        return score

    def guessSizeScore(self, quality, size = None):
        '''Score a quality by how close *size* (MB) is to the quality's median size;
        sizes outside the [size_min, size_max] range are penalised.'''
        score = 0

        if size:

            size = tryFloat(size)
            size_min = tryFloat(quality['size_min'])
            size_max = tryFloat(quality['size_max'])

            if size_min <= size <= size_max:
                log.debug('Found %s via release size: %s MB < %s MB < %s MB', (quality['identifier'], size_min, size, size_max))

                # Award up to max_points, decreasing with distance from the
                # median size (both expressed as a fraction of the range)
                proc_range = size_max - size_min
                size_diff = size - size_min
                size_proc = (size_diff / proc_range)

                median_diff = quality['median_size'] - size_min
                median_proc = (median_diff / proc_range)

                max_points = 8
                score += ceil(max_points - (fabs(size_proc - median_proc) * max_points))
            else:
                score -= 5

        return score

    def calcScore(self, score, quality, add_score, threedscore = (0, None), penalty = 0):
        '''Add *add_score* (and any 3D score) to *quality*'s entry in *score*,
        penalising qualities that commonly co-occur with it ('allow' list).'''
        score[quality['identifier']]['score'] += add_score

        threedscore, threedtag = threedscore
        if threedscore and threedtag:
            # NOTE(review): this membership test checks the score *value*, not
            # the tag -- looks like it was meant to be `threedtag not in ...`;
            # preserved as-is.
            if threedscore not in score[quality['identifier']]['3d']:
                score[quality['identifier']]['3d'][threedtag] = 0

            score[quality['identifier']]['3d'][threedtag] += threedscore

        # Set order for allow calculation (and cache)
        if not self.cached_order:
            self.cached_order = {}
            for q in self.qualities:
                self.cached_order[q.get('identifier')] = self.qualities.index(q)

        if penalty and add_score != 0:
            for allow in quality.get('allow', []):
                score[allow]['score'] -= ((penalty * 2) if self.cached_order[allow] < self.cached_order[quality['identifier']] else penalty) * 2

            # Give penalty for all other qualities
            for q in self.qualities:
                if quality.get('identifier') != q.get('identifier') and score.get(q.get('identifier')):
                    score[q.get('identifier')]['score'] -= 1

    def isFinish(self, quality, profile, release_age = 0):
        '''Return True when downloading *quality* finishes the movie for *profile*
        (best wanted quality, or a finish-marked quality past its stop_after age).'''
        if not isinstance(profile, dict) or not profile.get('qualities'):
            # No profile so anything (scanned) is good enough
            return True

        try:
            index = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else False) == bool(quality.get('is_3d', False))][0]

            if index == 0 or (profile['finish'][index] and int(release_age) >= int(profile.get('stop_after', [0])[0])):
                return True

            return False
        except:
            return False

    def isHigher(self, quality, compare_with, profile = None):
        '''Compare two qualities within *profile*; returns 'higher', 'equal' or 'lower'.'''
        if not isinstance(profile, dict) or not profile.get('qualities'):
            profile = fireEvent('profile.default', single = True)

        # Try to find quality in profile, if not found: a quality we do not want is lower than anything else
        try:
            quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0]
        except:
            log.debug('Quality %s not found in profile identifiers %s', (quality['identifier'] + (' 3D' if quality.get('is_3d', 0) else ''), \
                [identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
            return 'lower'

        # Try to find compare quality in profile, if not found: anything is higher than a not wanted quality
        try:
            compare_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == compare_with['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(compare_with.get('is_3d', 0))][0]
        except:
            log.debug('Compare quality %s not found in profile identifiers %s', (compare_with['identifier'] + (' 3D' if compare_with.get('is_3d', 0) else ''), \
                [identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
            return 'higher'

        # Note to self: a lower number means higher quality
        if quality_order > compare_order:
            return 'lower'
        elif quality_order == compare_order:
            return 'equal'
        else:
            return 'higher'

    def doTest(self):
        '''Self test: run guess() over a set of known release names and sizes.'''
        tests = {
            'Movie Name (1999)-DVD-Rip.avi': {'size': 700, 'quality': 'dvdrip'},
            'Movie Name 1999 720p Bluray.mkv': {'size': 4200, 'quality': '720p'},
            'Movie Name 1999 BR-Rip 720p.avi': {'size': 1000, 'quality': 'brrip'},
            'Movie Name 1999 720p Web Rip.avi': {'size': 1200, 'quality': 'scr'},
            'Movie Name 1999 Web DL.avi': {'size': 800, 'quality': 'brrip'},
            'Movie.Name.1999.1080p.WEBRip.H264-Group': {'size': 1500, 'quality': 'scr'},
            'Movie.Name.1999.DVDRip-Group': {'size': 750, 'quality': 'dvdrip'},
            'Movie.Name.1999.DVD-Rip-Group': {'size': 700, 'quality': 'dvdrip'},
            'Movie.Name.1999.DVD-R-Group': {'size': 4500, 'quality': 'dvdr'},
            'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': {'size': 5500, 'quality': '720p'},
            'Movie.Name.2008.German.DL.AC3.1080p.BluRay.x264-Group': {'size': 8500, 'extra': {'resolution_width': 1920, 'resolution_height': 1080} , 'quality': '1080p'},
            'Movie.Name.2004.GERMAN.AC3D.DL.1080p.BluRay.x264-Group': {'size': 8000, 'quality': '1080p'},
            'Movie.Name.2013.BR-Disk-Group.iso': {'size': 48000, 'quality': 'bd50'},
            'Movie.Name.2013.2D+3D.BR-Disk-Group.iso': {'size': 52000, 'quality': 'bd50', 'is_3d': True},
            'Movie.Rising.Name.Girl.2011.NTSC.DVD9-GroupDVD': {'size': 7200, 'quality': 'dvdr'},
            'Movie Name (2013) 2D + 3D': {'size': 49000, 'quality': 'bd50', 'is_3d': True},
            'Movie Monuments 2013 BrRip 1080p': {'size': 1800, 'quality': 'brrip'},
            'Movie Monuments 2013 BrRip 720p': {'size': 1300, 'quality': 'brrip'},
            'The.Movie.2014.3D.1080p.BluRay.AVC.DTS-HD.MA.5.1-GroupName': {'size': 30000, 'quality': 'bd50', 'is_3d': True},
            '/home/namehou/Movie Monuments (2012)/Movie Monuments.mkv': {'size': 5500, 'quality': '720p', 'is_3d': False},
            '/home/namehou/Movie Monuments (2012)/Movie Monuments Full-OU.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True},
            '/home/namehou/Movie Monuments (2013)/Movie Monuments.mkv': {'size': 10000, 'quality': '1080p', 'is_3d': False},
            '/home/namehou/Movie Monuments (2013)/Movie Monuments Full-OU.mkv': {'size': 10000, 'quality': '1080p', 'is_3d': True},
            '/volume1/Public/3D/Moviename/Moviename (2009).3D.SBS.ts': {'size': 7500, 'quality': '1080p', 'is_3d': True},
            '/volume1/Public/Moviename/Moviename (2009).ts': {'size': 7500, 'quality': '1080p'},
            '/movies/BluRay HDDVD H.264 MKV 720p EngSub/QuiQui le fou (criterion collection #123, 1915)/QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'},
            'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'},
            'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) half-sbs 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True},
            'Moviename 2014 720p HDCAM XviD DualAudio': {'size': 4000, 'quality': 'cam'},
            'Moviename (2014) - 720p CAM x264': {'size': 2250, 'quality': 'cam'},
            'Movie Name (2014).mp4': {'size': 750, 'quality': 'brrip'},
            'Moviename.2014.720p.R6.WEB-DL.x264.AC3-xyz': {'size': 750, 'quality': 'r5'},
            'Movie name 2014 New Source 720p HDCAM x264 AC3 xyz': {'size': 750, 'quality': 'cam'},
            'Movie.Name.2014.720p.HD.TS.AC3.x264': {'size': 750, 'quality': 'ts'},
            'Movie.Name.2014.1080p.HDrip.x264.aac-ReleaseGroup': {'size': 7000, 'quality': 'brrip'},
            'Movie.Name.2014.HDCam.Chinese.Subs-ReleaseGroup': {'size': 15000, 'quality': 'cam'},
            'Movie Name 2014 HQ DVDRip X264 AC3 (bla)': {'size': 0, 'quality': 'dvdrip'},
            'Movie Name1 (2012).mkv': {'size': 4500, 'quality': '720p'},
            'Movie Name (2013).mkv': {'size': 8500, 'quality': '1080p'},
            'Movie Name (2014).mkv': {'size': 4500, 'quality': '720p', 'extra': {'titles': ['Movie Name 2014 720p Bluray']}},
            'Movie Name (2015).mkv': {'size': 500, 'quality': '1080p', 'extra': {'resolution_width': 1920}},
            'Movie Name (2015).mp4': {'size': 6500, 'quality': 'brrip'},
            'Movie Name (2015).mp4': {'size': 6500, 'quality': 'brrip'},
            'Movie Name.2014.720p Web-Dl Aac2.0 h264-ReleaseGroup': {'size': 3800, 'quality': 'brrip'},
            'Movie Name.2014.720p.WEBRip.x264.AC3-ReleaseGroup': {'size': 3000, 'quality': 'scr'},
            'Movie.Name.2014.1080p.HDCAM.-.ReleaseGroup': {'size': 5300, 'quality': 'cam'},
        }

        correct = 0
        for name in tests:
            test_quality = self.guess(files = [name], extra = tests[name].get('extra', None), size = tests[name].get('size', None), use_cache = False) or {}
            success = test_quality.get('identifier') == tests[name]['quality'] and test_quality.get('is_3d') == tests[name].get('is_3d', False)
            if not success:
                log.error('%s failed check, thinks it\'s "%s" expecting "%s"', (name,
                    test_quality.get('identifier') + (' 3D' if test_quality.get('is_3d') else ''),
                    tests[name]['quality'] + (' 3D' if tests[name].get('is_3d') else '')
                ))

            correct += success

        if correct == len(tests):
            log.info('Quality test successful')
            return True
        else:
            log.error('Quality test failed: %s out of %s succeeded', (correct, len(tests)))
|
"""
This module tests capnp serialization of HTMPredictionModel.
"""
import copy
import datetime
import numpy.random
import numpy.testing
import unittest
try:
# NOTE need to import capnp first to activate the magic necessary for
# PythonDummyRegion_capnp, etc.
import capnp
except ImportError:
capnp = None
else:
from nupic.frameworks.opf.HTMPredictionModelProto_capnp \
import HTMPredictionModelProto
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.htm_prediction_model import HTMPredictionModel
# Model parameters exercising the C++ ('cpp') implementations of the spatial
# pooler, temporal memory and SDR classifier.
CPP_MODEL_PARAMS = {
    'model': 'HTMPrediction',
    'version': 1,
    'aggregationInfo': {
        'days': 0,
        'fields': [(u'c1', 'sum'), (u'c0', 'first')],
        'hours': 1,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0},
    'predictAheadTime': None,
    'modelParams': {
        # inferenceType choices:
        #
        # TemporalNextStep, TemporalClassification, NontemporalClassification,
        # TemporalAnomaly, NontemporalAnomaly, TemporalMultiStep,
        # NontemporalMultiStep
        #
        'inferenceType': 'TemporalAnomaly',
        'sensorParams': {
            'verbosity' : 0,
            'encoders': {
                u'consumption': { 'clipInput': True,
                    'fieldname': u'consumption',
                    'maxval': 100.0,
                    'minval': 0.0,
                    'n': 50,
                    'name': u'c1',
                    'type': 'ScalarEncoder',
                    'w': 21},
            },
            'sensorAutoReset' : None,
        },
        'spEnable': True,
        'spParams': {
            'spatialImp' : 'cpp',
            'spVerbosity' : 0,
            'globalInhibition': 1,
            'columnCount': 512,
            'inputWidth': 0,
            'numActiveColumnsPerInhArea': 20,
            'seed': 1956,
            'potentialPct': 0.5,
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.005,
        },
        'tmEnable' : True,
        'tmParams': {
            'temporalImp': 'cpp',
            'verbosity': 0,
            'columnCount': 512,
            'cellsPerColumn': 8,
            'inputWidth': 512,
            'seed': 1960,
            'newSynapseCount': 10,
            'maxSynapsesPerSegment': 20,
            'maxSegmentsPerCell': 32,
            'initialPerm': 0.21,
            'permanenceInc': 0.1,
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            'minThreshold': 4,
            'activationThreshold': 6,
            'outputType': 'normal',
            'pamLength': 1,
        },
        'clParams': {
            'implementation': 'cpp',
            'regionName': 'SDRClassifierRegion',
            'verbosity' : 0,
            'alpha': 0.005,
            'steps': '1,5',
        },
        'anomalyParams': { u'anomalyCacheRecords': None,
            u'autoDetectThreshold': None,
            u'autoDetectWaitRecords': 2184},
        'trainSPNetOnlyIfRequested': False,
    },
}
# Model parameters exercising the pure-Python ('py') implementations of the
# spatial pooler, temporal memory and SDR classifier.
# NOTE(review): this literal duplicates CPP_MODEL_PARAMS except for the three
# implementation fields (spatialImp/temporalImp/implementation); keep the two
# in sync when changing either.
PY_MODEL_PARAMS = {
    'model': 'HTMPrediction',
    'version': 1,
    'aggregationInfo': { 'days': 0,
        'fields': [(u'c1', 'sum'), (u'c0', 'first')],
        'hours': 1,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0},
    'predictAheadTime': None,
    'modelParams': {
        # inferenceType choices:
        #
        # TemporalNextStep, TemporalClassification, NontemporalClassification,
        # TemporalAnomaly, NontemporalAnomaly, TemporalMultiStep,
        # NontemporalMultiStep
        #
        'inferenceType': 'TemporalAnomaly',
        'sensorParams': {
            'verbosity' : 0,
            'encoders': {
                u'consumption': { 'clipInput': True,
                    'fieldname': u'consumption',
                    'maxval': 100.0,
                    'minval': 0.0,
                    'n': 50,
                    'name': u'c1',
                    'type': 'ScalarEncoder',
                    'w': 21},
            },
            'sensorAutoReset' : None,
        },
        'spEnable': True,
        'spParams': {
            'spatialImp' : 'py',
            'spVerbosity' : 0,
            'globalInhibition': 1,
            'columnCount': 512,
            'inputWidth': 0,
            'numActiveColumnsPerInhArea': 20,
            'seed': 1956,
            'potentialPct': 0.5,
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.005,
        },
        'tmEnable' : True,
        'tmParams': {
            'temporalImp': 'py',
            'verbosity': 0,
            'columnCount': 512,
            'cellsPerColumn': 8,
            'inputWidth': 512,
            'seed': 1960,
            'newSynapseCount': 10,
            'maxSynapsesPerSegment': 20,
            'maxSegmentsPerCell': 32,
            'initialPerm': 0.21,
            'permanenceInc': 0.1,
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            'minThreshold': 4,
            'activationThreshold': 6,
            'outputType': 'normal',
            'pamLength': 1,
        },
        'clParams': {
            'implementation': 'py',
            'regionName': 'SDRClassifierRegion',
            'verbosity' : 0,
            'alpha': 0.005,
            'steps': '1,5',
        },
        'anomalyParams': { u'anomalyCacheRecords': None,
            u'autoDetectThreshold': None,
            u'autoDetectWaitRecords': 2184},
        'trainSPNetOnlyIfRequested': False,
    },
}
class HTMPredictionModelSerializationTest(unittest.TestCase):
  """Capnproto serialization round-trip tests for HTMPredictionModel."""

  def _runModelSerializationDeserializationChecks(self, modelParams):
    """Serialize a model, deserialize it, and verify equivalent behavior.

    Runs one record through the original model, round-trips it through the
    capnp builder/reader, then runs a second record through both the
    original and the deserialized model and compares all result fields and
    internal regions.

    Args:
      modelParams: model-params dict accepted by ModelFactory.create.
    """
    m1 = ModelFactory.create(modelParams)
    m1.enableInference({'predictedField': 'consumption'})
    headers = ['timestamp', 'consumption']
    record = [datetime.datetime(2013, 12, 12), numpy.random.uniform(100)]
    modelInput = dict(zip(headers, record))
    m1.run(modelInput)

    # Serialize
    builderProto = HTMPredictionModelProto.new_message()
    m1.write(builderProto)

    # Construct HTMPredictionModelProto reader from populated builder
    readerProto = HTMPredictionModelProto.from_bytes(builderProto.to_bytes())

    # Deserialize
    m2 = HTMPredictionModel.read(readerProto)

    self.assertEqual(m1.getInferenceType(),
                     modelParams['modelParams']['inferenceType'])
    self.assertEqual(m1.getInferenceType(), m2.getInferenceType())

    # Run computes on m1 & m2 and compare results
    record = [datetime.datetime(2013, 12, 14), numpy.random.uniform(100)]
    modelInput = dict(zip(headers, record))
    # Use deepcopy to guarantee no input side-effect between calls
    r1 = m1.run(copy.deepcopy(modelInput))
    r2 = m2.run(copy.deepcopy(modelInput))

    # Compare results
    self.assertEqual(r2.predictionNumber, r1.predictionNumber)
    self.assertEqual(r2.rawInput, r1.rawInput)
    self.assertEqual(r2.sensorInput.dataRow, r1.sensorInput.dataRow)
    self.assertEqual(r2.sensorInput.dataDict, r1.sensorInput.dataDict)
    numpy.testing.assert_array_equal(r2.sensorInput.dataEncodings,
                                     r1.sensorInput.dataEncodings)
    self.assertEqual(r2.sensorInput.sequenceReset, r1.sensorInput.sequenceReset)
    self.assertEqual(r2.sensorInput.category, r1.sensorInput.category)
    self.assertEqual(r2.inferences, r1.inferences)
    self.assertEqual(r2.metrics, r1.metrics)
    self.assertEqual(r2.predictedFieldIdx, r1.predictedFieldIdx)
    self.assertEqual(r2.predictedFieldName, r1.predictedFieldName)
    numpy.testing.assert_array_equal(r2.classifierInput.dataRow,
                                     r1.classifierInput.dataRow)
    self.assertEqual(r2.classifierInput.bucketIndex,
                     r1.classifierInput.bucketIndex)

    # Compare regions
    self.assertIsNotNone(m2._getSensorRegion())
    self.assertEqual(m2._getSensorRegion(), m1._getSensorRegion())
    self.assertIsNotNone(m2._getClassifierRegion())
    self.assertEqual(m2._getClassifierRegion(), m1._getClassifierRegion())
    self.assertIsNotNone(m2._getTPRegion())
    self.assertEqual(m2._getTPRegion(), m1._getTPRegion())
    self.assertIsNotNone(m2._getSPRegion())
    self.assertEqual(m2._getSPRegion(), m1._getSPRegion())

  @unittest.skipUnless(
      capnp, 'pycapnp is not installed, skipping serialization test.')
  def testPredictedFieldAndInferenceEnabledAreSaved(self):
    """Inference-enabled flag and inference args must survive a round trip."""
    m1 = ModelFactory.create(PY_MODEL_PARAMS)
    m1.enableInference({'predictedField': 'consumption'})
    self.assertTrue(m1.isInferenceEnabled())
    self.assertEqual(m1.getInferenceArgs().get('predictedField'), 'consumption')
    headers = ['timestamp', 'consumption']
    record = [datetime.datetime(2013, 12, 12), numpy.random.uniform(100)]
    modelInput = dict(zip(headers, record))
    m1.run(modelInput)

    # Serialize
    builderProto = HTMPredictionModelProto.new_message()
    m1.write(builderProto)

    # Construct HTMPredictionModelProto reader from populated builder
    readerProto = HTMPredictionModelProto.from_bytes(builderProto.to_bytes())

    # Deserialize
    m2 = HTMPredictionModel.read(readerProto)
    self.assertTrue(m2.isInferenceEnabled())
    self.assertEqual(m2.getInferenceArgs().get('predictedField'), 'consumption')

    # Running the deserialized m2 without redundant enableInference call should
    # work
    record = [datetime.datetime(2013, 12, 14), numpy.random.uniform(100)]
    modelInput = dict(zip(headers, record))
    m2.run(modelInput)

    # Check that disabled inference is saved, too (since constructor defaults to
    # enabled at time of this writing)
    m1.disableInference()
    self.assertFalse(m1.isInferenceEnabled())
    builderProto = HTMPredictionModelProto.new_message()
    m1.write(builderProto)
    readerProto = HTMPredictionModelProto.from_bytes(builderProto.to_bytes())
    m3 = HTMPredictionModel.read(readerProto)
    self.assertFalse(m3.isInferenceEnabled())

  @unittest.skipUnless(
      capnp, 'pycapnp is not installed, skipping serialization test.')
  def testCPPModelSerialization(self):
    # Round-trip a model built with the C++ implementations.
    self._runModelSerializationDeserializationChecks(CPP_MODEL_PARAMS)

  @unittest.skipUnless(
      capnp, 'pycapnp is not installed, skipping serialization test.')
  def testPYModelSerialization(self):
    # Round-trip a model built with the pure-Python implementations.
    self._runModelSerializationDeserializationChecks(PY_MODEL_PARAMS)
# Allow running the serialization tests in this module directly.
if __name__ == "__main__":
  unittest.main()
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
try_get,
unified_timestamp,
)
class EggheadCourseIE(InfoExtractor):
    IE_DESC = 'egghead.io course'
    IE_NAME = 'egghead:course'
    # Scheme made optional (`https?`) so plain-http course URLs are
    # accepted as well; existing https URLs keep matching unchanged.
    _VALID_URL = r'https?://egghead\.io/courses/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
        'playlist_count': 29,
        'info_dict': {
            'id': 'professor-frisby-introduces-composable-functional-javascript',
            'title': 'Professor Frisby Introduces Composable Functional JavaScript',
            'description': 're:(?s)^This course teaches the ubiquitous.*You\'ll start composing functionality before you know it.$',
        },
    }

    def _real_extract(self, url):
        """Return a playlist of Wistia-hosted lessons for an egghead course."""
        playlist_id = self._match_id(url)

        course = self._download_json(
            'https://egghead.io/api/v1/series/%s' % playlist_id, playlist_id)

        # Each lesson is delegated to the Wistia extractor; lessons without a
        # wistia_id are skipped.
        entries = [
            self.url_result(
                'wistia:%s' % lesson['wistia_id'], ie='Wistia',
                video_id=lesson['wistia_id'], video_title=lesson.get('title'))
            for lesson in course['lessons'] if lesson.get('wistia_id')]

        return self.playlist_result(
            entries, playlist_id, course.get('title'),
            course.get('description'))
class EggheadLessonIE(InfoExtractor):
    IE_DESC = 'egghead.io lesson'
    IE_NAME = 'egghead:lesson'
    # Scheme made optional (`https?`) so plain-http lesson URLs are
    # accepted as well; existing https URLs keep matching unchanged.
    _VALID_URL = r'https?://egghead\.io/lessons/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'https://egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
        'info_dict': {
            'id': 'fv5yotjxcg',
            'ext': 'mp4',
            'title': 'Create linear data flow with container style types (Box)',
            'description': 'md5:9aa2cdb6f9878ed4c39ec09e85a8150e',
            'thumbnail': r're:^https?:.*\.jpg$',
            'timestamp': 1481296768,
            'upload_date': '20161209',
            'duration': 304,
            'view_count': 0,
            'tags': ['javascript', 'free'],
        },
        'params': {
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        """Extract a single lesson, delegating video data to Wistia."""
        lesson_id = self._match_id(url)

        lesson = self._download_json(
            'https://egghead.io/api/v1/lessons/%s' % lesson_id, lesson_id)

        # url_transparent: the Wistia extractor supplies formats while the
        # egghead API metadata below takes precedence.
        return {
            '_type': 'url_transparent',
            'ie_key': 'Wistia',
            'url': 'wistia:%s' % lesson['wistia_id'],
            'id': lesson['wistia_id'],
            'title': lesson.get('title'),
            'description': lesson.get('summary'),
            'thumbnail': lesson.get('thumb_nail'),
            'timestamp': unified_timestamp(lesson.get('published_at')),
            'duration': int_or_none(lesson.get('duration')),
            'view_count': int_or_none(lesson.get('plays_count')),
            'tags': try_get(lesson, lambda x: x['tag_list'], list),
        }
|
"""Sampling functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import input as input_ops
from tensorflow.python.training import queue_runner
# Public names exported by this module (see tf.contrib.training).
__all__ = ['rejection_sample',
           'stratified_sample',]
def rejection_sample(tensors, accept_prob_fn, batch_size, queue_threads=1,
                     enqueue_many=False, prebatch_capacity=16,
                     prebatch_threads=1, runtime_checks=False, name=None):
  """Stochastically creates batches by rejection sampling.

  Each list of non-batched tensors is evaluated by `accept_prob_fn`, to produce
  a scalar tensor between 0 and 1. This tensor corresponds to the probability of
  being accepted. When `batch_size` tensor groups have been accepted, the batch
  queue will return a mini-batch.

  Args:
    tensors: List of tensors for data. All tensors are either one item or a
        batch, according to enqueue_many.
    accept_prob_fn: A python lambda that takes a non-batch tensor from each
        item in `tensors`, and produces a scalar tensor.
    batch_size: Size of batch to be returned.
    queue_threads: The number of threads for the queue that will hold the final
        batch.
    enqueue_many: Bool. If true, interpret input tensors as having a batch
        dimension.
    prebatch_capacity: Capacity for the large queue that is used to convert
        batched tensors to single examples.
    prebatch_threads: Number of threads for the large queue that is used to
        convert batched tensors to single examples.
    runtime_checks: Bool. If true, insert runtime checks on the output of
        `accept_prob_fn`. Using `True` might have a performance impact.
    name: Optional prefix for ops created by this function.

  Raises:
    ValueError: if `enqueue_many` is `True` and the batch dimensions of the
        tensors in `tensors` are not compatible with each other.

  Returns:
    A list of tensors of the same length as `tensors`, with batch dimension
    `batch_size`.

  Example:
    # Get tensor for a single data and label example.
    data, label = data_provider.Get(['data', 'label'])

    # Get stratified batch according to data tensor.
    accept_prob_fn = lambda x: (tf.tanh(x[0]) + 1) / 2
    data_batch = tf.contrib.training.rejection_sample(
        [data, label], accept_prob_fn, 16)

    # Run batch through network.
    ...
  """
  with variable_scope.variable_scope(name, 'rejection_sample', tensors):
    tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors)
    # Reduce the case of a batched example to that of a batch of a single
    # example by taking a batch of size one.
    if enqueue_many:
      # Validate that batch dimension of the input is consistent.
      tensor_list = _verify_data_inputs(tensor_list)

      # Make a single queue to hold input examples. Reshape output so examples
      # don't have singleton batch dimension.
      batched = input_ops.batch(tensor_list,
                                batch_size=1,
                                num_threads=prebatch_threads,
                                capacity=prebatch_capacity,
                                enqueue_many=True)
      tensor_list = [array_ops.squeeze(x, [0]) for x in batched]

    # Set up a queue containing batches that have the distribution.
    cur_prob = accept_prob_fn(tensor_list)
    if runtime_checks:
      cur_prob = array_ops.identity(control_flow_ops.with_dependencies(
          [check_ops.assert_less_equal(0.0, cur_prob),
           check_ops.assert_less_equal(cur_prob, 1.0)],
          cur_prob), name='prob_with_checks')
    # Accept the current example with probability `cur_prob`.
    keep_input = random_ops.random_uniform([]) < cur_prob
    return _conditional_batch(
        tensor_list, keep_input, batch_size, num_threads=queue_threads)
def stratified_sample(tensors, labels, target_probs, batch_size,
                      init_probs=None, enqueue_many=False, queue_capacity=16,
                      threads_per_queue=1, name=None):
  """Stochastically creates batches based on per-class probabilities.

  This method discards examples. Internally, it creates one queue to amortize
  the cost of disk reads, and one queue to hold the properly-proportioned
  batch.

  Args:
    tensors: List of tensors for data. All tensors are either one item or a
        batch, according to enqueue_many.
    labels: Tensor for label of data. Label is a single integer or a batch,
        depending on enqueue_many. It is not a one-hot vector.
    target_probs: Target class proportions in batch. An object whose type has a
        registered Tensor conversion function.
    batch_size: Size of batch to be returned.
    init_probs: Class proportions in the data. An object whose type has a
        registered Tensor conversion function, or `None` for estimating the
        initial distribution.
    enqueue_many: Bool. If true, interpret input tensors as having a batch
        dimension.
    queue_capacity: Capacity of the large queue that holds input examples.
    threads_per_queue: Number of threads for the large queue that holds input
        examples and for the final queue with the proper class proportions.
    name: Optional prefix for ops created by this function.

  Raises:
    ValueError: enqueue_many is True and labels doesn't have a batch
        dimension, or if enqueue_many is False and labels isn't a scalar.
    ValueError: enqueue_many is True, and batch dimension on data and labels
        don't match.
    ValueError: if probs don't sum to one.
    ValueError: if a zero initial probability class has a nonzero target
        probability.
    TFAssertion: if labels aren't integers in [0, num classes).

  Returns:
    (data_batch, label_batch), where data_batch is a list of tensors of the same
        length as `tensors`

  Example:
    # Get tensor for a single data and label example.
    data, label = data_provider.Get(['data', 'label'])

    # Get stratified batch according to per-class probabilities.
    target_probs = [...distribution you want...]
    [data_batch], labels = tf.contrib.training.stratified_sample(
        [data], label, target_probs)

    # Run batch through network.
    ...
  """
  with ops.name_scope(name, 'stratified_sample', tensors + [labels]):
    tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors)
    labels = ops.convert_to_tensor(labels)
    target_probs = ops.convert_to_tensor(target_probs, dtype=dtypes.float32)
    # Reduce the case of a single example to that of a batch of size 1.
    if not enqueue_many:
      tensor_list = [array_ops.expand_dims(tensor, 0) for tensor in tensor_list]
      labels = array_ops.expand_dims(labels, 0)

    # If `init_probs` is `None`, set up online estimation of data distribution.
    if init_probs is None:
      # We use `target_probs` to get the number of classes, so its shape must be
      # fully defined at graph construction time.
      target_probs.get_shape().assert_is_fully_defined()
      init_probs = _estimate_data_distribution(
          labels, target_probs.get_shape().num_elements())
    else:
      init_probs = ops.convert_to_tensor(init_probs, dtype=dtypes.float32)

    # Validate that input is consistent.
    tensor_list, labels, [init_probs, target_probs] = _verify_input(
        tensor_list, labels, [init_probs, target_probs])

    # Check that all zero initial probabilities also have zero target
    # probabilities.
    assert_op = control_flow_ops.Assert(
        math_ops.reduce_all(math_ops.logical_or(
            math_ops.not_equal(init_probs, 0),
            math_ops.equal(target_probs, 0))),
        ['All classes with zero initial probability must also have zero target '
         'probability: ', init_probs, target_probs])
    init_probs = control_flow_ops.with_dependencies([assert_op], init_probs)

    # Calculate acceptance sampling probabilities.
    accept_probs = _calculate_acceptance_probabilities(init_probs, target_probs)
    proportion_rejected = math_ops.reduce_sum((1 - accept_probs) * init_probs)
    # Warn (at most 10 times) when the sampler rejects more than half of the
    # incoming examples, since that wastes input-pipeline throughput.
    accept_probs = control_flow_ops.cond(
        math_ops.less(proportion_rejected, .5),
        lambda: accept_probs,
        lambda: logging_ops.Print(  # pylint: disable=g-long-lambda
            accept_probs, [accept_probs],
            message='Proportion of examples rejected by sampler is high.',
            first_n=10))

    # Make a single queue to hold input examples. Reshape output so examples
    # don't have singleton batch dimension.
    batched = input_ops.batch(tensor_list + [labels],
                              batch_size=1,
                              num_threads=threads_per_queue,
                              capacity=queue_capacity,
                              enqueue_many=True)
    val_list = [array_ops.squeeze(x, [0]) for x in batched[:-1]]
    label = array_ops.squeeze(batched[-1], [0])

    # Set up second queue containing batches that have the desired class
    # proportions. Each example is kept with probability accept_probs[label].
    cur_prob = array_ops.gather(accept_probs, label)
    keep_input = random_ops.random_uniform([]) < cur_prob
    batched = _conditional_batch(
        val_list + [label],
        keep_input,
        batch_size,
        num_threads=threads_per_queue)
    return batched[:-1], batched[-1]
def _estimate_data_distribution(labels, num_classes, smoothing_constant=10):
"""Estimate data distribution as labels are seen."""
# Variable to track running count of classes. Smooth by a nonzero value to
# avoid division-by-zero. Higher values provide more stability at the cost of
# slower convergence.
if smoothing_constant <= 0:
raise ValueError('smoothing_constant must be nonzero.')
num_examples_per_class_seen = variables.Variable(
initial_value=[smoothing_constant] * num_classes, trainable=False,
name='class_count', dtype=dtypes.int64)
# Update the class-count based on what labels are seen in batch.
num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
math_ops.reduce_sum(array_ops.one_hot(labels, num_classes,
dtype=dtypes.int64), 0))
# Normalize count into a probability.
# NOTE: Without the `+= 0` line below, the test
# `testMultiThreadedEstimateDataDistribution` fails. The reason is that
# before this line, `num_examples_per_class_seen` is a Tensor that shares a
# buffer with an underlying `ref` object. When the `ref` is changed by another
# thread, `num_examples_per_class_seen` changes as well. Since this can happen
# in the middle of the normalization computation, we get probabilities that
# are very far from summing to one. Adding `+= 0` copies the contents of the
# tensor to a new buffer, which will be consistent from the start to the end
# of the normalization computation.
num_examples_per_class_seen += 0
init_prob_estimate = math_ops.truediv(
num_examples_per_class_seen,
math_ops.reduce_sum(num_examples_per_class_seen))
# Must return float32 (not float64) to agree with downstream `_verify_input`
# checks.
return math_ops.cast(init_prob_estimate, dtypes.float32)
def _verify_data_inputs(tensor_list):
"""Verify that batched data inputs are well-formed."""
for tensor in tensor_list:
# Data tensor should have a batch dimension.
tensor_shape = tensor.get_shape().with_rank_at_least(1)
# Data batch dimensions must be compatible.
tensor_shape[0].assert_is_compatible_with(tensor_list[0].get_shape()[0])
return tensor_list
def _verify_input(tensor_list, labels, probs_list):
  """Verify that batched inputs are well-formed.

  Adds static (graph-construction time) and dynamic (runtime) checks on the
  data tensors, labels and probability vectors.

  Args:
    tensor_list: List of batched data tensors.
    labels: Batched label tensor (one integer label per example).
    probs_list: List of 1-D probability tensors with fully defined shapes.

  Returns:
    A `(tensor_list, labels, checked_probs_list)` tuple where each element
    has runtime validity checks attached as control dependencies.

  Raises:
    ValueError: if the probability tensors don't all have the same length.
  """
  checked_probs_list = []
  for probs in probs_list:
    # Since number of classes shouldn't change at runtime, probabilities shape
    # should be fully defined.
    probs.get_shape().assert_is_fully_defined()

    # Probabilities must be 1D.
    probs.get_shape().assert_has_rank(1)

    # Probabilities must be nonnegative and sum to one.
    tol = 1e-6
    prob_sum = math_ops.reduce_sum(probs)
    checked_probs = control_flow_ops.with_dependencies(
        [check_ops.assert_non_negative(probs),
         check_ops.assert_less(prob_sum, 1.0 + tol),
         check_ops.assert_less(1.0 - tol, prob_sum)],
        probs)
    checked_probs_list.append(checked_probs)

  # All probabilities should be the same length.
  prob_length = checked_probs_list[0].get_shape().num_elements()
  for checked_prob in checked_probs_list:
    if checked_prob.get_shape().num_elements() != prob_length:
      raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  for tensor in tensor_list:
    # Data tensor should have a batch dimension.
    tensor_shape = tensor.get_shape().with_rank_at_least(1)

    # Data and label batch dimensions must be compatible.
    tensor_shape[0].assert_is_compatible_with(labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  labels_batch_size = array_ops.shape(labels)[0]
  lbl_assert = check_ops.assert_positive(labels_batch_size)

  # Make each tensor depend on its own checks.
  labels = control_flow_ops.with_dependencies([lbl_assert], labels)
  tensor_list = [control_flow_ops.with_dependencies(
      [lbl_assert,
       check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)],
      x) for x in tensor_list]

  # Label's classes must be integers 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies(
      [check_ops.assert_integer(labels),
       check_ops.assert_non_negative(labels),
       check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))],
      labels)
  return tensor_list, labels, checked_probs_list
def _calculate_acceptance_probabilities(init_probs, target_probs):
  """Calculate the per-class acceptance rates.

  Args:
    init_probs: The class probabilities of the data.
    target_probs: The desired class proportion in minibatches.
  Returns:
    A list of the per-class acceptance probabilities.

  This method is based on solving the following analysis:

  Let F be the probability of a rejection (on any example).
  Let p_i be the proportion of examples in the data in class i (init_probs)
  Let a_i be the rate the rejection sampler should *accept* class i
  Let t_i be the target proportion in the minibatches for class i (target_probs)

  ```
  F = sum_i(p_i * (1-a_i))
    = 1 - sum_i(p_i * a_i)     using sum_i(p_i) = 1
  ```

  An example with class `i` will be accepted if `k` rejections occur, then an
  example with class `i` is seen by the rejector, and it is accepted. This can
  be written as follows:

  ```
  t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F)    using geometric series identity, since 0 <= F < 1
      = p_i * a_i / sum_j(p_j * a_j)        using F from above
  ```

  Note that the following constraints hold:
  ```
  0 <= p_i <= 1, sum_i(p_i) = 1
  0 <= a_i <= 1
  0 <= t_i <= 1, sum_i(t_i) = 1
  ```

  A solution for a_i in terms of the other variables is the following:
    ```a_i = (t_i / p_i) / max_i[t_i / p_i]```
  """
  # Make list of t_i / p_i.
  ratio_l = target_probs / init_probs

  # Replace NaNs (division by a zero p_i above) with 0s.
  ratio_l = math_ops.select(math_ops.is_nan(ratio_l),
                            array_ops.zeros_like(ratio_l),
                            ratio_l)

  # Calculate list of acceptance probabilities.
  max_ratio = math_ops.reduce_max(ratio_l)
  return ratio_l / max_ratio
def _conditional_batch(tensors, keep_input, batch_size, num_threads=10):
  """Conditionally enqueue tensors based on accept_prob.

  Specifically, enqueue the element if accept_prob > rand_unif([0, 1]).

  Args:
      tensors: List of tensors to enqueue.
      keep_input: Bool. Whether to enqueue or not.
      batch_size: Size of batch.
      num_threads: Number of enqueueing threads.

  Returns:
    List of batched tensors.

  Raises:
    ValueError: `keep_input` isn't 0D (i.e. isn't a scalar boolean tensor).
  """
  # A single scalar decides whether the whole example is kept; per-example
  # vectors are not supported here.
  keep_input.get_shape().assert_has_rank(0)
  # Determine shapes and types of to-be-enqueued-tensors.
  shapes_list = []
  dtypes_list = []
  for tensor in tensors:
    cur_shape = tensor.get_shape()
    # FIFOQueue requires fully defined shapes for its elements.
    cur_shape.assert_is_fully_defined()
    shapes_list.append(cur_shape)
    dtypes_list.append(tensor.dtype)

  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=shapes_list,
                                    dtypes=dtypes_list,
                                    name='batched_queue')
  summary.scalar('queue/%s/size' % final_q.name, final_q.size())

  # Conditionally enqueue.
  # Reshape enqueue op to match no_op's shape.
  conditional_enqueue = control_flow_ops.cond(
      keep_input,
      lambda: final_q.enqueue(tensors),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * num_threads))

  out_tensor = final_q.dequeue_many(batch_size)
  # Queues return a single tensor if the list of enqueued tensors is one. Since
  # we want the type to be the same in all cases, always return a list.
  if isinstance(out_tensor, ops.Tensor):
    out_tensor = [out_tensor]

  return out_tensor
|
import os
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, 'perf'))
from core import path_util
path_util.AddPyUtilsToPath()
path_util.AddTracingToPath()
import metadata_extractor
from metadata_extractor import OSName
from core.tbmv3 import trace_processor
import mock
class ExtractMetadataTestCase(unittest.TestCase):
  """Tests for metadata_extractor.MetadataExtractor.

  Each test replaces trace_processor.RunQuery with a mock whose side effect
  looks up the (trace_processor_path, trace_file, query) call tuple in a
  table of canned query results.
  """
  # NOTE(review): tests assign trace_processor.RunQuery directly and never
  # restore the original function; mock.patch.object would undo the patch
  # after each test — confirm nothing else in the suite needs the real
  # RunQuery.

  def setUp(self):
    self.trace_processor_path = 'trace_processor_shell'
    self.trace_file = 'trace_file.proto'

  def _RunQueryParams(self, query):
    """Returns tuple of RunQuery function parameters.

    Args:
      query: sql query to extract metadata from proto trace.
    """
    return (self.trace_processor_path, self.trace_file, query)

  def _CreateRunQueryResults(self,
                             version_number_results=frozenset([]),
                             os_name_results=frozenset([]),
                             architecture_results=frozenset([]),
                             bitness_results=frozenset([]),
                             version_code_results=frozenset([]),
                             modules_results=frozenset([])):
    """Mock return of RunQuery calls.

    See trace_processor.RunQuery for the format of the query results.
    Each parameter is a result dictionary for corresponding SQL query
    defined in metadata_extractor.py. For example, valid value for
    os_name_results = [{'str_value' : 'pineapple'}]

    The empty-frozenset defaults are immutable stand-ins for "query
    returned no rows".

    Returns:
      A dictionary mapping of RunQuery parameters to their mocked
      RunQuery function return values.
    """
    return {
        self._RunQueryParams(metadata_extractor.VERSION_NUM_QUERY):
        version_number_results,
        self._RunQueryParams(metadata_extractor.OS_NAME_QUERY): os_name_results,
        self._RunQueryParams(metadata_extractor.ARCH_QUERY):
        architecture_results,
        self._RunQueryParams(metadata_extractor.BITNESS_QUERY): bitness_results,
        self._RunQueryParams(metadata_extractor.VERSION_CODE_QUERY):
        version_code_results,
        self._RunQueryParams(metadata_extractor.MODULES_QUERY): modules_results
    }

  def _CreateRunQueryResultsFromValues(self,
                                       version_number=None,
                                       os_name=None,
                                       architecture=None,
                                       bitness=None,
                                       version_code=None,
                                       modules=None):
    """Mock return of RunQuery calls by values, except for modules.

    Args:
      version_number: string containing chrome version number
        (eg: 'Chrome/93.0.4537.0').
      os_name: string of platform of the trace writer (eg. 'Android').
      architecture: string of OS arch of the trace writer, as returned by
        base::SysInfo::OperatingSystemArchitecture() (eg: 'x86_64).
      bitness: string of architecture bitness (eg. '32', '64').
      version_code: string of version code of chrome used by Android
        play store.
      modules: list of dictionaries mock return value of RunQuery
        when its called with sql query metadata_extractor.MODULES_QUERY
        See _CreateRunQueryResults function for more information.
        (eg: [{'name': '/libmonochrome.so, 'build_id': '3284389AB83CD'}]).

    Returns:
      A dictionary mapping of RunQuery parameters to their mocked
      RunQuery function return values.
    """
    return self._CreateRunQueryResults(
        version_number_results=[{'str_value': version_number}],
        os_name_results=[{'str_value': os_name}],
        architecture_results=[{'str_value': architecture}],
        bitness_results=[{'int_value': bitness}],
        version_code_results=[{'int_value': version_code}],
        modules_results=modules)

  def testExtractMetadata(self):
    def side_effect(*args):
      # RunQuery is invoked as RunQuery(trace_processor_path, trace_file,
      # query); look the call tuple up in the canned-results table.
      params = self._CreateRunQueryResultsFromValues(
          version_number='Chrome/36.9.7934.4',
          os_name='Android',
          architecture='x86_64',
          bitness='64',
          version_code='857854',
          modules=[{
              'name': '/libmonochrome.so',
              'build_id': '3284389AB83CD'
          }, {
              'name': '/missing',
              'build_id': 'AB3288CDE3283'
          }, {
              'name': '/chrome.so',
              'build_id': 'abcdef'
          }])
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.version_number, '36.9.7934.4')
    self.assertEqual(extractor.os_name, OSName.ANDROID)
    self.assertEqual(extractor.architecture, 'x86_64')
    self.assertEqual(extractor.bitness, '64')
    self.assertEqual(extractor.version_code, '857854')
    # Note: '/missing' is expected to be dropped and build ids are apparently
    # normalized to upper case ('abcdef' -> 'ABCDEF') by the extractor.
    self.assertEqual(extractor.modules, {
        '/libmonochrome.so': '3284389AB83CD',
        '/chrome.so': 'ABCDEF'
    })

  def testExtractMetadataEmptyList(self):
    # All queries return no rows; every attribute should come back None.
    def side_effect(*args):
      params = self._CreateRunQueryResults()
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.version_number, None)
    self.assertEqual(extractor.os_name, None)
    self.assertEqual(extractor.architecture, None)
    self.assertEqual(extractor.bitness, None)
    self.assertEqual(extractor.version_code, None)
    self.assertEqual(extractor.modules, None)

  def testExtractMetadataValuesNull(self):
    # Rows exist but all values are null; attributes should still be None.
    def side_effect(*args):
      params = self._CreateRunQueryResultsFromValues(modules=[{
          'name': None,
          'build_id': None
      }, {
          'name': None,
          'build_id': None
      }])
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.version_number, None)
    self.assertEqual(extractor.os_name, None)
    self.assertEqual(extractor.architecture, None)
    self.assertEqual(extractor.bitness, None)
    self.assertEqual(extractor.version_code, None)
    self.assertEqual(extractor.modules, None)

  def testExtractMetadataVersionNumberParsed(self):
    # A version number without the 'Chrome/' prefix is kept as-is.
    def side_effect(*args):
      params = self._CreateRunQueryResultsFromValues(
          version_number='36.9.7934.4')
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.version_number, '36.9.7934.4')

  def testParseOSNameAndroid(self):
    def side_effect(*args):
      params = self._CreateRunQueryResultsFromValues(os_name='Android')
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.os_name, OSName.ANDROID)

  def testParseOSNameLinux(self):
    def side_effect(*args):
      params = self._CreateRunQueryResultsFromValues(os_name='Linux')
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.os_name, OSName.LINUX)

  def testParseMac64(self):
    # Mac traces carry a '-64' suffix on the version; it should be stripped.
    def side_effect(*args):
      params = self._CreateRunQueryResultsFromValues(
          os_name='Mac OS X', version_number='Chrome/28.9.9364.32-64')
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.version_number, '28.9.9364.32')
    self.assertEqual(extractor.os_name, OSName.MAC)

  def testParseOSNameWindows(self):
    def side_effect(*args):
      params = self._CreateRunQueryResultsFromValues(os_name='Windows NT')
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.os_name, OSName.WINDOWS)

  def testParseOSNameCrOS(self):
    def side_effect(*args):
      params = self._CreateRunQueryResultsFromValues(os_name='CrOS')
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.os_name, OSName.CROS)

  def testParseOSNameFuschia(self):
    def side_effect(*args):
      params = self._CreateRunQueryResultsFromValues(os_name='Fuschia')
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)
    extractor.Initialize()

    self.assertEqual(extractor.os_name, OSName.FUSCHIA)

  def testParseOSNameNotRecognized(self):
    # An unknown OS string should make Initialize raise.
    def side_effect(*args):
      params = self._CreateRunQueryResultsFromValues(os_name='blah')
      return params[args]

    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    trace_processor.RunQuery = mock.MagicMock(side_effect=side_effect)

    exception_msg = 'OS name "blah" not recognized: ' + self.trace_file
    with self.assertRaises(Exception) as context:
      extractor.Initialize()
    self.assertEqual(exception_msg, str(context.exception))

  def testGetModuleIds(self):
    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    # GetModuleIds should return the set of build-id values, ignoring names.
    extractor.InitializeForTesting(modules={
        'name': '13423EDFAB2',
        'name2': '321468945',
        'name3': '4093492737482'
    })
    self.assertEqual(extractor.GetModuleIds(),
                     {'13423EDFAB2', '321468945', '4093492737482'})

  def testGetModuleIdsEmpty(self):
    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    extractor.InitializeForTesting(modules={})
    self.assertEqual(extractor.GetModuleIds(), set())

  def testGetModuleIdsNone(self):
    extractor = metadata_extractor.MetadataExtractor(self.trace_processor_path,
                                                     self.trace_file)
    extractor.InitializeForTesting(modules=None)
    self.assertEqual(extractor.GetModuleIds(), None)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
from nipype.testing import assert_equal
from nipype.interfaces.slicer.utilities import EMSegmentTransformToNewFormat
def test_EMSegmentTransformToNewFormat_inputs():
    """Check the auto-generated input-trait metadata of the interface.

    Nose-style generator test: yields one equality check per
    (trait, metadata-key) pair so each mismatch is reported individually.
    """
    # Expected metadata for every input trait, keyed by trait name.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    inputMRMLFileName=dict(argstr='--inputMRMLFileName %s',
    ),
    outputMRMLFileName=dict(argstr='--outputMRMLFileName %s',
    hash_files=False,
    ),
    templateFlag=dict(argstr='--templateFlag ',
    ),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = EMSegmentTransformToNewFormat.input_spec()
    # Compare every declared metadata value against the live trait object.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_EMSegmentTransformToNewFormat_outputs():
    """Check the auto-generated output-trait metadata of the interface.

    Nose-style generator test: one equality check per metadata entry.
    """
    expected = dict(outputMRMLFileName=dict())
    outputs = EMSegmentTransformToNewFormat.output_spec()
    for trait_name, metadata in expected.items():
        for meta_key, expected_value in metadata.items():
            yield (assert_equal,
                   getattr(outputs.traits()[trait_name], meta_key),
                   expected_value)
|
import volatility.obj as obj
class _KDDEBUGGER_DATA64(obj.CType):
    """A class for KDBG (the kernel debugger data block).

    Exposes helpers to validate the block and to walk the process,
    module and per-CPU KPCR structures it references.
    """
    def is_valid(self):
        """Returns true if the kdbg_object appears valid"""
        # Check the OwnerTag is in fact the string KDBG
        # (0x4742444B is the ASCII bytes 'KDBG' read little-endian).
        return obj.CType.is_valid(self) and self.Header.OwnerTag == 0x4742444B
    @property
    def ServicePack(self):
        """Get the service pack number. This is something
        like 0x100 for SP1, 0x200 for SP2 etc.
        """
        # CmNtCSDVersion holds the address of the CSD version value in the
        # kernel; read it through the native (kernel) address space.
        csdresult = obj.Object("unsigned long", offset = self.CmNtCSDVersion, vm = self.obj_native_vm)
        # Drop the low byte and keep the rest as the SP number.
        return (csdresult >> 8) & 0xffffffff
    def processes(self):
        """Enumerate processes via the PsActiveProcessHead list.

        Raises:
            AttributeError: if the list head cannot be dereferenced
                (usually a wrong --profile).
        """
        # This is defined as a pointer to _LIST_ENTRY in the overlay
        list_head = self.PsActiveProcessHead.dereference()
        if not list_head:
            raise AttributeError("Could not list tasks, please verify your --profile with kdbgscan")
        for l in list_head.list_of_type("_EPROCESS", "ActiveProcessLinks"):
            yield l
    def modules(self):
        """Enumerate loaded modules via the PsLoadedModuleList list.

        Raises:
            AttributeError: if the list head cannot be dereferenced
                (usually a wrong --profile).
        """
        # This is defined as a pointer to _LIST_ENTRY in the overlay
        list_head = self.PsLoadedModuleList.dereference()
        if not list_head:
            raise AttributeError("Could not list modules, please verify your --profile with kdbgscan")
        for l in list_head.dereference_as("_LIST_ENTRY").list_of_type(
            "_LDR_DATA_TABLE_ENTRY", "InLoadOrderLinks"):
            yield l
    def dbgkd_version64(self):
        """Scan backwards from the base of KDBG to find the
        _DBGKD_GET_VERSION64. We have a winner when kernel
        base addresses and process list head match."""
        # Account for address masking differences in x86 and x64
        memory_model = self.obj_native_vm.profile.metadata.get('memory_model', '32bit')
        # Scan the entire 0x1000-byte page containing the KDBG,
        # one byte at a time.
        dbgkd_off = self.obj_offset & 0xFFFFFFFFFFFFF000
        dbgkd_end = dbgkd_off + 0x1000
        # The _DBGKD_GET_VERSION64 structure is autogenerated, so
        # this value should be correct for each profile
        dbgkd_size = self.obj_native_vm.profile.get_obj_size("_DBGKD_GET_VERSION64")
        while dbgkd_off <= (dbgkd_end - dbgkd_size):
            dbgkd = obj.Object("_DBGKD_GET_VERSION64",
                               offset = dbgkd_off,
                               vm = self.obj_native_vm)
            # On 32-bit profiles only the low dword of these 64-bit
            # fields is meaningful, so mask before comparing.
            if memory_model == "32bit":
                KernBase = dbgkd.KernBase & 0xFFFFFFFF
                PsLoadedModuleList = dbgkd.PsLoadedModuleList & 0xFFFFFFFF
            else:
                KernBase = dbgkd.KernBase
                PsLoadedModuleList = dbgkd.PsLoadedModuleList
            if ((KernBase == self.KernBase) and (PsLoadedModuleList == self.PsLoadedModuleList)):
                return dbgkd
            dbgkd_off += 1
        return obj.NoneObject("Cannot find _DBGKD_GET_VERSION64")
    def kpcrs(self):
        """Generator for KPCRs referenced by this KDBG.
        These are returned in the order in which the
        processors were registered.
        """
        # The embedded PRCB member is named differently per bitness.
        if self.obj_native_vm.profile.metadata.get('memory_model', '32bit') == '32bit':
            prcb_member = "PrcbData"
        else:
            prcb_member = "Prcb"
        cpu_array = self.KiProcessorBlock.dereference()
        for p in cpu_array:
            # Terminate the loop if an item in the array is
            # invalid (ie paged) or if the pointer is NULL.
            if p == None or p == 0:
                break
            # Recover the containing _KPCR address by subtracting the
            # offset of the embedded PRCB member from the _KPRCB address.
            kpcrb = p.dereference_as("_KPRCB")
            kpcr = obj.Object("_KPCR", offset = kpcrb.obj_offset -
                self.obj_native_vm.profile.get_obj_offset("_KPCR", prcb_member),
                vm = self.obj_native_vm,
                parent = self,
                )
            if kpcr.is_valid():
                yield kpcr
class KDBGObjectClass(obj.ProfileModification):
    """Add the KDBG object class to all Windows profiles"""
    before = ["WindowsObjectClasses"]
    conditions = {'os': lambda x: x == 'windows'}
    def modification(self, profile):
        """Register the _KDDEBUGGER_DATA64 object class and its overlay.

        Sizes the KiProcessorBlock pointer array to the maximum processor
        count for the profile's bitness.
        """
        profile.object_classes.update({'_KDDEBUGGER_DATA64': _KDDEBUGGER_DATA64})
        # This value is stored in nt!_KeMaximumProcessors
        # BUG FIX: the previous code tested the truthiness of
        # profile.metadata.get('memory_model', '32bit'), which always
        # returns a non-empty string, so max_processors was 32 for every
        # profile. Compare against '32bit' explicitly so 64-bit profiles
        # get 64 KiProcessorBlock slots.
        if profile.metadata.get('memory_model', '32bit') == '32bit':
            max_processors = 32
        else:
            max_processors = 64
        profile.merge_overlay({
            '_KDDEBUGGER_DATA64': [ None, {
            'NtBuildLab': [ None, ['pointer', ['String', dict(length = 32)]]],
            'KiProcessorBlock': [ None, ['pointer', ['array', max_processors, ['pointer', ['_KPRCB']]]]],
            'PsActiveProcessHead': [ None, ['pointer', ['_LIST_ENTRY']]],
            'PsLoadedModuleList': [ None, ['pointer', ['_LIST_ENTRY']]],
            'MmUnloadedDrivers' : [ None, ['pointer', ['pointer', ['array', lambda x : x.MmLastUnloadedDriver.dereference(), ['_UNLOADED_DRIVER']]]]],
            'MmLastUnloadedDriver' : [ None, ['pointer', ['unsigned int']]],
            }]})
class UnloadedDriverVTypes(obj.ProfileModification):
    """Add the unloaded driver structure definitions"""
    conditions = {'os': lambda x: x == "windows"}
    def modification(self, profile):
        """Install the _UNLOADED_DRIVER vtype sized for the profile bitness."""
        # Pointer-width-dependent layout: (struct size, StartAddress,
        # EndAddress, CurrentTime offsets).
        if profile.metadata.get("memory_model", "32bit") == "32bit":
            size, start_off, end_off, time_off = 24, 8, 12, 16
        else:
            size, start_off, end_off, time_off = 40, 16, 24, 32
        profile.vtypes.update({
            '_UNLOADED_DRIVER': [size, {
                'Name': [0, ['_UNICODE_STRING']],
                'StartAddress': [start_off, ['address']],
                'EndAddress': [end_off, ['address']],
                'CurrentTime': [time_off, ['WinTimeStamp', {}]],
            }],
        })
# VType layout of the KDBG (_KDDEBUGGER_DATA64) debugger data block.
# The header carries the 'KDBG' OwnerTag used for validation; most members
# are 64-bit values naming kernel variables, and the Offset*/Size*/Gdt*
# members near the end are 16-bit structure-layout hints.
kdbg_vtypes = {
    '_DBGKD_DEBUG_DATA_HEADER64' : [ 0x18, {
    'List' : [ 0x0, ['LIST_ENTRY64']],
    'OwnerTag' : [ 0x10, ['unsigned long']],
    'Size' : [ 0x14, ['unsigned long']],
    } ],
    '_KDDEBUGGER_DATA64' : [ 0x340, {
    'Header' : [ 0x0, ['_DBGKD_DEBUG_DATA_HEADER64']],
    'KernBase' : [ 0x18, ['unsigned long long']],
    'BreakpointWithStatus' : [ 0x20, ['unsigned long long']],
    'SavedContext' : [ 0x28, ['unsigned long long']],
    'ThCallbackStack' : [ 0x30, ['unsigned short']],
    'NextCallback' : [ 0x32, ['unsigned short']],
    'FramePointer' : [ 0x34, ['unsigned short']],
    'KiCallUserMode' : [ 0x38, ['unsigned long long']],
    'KeUserCallbackDispatcher' : [ 0x40, ['unsigned long long']],
    'PsLoadedModuleList' : [ 0x48, ['unsigned long long']],
    'PsActiveProcessHead' : [ 0x50, ['unsigned long long']],
    'PspCidTable' : [ 0x58, ['unsigned long long']],
    'ExpSystemResourcesList' : [ 0x60, ['unsigned long long']],
    'ExpPagedPoolDescriptor' : [ 0x68, ['unsigned long long']],
    'ExpNumberOfPagedPools' : [ 0x70, ['unsigned long long']],
    'KeTimeIncrement' : [ 0x78, ['unsigned long long']],
    'KeBugCheckCallbackListHead' : [ 0x80, ['unsigned long long']],
    'KiBugcheckData' : [ 0x88, ['unsigned long long']],
    'IopErrorLogListHead' : [ 0x90, ['unsigned long long']],
    'ObpRootDirectoryObject' : [ 0x98, ['unsigned long long']],
    'ObpTypeObjectType' : [ 0xa0, ['unsigned long long']],
    # Memory-manager variable addresses.
    'MmSystemCacheStart' : [ 0xa8, ['unsigned long long']],
    'MmSystemCacheEnd' : [ 0xb0, ['unsigned long long']],
    'MmSystemCacheWs' : [ 0xb8, ['unsigned long long']],
    'MmPfnDatabase' : [ 0xc0, ['unsigned long long']],
    'MmSystemPtesStart' : [ 0xc8, ['unsigned long long']],
    'MmSystemPtesEnd' : [ 0xd0, ['unsigned long long']],
    'MmSubsectionBase' : [ 0xd8, ['unsigned long long']],
    'MmNumberOfPagingFiles' : [ 0xe0, ['unsigned long long']],
    'MmLowestPhysicalPage' : [ 0xe8, ['unsigned long long']],
    'MmHighestPhysicalPage' : [ 0xf0, ['unsigned long long']],
    'MmNumberOfPhysicalPages' : [ 0xf8, ['unsigned long long']],
    'MmMaximumNonPagedPoolInBytes' : [ 0x100, ['unsigned long long']],
    'MmNonPagedSystemStart' : [ 0x108, ['unsigned long long']],
    'MmNonPagedPoolStart' : [ 0x110, ['unsigned long long']],
    'MmNonPagedPoolEnd' : [ 0x118, ['unsigned long long']],
    'MmPagedPoolStart' : [ 0x120, ['unsigned long long']],
    'MmPagedPoolEnd' : [ 0x128, ['unsigned long long']],
    'MmPagedPoolInformation' : [ 0x130, ['unsigned long long']],
    'MmPageSize' : [ 0x138, ['unsigned long long']],
    'MmSizeOfPagedPoolInBytes' : [ 0x140, ['unsigned long long']],
    'MmTotalCommitLimit' : [ 0x148, ['unsigned long long']],
    'MmTotalCommittedPages' : [ 0x150, ['unsigned long long']],
    'MmSharedCommit' : [ 0x158, ['unsigned long long']],
    'MmDriverCommit' : [ 0x160, ['unsigned long long']],
    'MmProcessCommit' : [ 0x168, ['unsigned long long']],
    'MmPagedPoolCommit' : [ 0x170, ['unsigned long long']],
    'MmExtendedCommit' : [ 0x178, ['unsigned long long']],
    'MmZeroedPageListHead' : [ 0x180, ['unsigned long long']],
    'MmFreePageListHead' : [ 0x188, ['unsigned long long']],
    'MmStandbyPageListHead' : [ 0x190, ['unsigned long long']],
    'MmModifiedPageListHead' : [ 0x198, ['unsigned long long']],
    'MmModifiedNoWritePageListHead' : [ 0x1a0, ['unsigned long long']],
    'MmAvailablePages' : [ 0x1a8, ['unsigned long long']],
    'MmResidentAvailablePages' : [ 0x1b0, ['unsigned long long']],
    'PoolTrackTable' : [ 0x1b8, ['unsigned long long']],
    'NonPagedPoolDescriptor' : [ 0x1c0, ['unsigned long long']],
    'MmHighestUserAddress' : [ 0x1c8, ['unsigned long long']],
    'MmSystemRangeStart' : [ 0x1d0, ['unsigned long long']],
    'MmUserProbeAddress' : [ 0x1d8, ['unsigned long long']],
    'KdPrintCircularBuffer' : [ 0x1e0, ['unsigned long long']],
    'KdPrintCircularBufferEnd' : [ 0x1e8, ['unsigned long long']],
    'KdPrintWritePointer' : [ 0x1f0, ['unsigned long long']],
    'KdPrintRolloverCount' : [ 0x1f8, ['unsigned long long']],
    'MmLoadedUserImageList' : [ 0x200, ['unsigned long long']],
    'NtBuildLab' : [ 0x208, ['unsigned long long']],
    'KiNormalSystemCall' : [ 0x210, ['unsigned long long']],
    'KiProcessorBlock' : [ 0x218, ['unsigned long long']],
    'MmUnloadedDrivers' : [ 0x220, ['unsigned long long']],
    'MmLastUnloadedDriver' : [ 0x228, ['unsigned long long']],
    'MmTriageActionTaken' : [ 0x230, ['unsigned long long']],
    'MmSpecialPoolTag' : [ 0x238, ['unsigned long long']],
    'KernelVerifier' : [ 0x240, ['unsigned long long']],
    'MmVerifierData' : [ 0x248, ['unsigned long long']],
    'MmAllocatedNonPagedPool' : [ 0x250, ['unsigned long long']],
    'MmPeakCommitment' : [ 0x258, ['unsigned long long']],
    'MmTotalCommitLimitMaximum' : [ 0x260, ['unsigned long long']],
    'CmNtCSDVersion' : [ 0x268, ['unsigned long long']],
    'MmPhysicalMemoryBlock' : [ 0x270, ['unsigned long long']],
    'MmSessionBase' : [ 0x278, ['unsigned long long']],
    'MmSessionSize' : [ 0x280, ['unsigned long long']],
    'MmSystemParentTablePage' : [ 0x288, ['unsigned long long']],
    'MmVirtualTranslationBase' : [ 0x290, ['unsigned long long']],
    # 16-bit structure-offset hints for key kernel structures.
    'OffsetKThreadNextProcessor' : [ 0x298, ['unsigned short']],
    'OffsetKThreadTeb' : [ 0x29a, ['unsigned short']],
    'OffsetKThreadKernelStack' : [ 0x29c, ['unsigned short']],
    'OffsetKThreadInitialStack' : [ 0x29e, ['unsigned short']],
    'OffsetKThreadApcProcess' : [ 0x2a0, ['unsigned short']],
    'OffsetKThreadState' : [ 0x2a2, ['unsigned short']],
    'OffsetKThreadBStore' : [ 0x2a4, ['unsigned short']],
    'OffsetKThreadBStoreLimit' : [ 0x2a6, ['unsigned short']],
    'SizeEProcess' : [ 0x2a8, ['unsigned short']],
    'OffsetEprocessPeb' : [ 0x2aa, ['unsigned short']],
    'OffsetEprocessParentCID' : [ 0x2ac, ['unsigned short']],
    'OffsetEprocessDirectoryTableBase' : [ 0x2ae, ['unsigned short']],
    'SizePrcb' : [ 0x2b0, ['unsigned short']],
    'OffsetPrcbDpcRoutine' : [ 0x2b2, ['unsigned short']],
    'OffsetPrcbCurrentThread' : [ 0x2b4, ['unsigned short']],
    'OffsetPrcbMhz' : [ 0x2b6, ['unsigned short']],
    'OffsetPrcbCpuType' : [ 0x2b8, ['unsigned short']],
    'OffsetPrcbVendorString' : [ 0x2ba, ['unsigned short']],
    'OffsetPrcbProcStateContext' : [ 0x2bc, ['unsigned short']],
    'OffsetPrcbNumber' : [ 0x2be, ['unsigned short']],
    'SizeEThread' : [ 0x2c0, ['unsigned short']],
    'KdPrintCircularBufferPtr' : [ 0x2c8, ['unsigned long long']],
    'KdPrintBufferSize' : [ 0x2d0, ['unsigned long long']],
    'KeLoaderBlock' : [ 0x2d8, ['unsigned long long']],
    'SizePcr' : [ 0x2e0, ['unsigned short']],
    'OffsetPcrSelfPcr' : [ 0x2e2, ['unsigned short']],
    'OffsetPcrCurrentPrcb' : [ 0x2e4, ['unsigned short']],
    'OffsetPcrContainedPrcb' : [ 0x2e6, ['unsigned short']],
    'OffsetPcrInitialBStore' : [ 0x2e8, ['unsigned short']],
    'OffsetPcrBStoreLimit' : [ 0x2ea, ['unsigned short']],
    'OffsetPcrInitialStack' : [ 0x2ec, ['unsigned short']],
    'OffsetPcrStackLimit' : [ 0x2ee, ['unsigned short']],
    'OffsetPrcbPcrPage' : [ 0x2f0, ['unsigned short']],
    'OffsetPrcbProcStateSpecialReg' : [ 0x2f2, ['unsigned short']],
    # GDT selector values.
    'GdtR0Code' : [ 0x2f4, ['unsigned short']],
    'GdtR0Data' : [ 0x2f6, ['unsigned short']],
    'GdtR0Pcr' : [ 0x2f8, ['unsigned short']],
    'GdtR3Code' : [ 0x2fa, ['unsigned short']],
    'GdtR3Data' : [ 0x2fc, ['unsigned short']],
    'GdtR3Teb' : [ 0x2fe, ['unsigned short']],
    'GdtLdt' : [ 0x300, ['unsigned short']],
    'GdtTss' : [ 0x302, ['unsigned short']],
    'Gdt64R3CmCode' : [ 0x304, ['unsigned short']],
    'Gdt64R3CmTeb' : [ 0x306, ['unsigned short']],
    'IopNumTriageDumpDataBlocks' : [ 0x308, ['unsigned long long']],
    'IopTriageDumpDataBlocks' : [ 0x310, ['unsigned long long']],
    'VfCrashDataBlock' : [ 0x318, ['unsigned long long']],
    'MmBadPagesDetected' : [ 0x320, ['unsigned long long']],
    'MmZeroedPageSingleBitErrorsDetected' : [ 0x328, ['unsigned long long']],
    'EtwpDebuggerData' : [ 0x330, ['unsigned long long']],
    'OffsetPrcbContext' : [ 0x338, ['unsigned short']],
    } ],
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_snmp import ApiParameters
from library.modules.bigip_snmp import ModuleParameters
from library.modules.bigip_snmp import ModuleManager
from library.modules.bigip_snmp import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_snmp import ApiParameters
from ansible.modules.network.f5.bigip_snmp import ModuleParameters
from ansible.modules.network.f5.bigip_snmp import ModuleManager
from ansible.modules.network.f5.bigip_snmp import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
# Directory holding on-disk fixture files, plus a process-wide cache.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load and memoize a fixture file, parsing it as JSON when possible.

    Returns the parsed object for JSON content, or the raw text otherwise.
    Subsequent calls for the same file hit the cache.
    """
    full_path = os.path.join(fixture_path, name)
    if full_path in fixture_data:
        return fixture_data[full_path]
    with open(full_path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON: keep the raw string.
        pass
    fixture_data[full_path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Tests for the bigip_snmp parameter adapter classes."""
    def test_module_parameters(self):
        """Module-style (snake_case) args surface unchanged as properties."""
        args = dict(
            agent_status_traps='enabled',
            agent_authentication_traps='enabled',
            contact='Alice@foo.org',
            device_warning_traps='enabled',
            location='Lunar orbit',
            password='password',
            server='localhost',
            user='admin'
        )
        p = ModuleParameters(params=args)
        assert p.agent_status_traps == 'enabled'
        assert p.agent_authentication_traps == 'enabled'
        assert p.device_warning_traps == 'enabled'
        assert p.location == 'Lunar orbit'
        assert p.contact == 'Alice@foo.org'
    def test_module_parameters_disabled(self):
        """The 'disabled' trap values also pass through unchanged."""
        args = dict(
            agent_status_traps='disabled',
            agent_authentication_traps='disabled',
            device_warning_traps='disabled',
            password='password',
            server='localhost',
            user='admin'
        )
        p = ModuleParameters(params=args)
        assert p.agent_status_traps == 'disabled'
        assert p.agent_authentication_traps == 'disabled'
        assert p.device_warning_traps == 'disabled'
    def test_api_parameters(self):
        """API-style (camelCase) keys map onto the module-style properties."""
        args = dict(
            agentTrap='enabled',
            authTrap='enabled',
            bigipTraps='enabled',
            sysLocation='Lunar orbit',
            sysContact='Alice@foo.org',
        )
        p = ApiParameters(params=args)
        assert p.agent_status_traps == 'enabled'
        assert p.agent_authentication_traps == 'enabled'
        assert p.device_warning_traps == 'enabled'
        assert p.location == 'Lunar orbit'
        assert p.contact == 'Alice@foo.org'
    def test_api_parameters_disabled(self):
        """API-style 'disabled' values map onto the module-style properties."""
        args = dict(
            agentTrap='disabled',
            authTrap='disabled',
            bigipTraps='disabled',
        )
        p = ApiParameters(params=args)
        assert p.agent_status_traps == 'disabled'
        assert p.agent_authentication_traps == 'disabled'
        assert p.device_warning_traps == 'disabled'
class TestManager(unittest.TestCase):
    """Tests for ModuleManager.exec_module() update behaviour."""
    def setUp(self):
        # Shared argument spec used to build an AnsibleModule per test.
        self.spec = ArgumentSpec()
    # NOTE(review): the test methods below accept *args, which normally
    # absorbs mock.patch decorator arguments, but no @patch decorators are
    # visible here -- confirm whether decorators were intended.
    def test_update_agent_status_traps(self, *args):
        """Changing agent_status_traps from disabled to enabled reports a change."""
        set_module_args(dict(
            agent_status_traps='enabled',
            password='password',
            server='localhost',
            user='admin'
        ))
        # Configure the parameters that would be returned by querying the
        # remote device
        # NOTE(review): this feeds a module-style key to ApiParameters;
        # the API-side key is 'agentTrap' elsewhere in this file -- verify
        # ApiParameters actually reads this spelling.
        current = ApiParameters(
            params=dict(
                agent_status_traps='disabled'
            )
        )
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen
        mm.update_on_device = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['agent_status_traps'] == 'enabled'
    def test_update_allowed_addresses(self, *args):
        """New allowed_addresses entries are merged and reported sorted."""
        set_module_args(dict(
            allowed_addresses=[
                '127.0.0.0/8',
                '10.10.10.10',
                'foo',
                'baz.foo.com'
            ],
            password='password',
            server='localhost',
            user='admin'
        ))
        # Configure the parameters that would be returned by querying the
        # remote device
        current = ApiParameters(
            params=dict(
                allowed_addresses=['127.0.0.0/8']
            )
        )
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen
        mm.update_on_device = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)
        results = mm.exec_module()
        assert results['changed'] is True
        assert len(results['allowed_addresses']) == 4
        # Result is expected in sorted order.
        assert results['allowed_addresses'] == [
            '10.10.10.10', '127.0.0.0/8', 'baz.foo.com', 'foo'
        ]
    def test_update_allowed_addresses_default(self, *args):
        """The special 'default' value resets the list to 127.0.0.0/8."""
        set_module_args(dict(
            allowed_addresses=[
                'default'
            ],
            password='password',
            server='localhost',
            user='admin'
        ))
        # Configure the parameters that would be returned by querying the
        # remote device
        current = ApiParameters(
            params=dict(
                allowed_addresses=['10.0.0.0']
            )
        )
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen
        mm.update_on_device = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)
        results = mm.exec_module()
        assert results['changed'] is True
        assert len(results['allowed_addresses']) == 1
        assert results['allowed_addresses'] == ['127.0.0.0/8']
    def test_update_allowed_addresses_empty(self, *args):
        """An empty-string entry also resets the list to 127.0.0.0/8."""
        set_module_args(dict(
            allowed_addresses=[''],
            password='password',
            server='localhost',
            user='admin'
        ))
        # Configure the parameters that would be returned by querying the
        # remote device
        current = ApiParameters(
            params=dict(
                allowed_addresses=['10.0.0.0']
            )
        )
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen
        mm.update_on_device = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)
        results = mm.exec_module()
        assert results['changed'] is True
        assert len(results['allowed_addresses']) == 1
        assert results['allowed_addresses'] == ['127.0.0.0/8']
|
"""The virtual interfaces extension."""
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import network
from nova.openstack.common import log as logging
# Module-level logger for this extension.
LOG = logging.getLogger(__name__)
# Policy check gating access to the os-virtual-interfaces extension.
authorize = extensions.extension_authorizer('compute', 'virtual_interfaces')
# XML namespace map used when serializing responses (default v1.1 namespace).
vif_nsmap = {None: wsgi.XMLNS_V11}
class VirtualInterfaceTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the virtual-interface list view."""
    def construct(self):
        """Build the <virtual_interfaces> master template."""
        root = xmlutil.TemplateElement('virtual_interfaces')
        vif_elem = xmlutil.SubTemplateElement(root, 'virtual_interface',
                                              selector='virtual_interfaces')
        # Each <virtual_interface> element exposes these two attributes.
        for attribute in ('id', 'mac_address'):
            vif_elem.set(attribute)
        return xmlutil.MasterTemplate(root, 1, nsmap=vif_nsmap)
def _translate_vif_summary_view(_context, vif):
"""Maps keys for VIF summary view."""
d = {}
d['id'] = vif['uuid']
d['mac_address'] = vif['address']
return d
class ServerVirtualInterfaceController(object):
    """The instance VIF API controller for the OpenStack API.
    """
    def __init__(self):
        # Compute API resolves the instance; network API lists its VIFs.
        self.compute_api = compute.API()
        self.network_api = network.API()
        super(ServerVirtualInterfaceController, self).__init__()
    def _items(self, req, server_id, entity_maker):
        """Returns a list of VIFs, transformed through entity_maker."""
        context = req.environ['nova.context']
        instance = self.compute_api.get(context, server_id)
        vifs = self.network_api.get_vifs_by_instance(context, instance)
        # Apply the request's pagination (limit/marker) parameters.
        limited_list = common.limited(vifs, req)
        res = [entity_maker(context, vif) for vif in limited_list]
        return {'virtual_interfaces': res}
    @wsgi.serializers(xml=VirtualInterfaceTemplate)
    def index(self, req, server_id):
        """Returns the list of VIFs for a given instance."""
        # Enforce the extension's access policy before doing any work.
        authorize(req.environ['nova.context'])
        return self._items(req, server_id,
                           entity_maker=_translate_vif_summary_view)
class Virtual_interfaces(extensions.ExtensionDescriptor):
    """Virtual interface support"""
    name = "VirtualInterfaces"
    alias = "os-virtual-interfaces"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "virtual_interfaces/api/v1.1")
    updated = "2011-08-17T00:00:00+00:00"
    def get_resources(self):
        """Register os-virtual-interfaces as a sub-resource of servers."""
        resources = []
        res = extensions.ResourceExtension(
            'os-virtual-interfaces',
            controller=ServerVirtualInterfaceController(),
            # Nested under /servers/<server_id>/os-virtual-interfaces.
            parent=dict(member_name='server', collection_name='servers'))
        resources.append(res)
        return resources
|
'''
Unit tests for yedit
'''
import unittest
import os
from yedit import Yedit
from yedit import YeditException
class YeditTest(unittest.TestCase):
    '''
    Test class for yedit
    '''
    # Baseline document written to disk before each test.
    data = {'a': 'a',
            'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
           }
    filename = 'yedit_test.yml'
    def setUp(self):
        ''' setup method will create a file and set to known configuration '''
        yed = Yedit(YeditTest.filename)
        yed.yaml_dict = YeditTest.data
        yed.write()
    def test_load(self):
        ''' Testing a get '''
        yed = Yedit('yedit_test.yml')
        self.assertEqual(yed.yaml_dict, self.data)
    def test_write(self):
        ''' Testing a simple write '''
        yed = Yedit('yedit_test.yml')
        yed.put('key1', 1)
        yed.write()
        # dict.has_key() was removed in Python 3; use membership tests.
        self.assertIn('key1', yed.yaml_dict)
        self.assertEqual(yed.yaml_dict['key1'], 1)
    def test_write_x_y_z(self):
        '''Testing a write of multilayer key'''
        yed = Yedit('yedit_test.yml')
        yed.put('x.y.z', 'modified')
        yed.write()
        yed.load()
        self.assertEqual(yed.get('x.y.z'), 'modified')
    def test_delete_a(self):
        '''Testing a simple delete '''
        yed = Yedit('yedit_test.yml')
        yed.delete('a')
        yed.write()
        yed.load()
        self.assertNotIn('a', yed.yaml_dict)
    def test_delete_b_c(self):
        '''Testing delete of layered key '''
        yed = Yedit('yedit_test.yml', separator=':')
        yed.delete('b:c')
        yed.write()
        yed.load()
        self.assertIn('b', yed.yaml_dict)
        self.assertNotIn('c', yed.yaml_dict['b'])
    def test_create(self):
        '''Testing a create '''
        os.unlink(YeditTest.filename)
        yed = Yedit('yedit_test.yml')
        yed.create('foo', 'bar')
        yed.write()
        yed.load()
        self.assertIn('foo', yed.yaml_dict)
        self.assertEqual(yed.yaml_dict['foo'], 'bar')
    def test_create_content(self):
        '''Testing a create with content '''
        content = {"foo": "bar"}
        yed = Yedit("yedit_test.yml", content)
        yed.write()
        yed.load()
        self.assertIn('foo', yed.yaml_dict)
        # BUG FIX: was assertTrue(value, 'bar'), where 'bar' is just the
        # failure message -- it never compared anything.
        self.assertEqual(yed.yaml_dict['foo'], 'bar')
    def test_array_insert(self):
        '''Testing an insert into an existing array '''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertTrue(yed.get('b:c:d[0]') == 'inject')
    def test_array_insert_first_index(self):
        '''Testing that an insert at [0] shifts the old first element '''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertTrue(yed.get('b:c:d[1]') == 'f')
    def test_array_insert_second_index(self):
        '''Testing that an insert at [0] shifts the old second element '''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertTrue(yed.get('b:c:d[2]') == 'g')
    def test_dict_array_dict_access(self):
        '''Testing access through dict/array/dict nesting'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.get('b:c:d[0]:[0]:x:y') == 'inject')
    def test_dict_array_dict_replace(self):
        '''Testing replace through dict/array/dict nesting'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        yed.put('b:c:d[0]:[0]:x:y', 'testing')
        self.assertIn('b', yed.yaml_dict)
        self.assertIn('c', yed.yaml_dict['b'])
        self.assertIn('d', yed.yaml_dict['b']['c'])
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
        self.assertIn('y', yed.yaml_dict['b']['c']['d'][0][0]['x'])
        # BUG FIX: was assertTrue(value, 'testing') -- the second argument
        # is only a failure message, so the value was never compared.
        self.assertEqual(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'], 'testing')
    def test_dict_array_dict_remove(self):
        '''Testing multilevel delete'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        yed.delete('b:c:d[0]:[0]:x:y')
        self.assertIn('b', yed.yaml_dict)
        self.assertIn('c', yed.yaml_dict['b'])
        self.assertIn('d', yed.yaml_dict['b']['c'])
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
        self.assertNotIn('y', yed.yaml_dict['b']['c']['d'][0][0]['x'])
    def test_key_exists_in_dict(self):
        '''Testing exist in dict'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.exists('b:c', 'd'))
    def test_key_exists_in_list(self):
        '''Testing exist in list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.exists('b:c:d', [{'x': {'y': 'inject'}}]))
        self.assertFalse(yed.exists('b:c:d', [{'x': {'y': 'test'}}]))
    def test_update_to_list_with_index(self):
        '''Testing update to list with index'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6], index=2)
        self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))
    def test_update_to_list_with_curr_value(self):
        '''Testing update to list with current value'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6], curr_value=3)
        self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))
    def test_update_to_list(self):
        '''Testing update to list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6])
        self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))
    def test_append_twice_to_list(self):
        '''Testing append to list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.append('x:y:z', [5, 6])
        yed.append('x:y:z', [5, 6])
        self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]])
        # pylint: disable=maybe-no-member
        self.assertTrue(2 == yed.get('x:y:z').count([5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))
    def test_add_item_to_dict(self):
        '''Testing update to dict'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', {'a': 1, 'b': 2})
        yed.update('x:y:z', {'c': 3, 'd': 4})
        self.assertTrue(yed.get('x:y:z') == {'a': 1, 'b': 2, 'c': 3, 'd': 4})
        self.assertTrue(yed.exists('x:y:z', {'c': 3}))
    def test_first_level_dict_with_none_value(self):
        '''test dict value with none value'''
        yed = Yedit(content={'a': None}, separator=":")
        yed.put('a:b:c', 'test')
        self.assertTrue(yed.get('a:b:c') == 'test')
        # BUG FIX: was assertTrue(value, {...}) -- the dict was just the
        # failure message; compare the retrieved subtree explicitly.
        self.assertEqual(yed.get('a:b'), {'c': 'test'})
    def test_adding_yaml_variable(self):
        '''test writing a jinja-style template variable'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('z:y', '{{test}}')
        self.assertTrue(yed.get('z:y') == '{{test}}')
    def test_keys_with_underscore(self):
        '''test keys that contain underscores'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('z_:y_y', {'test': '{{test}}'})
        self.assertTrue(yed.get('z_:y_y') == {'test': '{{test}}'})
    def test_first_level_array_update(self):
        '''test update on top level array'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
        yed.update('', {'c': 4})
        self.assertTrue({'c': 4} in yed.get(''))
    def test_first_level_array_delete(self):
        '''test remove top level key'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
        yed.delete('')
        self.assertTrue({'b': 3} not in yed.get(''))
    def test_first_level_array_get(self):
        '''test get of the entire top level array'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
        yed.get('')
        self.assertTrue([{'a': 1}, {'b': 2}, {'b': 3}] == yed.yaml_dict)
    def test_pop_list_item(self):
        '''test removing a value from a top level list'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
        yed.pop('', {'b': 2})
        self.assertTrue([{'a': 1}, {'b': 3}] == yed.yaml_dict)
    def test_pop_list_item_2(self):
        '''test removing an int from a top level list'''
        # PY3 FIX: range() no longer returns a list, so materialize it
        # before calling pop(); behaves identically on Python 2.
        z = list(range(10))
        yed = Yedit(content=z, separator=':')
        yed.pop('', 5)
        z.pop(5)
        self.assertTrue(z == yed.yaml_dict)
    def test_pop_dict_key(self):
        '''test removing a key from a nested dict'''
        yed = Yedit(content={'a': {'b': {'c': 1, 'd': 2}}}, separator='#')
        yed.pop('a#b', 'c')
        self.assertTrue({'a': {'b': {'d': 2}}} == yed.yaml_dict)
    def test_accessing_path_with_unexpected_objects(self):
        '''test providing source path objects that differ from current object state'''
        yed = Yedit(content={'a': {'b': {'c': ['d', 'e']}}})
        with self.assertRaises(YeditException):
            yed.put('a.b.c.d', 'x')
    def test_creating_new_objects_with_embedded_list(self):
        '''test creating new objects with an embedded list in the creation path'''
        yed = Yedit(content={'a': {'b': 12}})
        with self.assertRaises(YeditException):
            yed.put('new.stuff[0].here', 'value')
    def test_creating_new_objects_with_trailing_list(self):
        '''test creating new object(s) where the final piece is a list'''
        yed = Yedit(content={'a': {'b': 12}})
        with self.assertRaises(YeditException):
            yed.put('new.stuff.here[0]', 'item')
    def tearDown(self):
        '''TearDown method'''
        os.unlink(YeditTest.filename)
# Allow running the yedit test-suite directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
from django.conf.urls import url
from oscar.core.application import DashboardApplication
from oscar.core.loading import get_class
class ShippingDashboardApplication(DashboardApplication):
    """Dashboard application exposing CRUD views for weight-based
    shipping methods and their weight bands.
    """
    # NOTE(review): `name` usually provides the URL namespace; None leaves
    # these URLs un-namespaced -- confirm this is intentional.
    name = None
    # All views require staff access.
    default_permissions = ['is_staff']
    # Views are resolved via get_class so projects can override them.
    weight_method_list_view = get_class(
        'dashboard.shipping.views', 'WeightBasedListView')
    weight_method_create_view = get_class(
        'dashboard.shipping.views', 'WeightBasedCreateView')
    weight_method_edit_view = get_class(
        'dashboard.shipping.views', 'WeightBasedUpdateView')
    weight_method_delete_view = get_class(
        'dashboard.shipping.views', 'WeightBasedDeleteView')
    # This doubles as the weight_band create view
    weight_method_detail_view = get_class(
        'dashboard.shipping.views', 'WeightBasedDetailView')
    weight_band_edit_view = get_class(
        'dashboard.shipping.views', 'WeightBandUpdateView')
    weight_band_delete_view = get_class(
        'dashboard.shipping.views', 'WeightBandDeleteView')
    def get_urls(self):
        """Return the URL patterns for the weight-based shipping views."""
        urlpatterns = [
            url(r'^weight-based/$', self.weight_method_list_view.as_view(),
                name='shipping-method-list'),
            url(r'^weight-based/create/$',
                self.weight_method_create_view.as_view(),
                name='shipping-method-create'),
            url(r'^weight-based/(?P<pk>\d+)/$',
                self.weight_method_detail_view.as_view(),
                name='shipping-method-detail'),
            url(r'^weight-based/(?P<pk>\d+)/edit/$',
                self.weight_method_edit_view.as_view(),
                name='shipping-method-edit'),
            url(r'^weight-based/(?P<pk>\d+)/delete/$',
                self.weight_method_delete_view.as_view(),
                name='shipping-method-delete'),
            # Weight bands are addressed relative to their owning method.
            url(r'^weight-based/(?P<method_pk>\d+)/bands/(?P<pk>\d+)/$',
                self.weight_band_edit_view.as_view(),
                name='shipping-method-band-edit'),
            url(r'^weight-based/(?P<method_pk>\d+)/bands/(?P<pk>\d+)/delete/$',
                self.weight_band_delete_view.as_view(),
                name='shipping-method-band-delete'),
        ]
        return self.post_process_urls(urlpatterns)
# Singleton application instance imported by the dashboard URL config.
application = ShippingDashboardApplication()
|
"""Tests for the openmc.deplete.ReactionRates class."""
import numpy as np
from openmc.deplete import ReactionRates
def test_get_set():
    """Tests the get/set methods."""
    local_mats = ["10000", "10001"]
    nuclides = ["U238", "U235"]
    reactions = ["fission", "(n,gamma)"]
    rates = ReactionRates(local_mats, nuclides, reactions)
    assert rates.shape == (2, 2, 2)
    assert np.all(rates == 0.0)

    # Distinct value 1..8 per (material, nuclide, reaction) slot, in the
    # same order the original assignments used.
    def slot_value(i_mat, i_nuc, i_rx):
        return float(1 + i_mat + 2 * i_nuc + 4 * i_rx)

    for i_rx, rx in enumerate(reactions):
        for i_nuc, nuc in enumerate(nuclides):
            for i_mat, mat in enumerate(local_mats):
                rates.set(mat, nuc, rx, slot_value(i_mat, i_nuc, i_rx))

    # String indexing
    for i_rx, rx in enumerate(reactions):
        for i_nuc, nuc in enumerate(nuclides):
            for i_mat, mat in enumerate(local_mats):
                assert rates.get(mat, nuc, rx) == slot_value(i_mat, i_nuc, i_rx)

    # Int indexing addresses the same storage as string indexing
    for i_rx in range(2):
        for i_nuc in range(2):
            for i_mat in range(2):
                assert rates[i_mat, i_nuc, i_rx] == slot_value(i_mat, i_nuc, i_rx)

    # A write through int indexing must be visible through string indexing.
    rates[0, 0, 0] = 5.0
    assert rates[0, 0, 0] == 5.0
    assert rates.get("10000", "U238", "fission") == 5.0
def test_properties():
    """Test the material/nuclide/reaction count properties."""
    rates = ReactionRates(
        ["10000", "10001"],
        ["U238", "U235", "Gd157"],
        ["fission", "(n,gamma)", "(n,2n)", "(n,3n)"],
    )
    # Counts mirror the lengths of the constructor arguments.
    assert (rates.n_mat, rates.n_nuc, rates.n_react) == (2, 3, 4)
|
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
class ItsLovePlugin(WillPlugin):
    """Replies affectionately when users express love or praise for Will."""

    @hear("i love(?: you,?)? will")
    def hear_love(self, message):
        """Reply when love for Will is mentioned anywhere in the room."""
        self.say("I love you, too.", message=message)

    @respond_to("i love you")
    def hear_love_direct(self, message):
        """Reply when Will is addressed directly with 'i love you'."""
        self.say("I love you, too.", message=message)

    @hear("will is awesome")
    def hear_i_am_awesome(self, message):
        """Reply to overheard praise."""
        self.say("Aww, thanks!", message=message)

    @respond_to("you(?: are|'re)? (?:awesome|rock)")
    def hear_you_are_awesome(self, message):
        """Reply to direct praise, addressing the sender by nick."""
        self.say("Takes one to know one, %s." % message.sender.nick, message=message)
|
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes, sync_blocks, sync_mempools
class WalletBackupTest(BitcoinTestFramework):
    def set_test_params(self):
        """Four-node clean chain: nodes 0-2 are spenders, node 3 mines."""
        self.num_nodes = 4
        self.setup_clean_chain = True
        # nodes 1, 2,3 are spenders, let's give them a keypool=100
        self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def setup_network(self):
        """Connect each spender to the miner, plus one spender-spender link."""
        self.setup_nodes()
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)
        self.sync_all()

    def one_send(self, from_node, to_address):
        """With 50% probability, send a random 0.1-1.0 amount to to_address."""
        if (randint(1,2) == 1):
            amount = Decimal(randint(1,10)) / Decimal(10)
            self.nodes[from_node].sendtoaddress(to_address, amount)

    def do_one_round(self):
        """One round: spenders 0/1/2 randomly pay each other, miner mines."""
        a0 = self.nodes[0].getnewaddress()
        a1 = self.nodes[1].getnewaddress()
        a2 = self.nodes[2].getnewaddress()
        self.one_send(0, a1)
        self.one_send(0, a2)
        self.one_send(1, a0)
        self.one_send(1, a2)
        self.one_send(2, a0)
        self.one_send(2, a1)
        # Have the miner (node3) mine a block.
        # Must sync mempools before mining.
        sync_mempools(self.nodes)
        self.nodes[3].generate(1)
        sync_blocks(self.nodes)

    # As above, this mirrors the original bash test.
    def start_three(self):
        """Restart spender nodes and re-establish the network topology."""
        self.start_node(0)
        self.start_node(1)
        self.start_node(2)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)

    def stop_three(self):
        """Stop the three spender nodes."""
        self.stop_node(0)
        self.stop_node(1)
        self.stop_node(2)

    def erase_three(self):
        """Delete the spender nodes' wallet.dat files (nodes must be stopped)."""
        os.remove(os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
        os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
        os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))

    def run_test(self):
        self.log.info("Generating initial blockchain")
        # One block per spender, then 100 from the miner so the spender
        # coinbases mature (50 BTC each).
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        self.nodes[2].generate(1)
        sync_blocks(self.nodes)
        self.nodes[3].generate(100)
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)
        self.log.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for i in range(5):
            self.do_one_round()
        self.log.info("Backing up")
        # Take both backup forms (binary wallet.bak and text wallet.dump)
        # for each spender; both restore paths are exercised below.
        self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
        self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
        self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
        self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
        self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
        self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
        self.log.info("More transactions")
        for i in range(5):
            self.do_one_round()
        # Generate 101 more blocks, so any fees paid mature
        self.nodes[3].generate(101)
        self.sync_all()
        balance0 = self.nodes[0].getbalance()
        balance1 = self.nodes[1].getbalance()
        balance2 = self.nodes[2].getbalance()
        balance3 = self.nodes[3].getbalance()
        total = balance0 + balance1 + balance2 + balance3
        # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
        assert_equal(total, 5700)
        ##
        # Test restoring spender wallets from backups
        ##
        self.log.info("Restoring using wallet.dat")
        self.stop_three()
        self.erase_three()
        # Start node2 with no chain
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
        # Restore wallets from backup
        shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
        shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
        shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))
        self.log.info("Re-starting nodes")
        self.start_three()
        sync_blocks(self.nodes)
        # Balances must match the pre-erase snapshot.
        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)
        self.log.info("Restoring using dumped wallet")
        self.stop_three()
        self.erase_three()
        #start node2 with no chain
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
        self.start_three()
        # Fresh wallets: everything is empty until the dump is imported.
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
        self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
        self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)
        # Backup to source wallet file must fail
        sourcePaths = [
            os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'),
            os.path.join(self.nodes[0].datadir, 'regtest', '.', 'wallets', 'wallet.dat'),
            os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', ''),
            os.path.join(self.nodes[0].datadir, 'regtest', 'wallets')]
        for sourcePath in sourcePaths:
            assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
# Entry point when run as a standalone functional test.
if __name__ == '__main__':
    WalletBackupTest().main()
|
import mock
import os
import sys
import re
import json
import socorro.storage.crashstorage as cstore
import socorro.unittest.testlib.expectations as exp
import socorro.lib.util as util
import socorro.unittest.testlib.loggerForTest as loggerForTest
def testLegacyThrottler():
    """Exercise LegacyThrottler condition preprocessing and throttle() verdicts.

    Phase 1 (neverDiscard=False) covers the four condition kinds — regex,
    string equality, predicate function, and boolean — plus the catch-all
    DISCARD condition and understandsRefusal().
    Phase 2 (neverDiscard=True) covers the IGNORE/DEFER/ACCEPT paths.
    """
    config = util.DotDict()
    config.throttleConditions = [ ('alpha', re.compile('ALPHA'), 100),
                                  ('beta', 'BETA', 100),
                                  ('gamma', lambda x: x == 'GAMMA', 100),
                                  ('delta', True, 100),
                                  (None, True, 0)
                                ]
    config.minimalVersionForUnderstandingRefusal = { 'product1': '3.5', 'product2': '4.0' }
    config.neverDiscard = False
    config.logger = util.SilentFakeLogger()
    thr = cstore.LegacyThrottler(config)
    # All five conditions must survive preprocessing.
    expected = 5
    actual = len(thr.processedThrottleConditions)
    # NOTE: the message used to say "preprocessThrottleConditions", which is
    # not the attribute being tested; fixed to match.
    assert expected == actual, "expected thr.processedThrottleConditions to have length %d, but got %d instead" % (expected, actual)
    # Version 3.0 < 3.5 minimum: does not understand refusal.
    json1 = util.DotDict({ 'ProductName':'product1',
                           'Version':'3.0',
                           'alpha':'ALPHA',
                         })
    expected = False
    actual = thr.understandsRefusal(json1)
    assert expected == actual, "understand refusal expected %d, but got %d instead" % (expected, actual)
    # Version 3.6 >= 3.5 minimum: understands refusal.
    json1 = util.DotDict({ 'ProductName':'product1',
                           'Version':'3.6',
                           'alpha':'ALPHA',
                         })
    expected = True
    actual = thr.understandsRefusal(json1)
    assert expected == actual, "understand refusal expected %d, but got %d instead" % (expected, actual)
    # Regex condition matches -> ACCEPT.
    expected = cstore.LegacyThrottler.ACCEPT
    actual = thr.throttle(json1)
    assert expected == actual, "regexp throttle expected %d, but got %d instead" % (expected, actual)
    # No condition matches, client too old to understand refusal -> DEFER.
    json1 = util.DotDict({ 'ProductName':'product1',
                           'Version':'3.4',
                           'alpha':'not correct',
                         })
    expected = cstore.LegacyThrottler.DEFER
    actual = thr.throttle(json1)
    assert expected == actual, "regexp throttle expected %d, but got %d instead" % (expected, actual)
    # No condition matches, client understands refusal -> DISCARD.
    json1 = util.DotDict({ 'ProductName':'product1',
                           'Version':'3.6',
                           'alpha':'not correct',
                         })
    expected = cstore.LegacyThrottler.DISCARD
    actual = thr.throttle(json1)
    assert expected == actual, "regexp throttle expected %d, but got %d instead" % (expected, actual)
    # String-equality condition.
    json1 = util.DotDict({ 'ProductName':'product1',
                           'Version':'3.6',
                           'beta':'BETA',
                         })
    expected = cstore.LegacyThrottler.ACCEPT
    actual = thr.throttle(json1)
    assert expected == actual, "string equality throttle expected %d, but got %d instead" % (expected, actual)
    json1 = util.DotDict({ 'ProductName':'product1',
                           'Version':'3.6',
                           'beta':'not BETA',
                         })
    expected = cstore.LegacyThrottler.DISCARD
    actual = thr.throttle(json1)
    assert expected == actual, "string equality throttle expected %d, but got %d instead" % (expected, actual)
    # Predicate-function condition (messages previously mislabeled these as
    # "string equality throttle"; fixed).
    json1 = util.DotDict({ 'ProductName':'product1',
                           'Version':'3.6',
                           'gamma':'GAMMA',
                         })
    expected = cstore.LegacyThrottler.ACCEPT
    actual = thr.throttle(json1)
    assert expected == actual, "function throttle expected %d, but got %d instead" % (expected, actual)
    json1 = util.DotDict({ 'ProductName':'product1',
                           'Version':'3.6',
                           'gamma':'not GAMMA',
                         })
    expected = cstore.LegacyThrottler.DISCARD
    actual = thr.throttle(json1)
    assert expected == actual, "function throttle expected %d, but got %d instead" % (expected, actual)
    # Boolean condition accepts regardless of the attribute's value.
    json1 = util.DotDict({ 'ProductName':'product1',
                           'Version':'3.6',
                           'delta':"value doesn't matter",
                         })
    expected = cstore.LegacyThrottler.ACCEPT
    actual = thr.throttle(json1)
    assert expected == actual, "boolean throttle expected %d, but got %d instead" % (expected, actual)
    # phase 2 tests
    config = util.DotDict()
    config.throttleConditions = [
        ('*', lambda x: 'alpha' in x, None),
        ('*', lambda x: x['beta'] == 'BETA', 100),
    ]
    config.minimalVersionForUnderstandingRefusal = {
        'product1': '3.5',
        'product2': '4.0'
    }
    config.neverDiscard = True
    config.logger = mock.Mock()
    thr = cstore.LegacyThrottler(config)
    expected = 2
    actual = len(thr.processedThrottleConditions)
    assert expected == actual, \
        "expected thr.processedThrottleConditions to have length %d, but got " \
        "%d instead" % (expected, actual)
    # First '*' condition matches ('alpha' key present) with a None
    # percentage -> IGNORE.
    raw_crash = util.DotDict({ 'ProductName':'product1',
                               'Version':'3.6',
                               'beta': 'ugh',
                               'alpha':"value doesn't matter",
                             })
    expected = cstore.LegacyThrottler.IGNORE
    actual = thr.throttle(raw_crash)
    assert expected == actual, \
        "IGNORE expected %d, but got %d instead" % \
        (expected, actual)
    # Nothing matches, but neverDiscard=True -> DEFER instead of DISCARD.
    raw_crash = util.DotDict({ 'ProductName':'product1',
                               'Version':'3.6',
                               'beta': 'ugh',
                               'delta':"value doesn't matter",
                             })
    expected = cstore.LegacyThrottler.DEFER
    actual = thr.throttle(raw_crash)
    assert expected == actual, \
        "DEFER expected %d, but got %d instead" % \
        (expected, actual)
    # The IGNORE condition wins even when the ACCEPT condition also matches.
    raw_crash = util.DotDict({ 'ProductName':'product1',
                               'Version':'3.6',
                               'beta': 'BETA',
                               'alpha':"value doesn't matter",
                             })
    expected = cstore.LegacyThrottler.IGNORE
    actual = thr.throttle(raw_crash)
    assert expected == actual, \
        "IGNORE expected %d, but got %d instead" % \
        (expected, actual)
    # Second '*' condition matches at 100%% -> ACCEPT.
    raw_crash = util.DotDict({ 'ProductName':'product1',
                               'Version':'3.6',
                               'beta': 'BETA',
                               'delta':"value doesn't matter",
                             })
    expected = cstore.LegacyThrottler.ACCEPT
    actual = thr.throttle(raw_crash)
    assert expected == actual, \
        "ACCEPT expected %d, but got %d instead" % \
        (expected, actual)
|
import sys
from pdfdevice import PDFTextDevice
from pdffont import PDFUnicodeNotDefined
from layout import LTContainer, LTPage, LTText, LTLine, LTRect, LTCurve
from layout import LTFigure, LTImage, LTChar, LTTextLine
from layout import LTTextBox, LTTextBoxVertical, LTTextGroup
from utils import apply_matrix_pt, mult_matrix
from utils import enc, bbox2str
class PDFLayoutAnalyzer(PDFTextDevice):
    """Text device that assembles rendered objects into an LTPage layout tree.

    Subclasses receive each completed page via receive_layout().
    """

    def __init__(self, rsrcmgr, pageno=1, laparams=None):
        PDFTextDevice.__init__(self, rsrcmgr)
        self.pageno = pageno
        # laparams: layout-analysis parameters; when None, analysis is skipped.
        self.laparams = laparams
        # Stack of enclosing items, pushed/popped by begin_figure/end_figure.
        self._stack = []
        return

    def begin_page(self, page, ctm):
        """Start a new LTPage whose bbox is the device-space mediabox."""
        (x0, y0, x1, y1) = page.mediabox
        (x0, y0) = apply_matrix_pt(ctm, (x0, y0))
        (x1, y1) = apply_matrix_pt(ctm, (x1, y1))
        # Anchor at the origin; abs() copes with axis-flipping transforms.
        mediabox = (0, 0, abs(x0-x1), abs(y0-y1))
        self.cur_item = LTPage(self.pageno, mediabox)
        return

    def end_page(self, page):
        """Finish the page, optionally run layout analysis, and hand it on."""
        # All figures must have been closed by now.
        assert not self._stack
        assert isinstance(self.cur_item, LTPage)
        if self.laparams is not None:
            self.cur_item.analyze(self.laparams)
        self.pageno += 1
        self.receive_layout(self.cur_item)
        return

    def begin_figure(self, name, bbox, matrix):
        """Open a nested LTFigure; subsequent items are added to it."""
        self._stack.append(self.cur_item)
        self.cur_item = LTFigure(name, bbox, mult_matrix(matrix, self.ctm))
        return

    def end_figure(self, _):
        """Close the current figure and attach it to its parent item."""
        fig = self.cur_item
        assert isinstance(self.cur_item, LTFigure)
        self.cur_item = self._stack.pop()
        self.cur_item.add(fig)
        return

    def render_image(self, name, stream):
        """Add an LTImage spanning the enclosing figure's bounding box."""
        assert isinstance(self.cur_item, LTFigure)
        item = LTImage(name, stream,
                       (self.cur_item.x0, self.cur_item.y0,
                        self.cur_item.x1, self.cur_item.y1))
        self.cur_item.add(item)
        return

    def paint_path(self, gstate, stroke, fill, evenodd, path):
        """Convert a painted path into an LTLine, LTRect, or LTCurve item."""
        # Sequence of path-operator letters, e.g. 'ml' or 'mlllh'.
        shape = ''.join(x[0] for x in path)
        if shape == 'ml':
            # horizontal/vertical line
            (_, x0, y0) = path[0]
            (_, x1, y1) = path[1]
            (x0, y0) = apply_matrix_pt(self.ctm, (x0, y0))
            (x1, y1) = apply_matrix_pt(self.ctm, (x1, y1))
            if x0 == x1 or y0 == y1:
                self.cur_item.add(LTLine(gstate.linewidth, (x0, y0), (x1, y1)))
                return
        if shape == 'mlllh':
            # rectangle
            (_, x0, y0) = path[0]
            (_, x1, y1) = path[1]
            (_, x2, y2) = path[2]
            (_, x3, y3) = path[3]
            (x0, y0) = apply_matrix_pt(self.ctm, (x0, y0))
            (x1, y1) = apply_matrix_pt(self.ctm, (x1, y1))
            (x2, y2) = apply_matrix_pt(self.ctm, (x2, y2))
            (x3, y3) = apply_matrix_pt(self.ctm, (x3, y3))
            # Only axis-aligned quadrilaterals (either winding) become LTRect.
            if ((x0 == x1 and y1 == y2 and x2 == x3 and y3 == y0) or
                (y0 == y1 and x1 == x2 and y2 == y3 and x3 == x0)):
                self.cur_item.add(LTRect(gstate.linewidth, (x0, y0, x2, y2)))
                return
        # other shapes
        pts = []
        for p in path:
            # Each operator tuple carries coordinate pairs after the letter.
            for i in xrange(1, len(p), 2):
                pts.append(apply_matrix_pt(self.ctm, (p[i], p[i+1])))
        self.cur_item.add(LTCurve(gstate.linewidth, pts))
        return

    def render_char(self, matrix, font, fontsize, scaling, rise, cid):
        """Add an LTChar for one glyph and return its advance width."""
        try:
            text = font.to_unichr(cid)
            assert isinstance(text, unicode), text
        except PDFUnicodeNotDefined:
            text = self.handle_undefined_char(font, cid)
        textwidth = font.char_width(cid)
        textdisp = font.char_disp(cid)
        item = LTChar(matrix, font, fontsize, scaling, rise, text, textwidth, textdisp)
        self.cur_item.add(item)
        return item.adv

    def handle_undefined_char(self, font, cid):
        """Return a placeholder string for a glyph with no Unicode mapping."""
        if self.debug:
            print >>sys.stderr, 'undefined: %r, %r' % (font, cid)
        return '(cid:%d)' % cid

    def receive_layout(self, ltpage):
        """Hook for subclasses; called once per completed LTPage."""
        return
class PDFPageAggregator(PDFLayoutAnalyzer):
    """Layout analyzer that keeps the most recently analyzed page in memory."""

    def __init__(self, rsrcmgr, pageno=1, laparams=None):
        PDFLayoutAnalyzer.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
        # Holds the last LTPage delivered by receive_layout().
        self.result = None

    def receive_layout(self, ltpage):
        """Store the finished page for retrieval via get_result()."""
        self.result = ltpage

    def get_result(self):
        """Return the last analyzed LTPage (None before any page finishes)."""
        return self.result
class PDFConverter(PDFLayoutAnalyzer):
    """Base class for converters that serialize layout to an output stream."""

    def __init__(self, rsrcmgr, outfp, codec='utf-8', pageno=1, laparams=None):
        PDFLayoutAnalyzer.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
        # Output stream and text codec shared by every concrete converter.
        self.outfp = outfp
        self.codec = codec
class TextConverter(PDFConverter):
    """Converter that emits plain text only, discarding graphics."""

    def __init__(self, rsrcmgr, outfp, codec='utf-8', pageno=1, laparams=None,
                 showpageno=False, imagewriter=None):
        PDFConverter.__init__(self, rsrcmgr, outfp, codec=codec, pageno=pageno, laparams=laparams)
        self.showpageno = showpageno
        self.imagewriter = imagewriter
        return

    def write_text(self, text):
        """Encode text with the configured codec and write it out."""
        self.outfp.write(text.encode(self.codec, 'ignore'))
        return

    def receive_layout(self, ltpage):
        """Walk the layout tree depth-first, writing text content only."""
        def render(item):
            if isinstance(item, LTContainer):
                for child in item:
                    render(child)
            elif isinstance(item, LTText):
                self.write_text(item.get_text())
            # A blank line separates text boxes in the output.
            if isinstance(item, LTTextBox):
                self.write_text('\n')
            elif isinstance(item, LTImage):
                if self.imagewriter is not None:
                    self.imagewriter.export_image(item)
        if self.showpageno:
            self.write_text('Page %s\n' % ltpage.pageid)
        render(ltpage)
        # Form feed marks the end of each page.
        self.write_text('\f')
        return

    # Some dummy functions to save memory/CPU when all that is wanted
    # is text. This stops all the image and drawing output from being
    # recorded and taking up RAM.
    def render_image(self, name, stream):
        if self.imagewriter is None:
            return
        PDFConverter.render_image(self, name, stream)
        return

    def paint_path(self, gstate, stroke, fill, evenodd, path):
        return
class HTMLConverter(PDFConverter):
    """Converter that renders the layout as absolutely-positioned HTML.

    Boxes and text are emitted as <span>/<div> elements whose CSS positions
    mirror the page coordinates (scaled by `scale`).
    """

    # Border colors used for debug rendering of each layout-item kind.
    RECT_COLORS = {
        #'char': 'green',
        'figure': 'yellow',
        'textline': 'magenta',
        'textbox': 'cyan',
        'textgroup': 'red',
        'curve': 'black',
        'page': 'gray',
    }

    # Text colors used for debug rendering.
    TEXT_COLORS = {
        'textbox': 'blue',
        'char': 'black',
    }

    def __init__(self, rsrcmgr, outfp, codec='utf-8', pageno=1, laparams=None,
                 scale=1, fontscale=1.0, layoutmode='normal', showpageno=True,
                 pagemargin=50, imagewriter=None,
                 rect_colors=None, text_colors=None):
        PDFConverter.__init__(self, rsrcmgr, outfp, codec=codec, pageno=pageno, laparams=laparams)
        # BUG FIX: rect_colors/text_colors used to be mutable default dicts;
        # the .update() calls below (in debug mode) mutated the shared default
        # objects, leaking debug colors into every later instance. Use None
        # sentinels and build a fresh dict per instance instead.
        if rect_colors is None:
            rect_colors = {'curve': 'black', 'page': 'gray'}
        if text_colors is None:
            text_colors = {'char': 'black'}
        self.scale = scale
        self.fontscale = fontscale
        self.layoutmode = layoutmode
        self.showpageno = showpageno
        self.pagemargin = pagemargin
        self.imagewriter = imagewriter
        self.rect_colors = rect_colors
        self.text_colors = text_colors
        if self.debug:
            self.rect_colors.update(self.RECT_COLORS)
            self.text_colors.update(self.TEXT_COLORS)
        # Vertical offset of the current page in the cumulative output.
        self._yoffset = self.pagemargin
        self._font = None
        self._fontstack = []
        self.write_header()
        return

    def write(self, text):
        self.outfp.write(text)
        return

    def write_header(self):
        """Emit the opening HTML boilerplate with the output charset."""
        self.write('<html><head>\n')
        self.write('<meta http-equiv="Content-Type" content="text/html; charset=%s">\n' % self.codec)
        self.write('</head><body>\n')
        return

    def write_footer(self):
        """Emit a page-link index and close the document."""
        self.write('<div style="position:absolute; top:0px;">Page: %s</div>\n' %
                   ', '.join('<a href="#%s">%s</a>' % (i, i) for i in xrange(1, self.pageno)))
        self.write('</body></html>\n')
        return

    def write_text(self, text):
        self.write(enc(text, self.codec))
        return

    def place_rect(self, color, borderwidth, x, y, w, h):
        """Emit an absolutely-positioned border span if `color` is enabled."""
        color = self.rect_colors.get(color)
        if color is not None:
            self.write('<span style="position:absolute; border: %s %dpx solid; '
                       'left:%dpx; top:%dpx; width:%dpx; height:%dpx;"></span>\n' %
                       (color, borderwidth,
                        x*self.scale, (self._yoffset-y)*self.scale,
                        w*self.scale, h*self.scale))
        return

    def place_border(self, color, borderwidth, item):
        """Draw a rect around a layout item's bounding box."""
        self.place_rect(color, borderwidth, item.x0, item.y1, item.width, item.height)
        return

    def place_image(self, item, borderwidth, x, y, w, h):
        """Export the image (if a writer is configured) and emit an <img>."""
        if self.imagewriter is not None:
            name = self.imagewriter.export_image(item)
            self.write('<img src="%s" border="%d" style="position:absolute; left:%dpx; top:%dpx;" '
                       'width="%d" height="%d" />\n' %
                       (enc(name), borderwidth,
                        x*self.scale, (self._yoffset-y)*self.scale,
                        w*self.scale, h*self.scale))
        return

    def place_text(self, color, text, x, y, size):
        """Emit positioned text if the given text color class is enabled."""
        color = self.text_colors.get(color)
        if color is not None:
            self.write('<span style="position:absolute; color:%s; left:%dpx; top:%dpx; font-size:%dpx;">' %
                       (color, x*self.scale, (self._yoffset-y)*self.scale, size*self.scale*self.fontscale))
            self.write_text(text)
            self.write('</span>\n')
        return

    def begin_div(self, color, borderwidth, x, y, w, h, writing_mode=False):
        """Open a positioned <div>; font state is saved and reset inside it."""
        self._fontstack.append(self._font)
        self._font = None
        self.write('<div style="position:absolute; border: %s %dpx solid; writing-mode:%s; '
                   'left:%dpx; top:%dpx; width:%dpx; height:%dpx;">' %
                   (color, borderwidth, writing_mode,
                    x*self.scale, (self._yoffset-y)*self.scale,
                    w*self.scale, h*self.scale))
        return

    def end_div(self, color):
        """Close the current <div>, closing any open font span first."""
        if self._font is not None:
            self.write('</span>')
        self._font = self._fontstack.pop()
        self.write('</div>')
        return

    def put_text(self, text, fontname, fontsize):
        """Write text, opening a new font span only when the font changes."""
        font = (fontname, fontsize)
        if font != self._font:
            if self._font is not None:
                self.write('</span>')
            self.write('<span style="font-family: %s; font-size:%dpx">' %
                       (fontname, fontsize * self.scale * self.fontscale))
            self._font = font
        self.write_text(text)
        return

    def put_newline(self):
        self.write('<br>')
        return

    def receive_layout(self, ltpage):
        """Render one analyzed page into positioned HTML."""
        def show_group(item):
            # Debug rendering of text-group nesting.
            if isinstance(item, LTTextGroup):
                self.place_border('textgroup', 1, item)
                for child in item:
                    show_group(child)
            return

        def render(item):
            if isinstance(item, LTPage):
                self._yoffset += item.y1
                self.place_border('page', 1, item)
                if self.showpageno:
                    self.write('<div style="position:absolute; top:%dpx;">' %
                               ((self._yoffset-item.y1)*self.scale))
                    self.write('<a name="%s">Page %s</a></div>\n' % (item.pageid, item.pageid))
                for child in item:
                    render(child)
                if item.groups is not None:
                    for group in item.groups:
                        show_group(group)
            elif isinstance(item, LTCurve):
                self.place_border('curve', 1, item)
            elif isinstance(item, LTFigure):
                self.begin_div('figure', 1, item.x0, item.y1, item.width, item.height)
                for child in item:
                    render(child)
                self.end_div('figure')
            elif isinstance(item, LTImage):
                self.place_image(item, 1, item.x0, item.y1, item.width, item.height)
            else:
                if self.layoutmode == 'exact':
                    # 'exact' mode positions every line/char individually.
                    if isinstance(item, LTTextLine):
                        self.place_border('textline', 1, item)
                        for child in item:
                            render(child)
                    elif isinstance(item, LTTextBox):
                        self.place_border('textbox', 1, item)
                        self.place_text('textbox', str(item.index+1), item.x0, item.y1, 20)
                        for child in item:
                            render(child)
                    elif isinstance(item, LTChar):
                        self.place_border('char', 1, item)
                        self.place_text('char', item.get_text(), item.x0, item.y1, item.size)
                else:
                    # flowed modes: text runs inside positioned box divs.
                    if isinstance(item, LTTextLine):
                        for child in item:
                            render(child)
                        if self.layoutmode != 'loose':
                            self.put_newline()
                    elif isinstance(item, LTTextBox):
                        self.begin_div('textbox', 1, item.x0, item.y1, item.width, item.height,
                                       item.get_writing_mode())
                        for child in item:
                            render(child)
                        self.end_div('textbox')
                    elif isinstance(item, LTChar):
                        self.put_text(item.get_text(), item.fontname, item.size)
                    elif isinstance(item, LTText):
                        self.write_text(item.get_text())
            return
        render(ltpage)
        self._yoffset += self.pagemargin
        return

    def close(self):
        self.write_footer()
        return
class XMLConverter(PDFConverter):
    """Converter that serializes the full layout tree as XML."""

    def __init__(self, rsrcmgr, outfp, codec='utf-8', pageno=1,
                 laparams=None, imagewriter=None):
        PDFConverter.__init__(self, rsrcmgr, outfp, codec=codec, pageno=pageno, laparams=laparams)
        self.imagewriter = imagewriter
        self.write_header()
        return

    def write_header(self):
        """Emit the XML declaration and the root <pages> element."""
        self.outfp.write('<?xml version="1.0" encoding="%s" ?>\n' % self.codec)
        self.outfp.write('<pages>\n')
        return

    def write_footer(self):
        self.outfp.write('</pages>\n')
        return

    def write_text(self, text):
        self.outfp.write(enc(text, self.codec))
        return

    def receive_layout(self, ltpage):
        """Serialize one analyzed page as an XML <page> element."""
        def show_group(item):
            # Emit the nested text-group structure produced by layout analysis.
            if isinstance(item, LTTextBox):
                self.outfp.write('<textbox id="%d" bbox="%s" />\n' %
                                 (item.index, bbox2str(item.bbox)))
            elif isinstance(item, LTTextGroup):
                self.outfp.write('<textgroup bbox="%s">\n' % bbox2str(item.bbox))
                for child in item:
                    show_group(child)
                self.outfp.write('</textgroup>\n')
            return

        def render(item):
            # One element per layout-item kind; containers recurse.
            if isinstance(item, LTPage):
                self.outfp.write('<page id="%s" bbox="%s" rotate="%d">\n' %
                                 (item.pageid, bbox2str(item.bbox), item.rotate))
                for child in item:
                    render(child)
                if item.groups is not None:
                    self.outfp.write('<layout>\n')
                    for group in item.groups:
                        show_group(group)
                    self.outfp.write('</layout>\n')
                self.outfp.write('</page>\n')
            elif isinstance(item, LTLine):
                self.outfp.write('<line linewidth="%d" bbox="%s" />\n' %
                                 (item.linewidth, bbox2str(item.bbox)))
            elif isinstance(item, LTRect):
                self.outfp.write('<rect linewidth="%d" bbox="%s" />\n' %
                                 (item.linewidth, bbox2str(item.bbox)))
            elif isinstance(item, LTCurve):
                self.outfp.write('<curve linewidth="%d" bbox="%s" pts="%s"/>\n' %
                                 (item.linewidth, bbox2str(item.bbox), item.get_pts()))
            elif isinstance(item, LTFigure):
                self.outfp.write('<figure name="%s" bbox="%s">\n' %
                                 (item.name, bbox2str(item.bbox)))
                for child in item:
                    render(child)
                self.outfp.write('</figure>\n')
            elif isinstance(item, LTTextLine):
                self.outfp.write('<textline bbox="%s">\n' % bbox2str(item.bbox))
                for child in item:
                    render(child)
                self.outfp.write('</textline>\n')
            elif isinstance(item, LTTextBox):
                wmode = ''
                if isinstance(item, LTTextBoxVertical):
                    wmode = ' wmode="vertical"'
                self.outfp.write('<textbox id="%d" bbox="%s"%s>\n' %
                                 (item.index, bbox2str(item.bbox), wmode))
                for child in item:
                    render(child)
                self.outfp.write('</textbox>\n')
            elif isinstance(item, LTChar):
                self.outfp.write('<text font="%s" bbox="%s" size="%.3f">' %
                                 (enc(item.fontname), bbox2str(item.bbox), item.size))
                self.write_text(item.get_text())
                self.outfp.write('</text>\n')
            elif isinstance(item, LTText):
                self.outfp.write('<text>%s</text>\n' % item.get_text())
            elif isinstance(item, LTImage):
                if self.imagewriter is not None:
                    name = self.imagewriter.export_image(item)
                    self.outfp.write('<image src="%s" width="%d" height="%d" />\n' %
                                     (enc(name), item.width, item.height))
                else:
                    self.outfp.write('<image width="%d" height="%d" />\n' %
                                     (item.width, item.height))
            else:
                # Unknown layout-item type: fail loudly.
                assert 0, item
            return
        render(ltpage)
        return

    def close(self):
        self.write_footer()
        return
|
"""Starter script for Nova Cells Service."""
import sys
from oslo.config import cfg
from nova import config
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import utils
from nova import version
CONF = cfg.CONF
CONF.import_opt('topic', 'nova.cells.opts', group='cells')
CONF.import_opt('manager', 'nova.cells.opts', group='cells')
def main():
    """Parse configuration, set up logging, and run the nova-cells service."""
    config.parse_args(sys.argv)
    logging.setup('nova')
    utils.monkey_patch()
    # Enable the Guru Meditation error-report framework for this binary.
    gmr.TextGuruMeditation.setup_autorun(version)
    # Topic and manager come from the [cells] config group imported above.
    server = service.Service.create(binary='nova-cells',
                                    topic=CONF.cells.topic,
                                    manager=CONF.cells.manager)
    service.serve(server)
    service.wait()
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema: creates the Article, Category and Tag models.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, unique=True)),
                ('status', models.IntegerField(default=0)),
                ('content', models.TextField()),
                # publish_date updates on every save; created_date is set once.
                ('publish_date', models.DateTimeField(auto_now=True)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(max_length=4096)),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
            ],
        ),
    ]
|
"""
Fuzz distortion. High gain followed by, asymmetrical clipping,
hard clip on top, soft compression on bottom.
"""
from pyo import *
s = Server(duplex=0).boot()

# Processing parameters.
SOURCE = "../snds/flute.aif"
BP_CENTER_FREQ = 250  # band-pass center frequency
BP_Q = 2
BOOST = 5  # pre-clipping gain
LP_CUTOFF_FREQ = 3500  # post-distortion low-pass cutoff
BALANCE = 0.8  # dry/wet mix for the final output

src = SfPlayer(SOURCE, loop=True).mix(2)
# Transfer tables: an exponential curve for the negative half (soft
# compression) and a cosine step for the positive half (hard clip),
# per the module docstring.
low_table = ExpTable([(0,-.25),(4096,0),(8192,0)], exp=10)
high_table = CosTable([(0,0),(4096,0),(4598,1),(8192,1)])
bp = Biquad(src, freq=BP_CENTER_FREQ, q=BP_Q, type=2)
boost = Sig(bp, mul=BOOST)
# 1 when the boosted signal is >= 0, else 0; used to route each half of
# the waveform to its own lookup table.
sign = Compare(boost, comp=0, mode=">=")
sw = Switch(boost, outs=2, voice=sign)
lowsig = Lookup(low_table, sw[0])
highsig = Lookup(high_table, sw[1])
lp = Tone(lowsig+highsig, freq=LP_CUTOFF_FREQ, mul=.3)
out = Interp(src, lp, interp=BALANCE).out()
s.gui(locals())
|
from __future__ import absolute_import
import pytest
from ansible.modules.source_control.gitlab.gitlab_hook import GitLabHook
def _dummy(x):
"""Dummy function. Only used as a placeholder for toplevel definitions when the test is going
to be skipped anyway"""
return x
# Marks collected by the import guards below; pytest applies them to the
# whole module, skipping every test when an optional dependency is missing.
pytestmark = []
try:
    from .gitlab import (GitlabModuleTestCase,
                         python_version_match_requirement,
                         resp_get_project, resp_find_project_hook,
                         resp_create_project_hook, resp_delete_project_hook)
    # GitLab module requirements
    if python_version_match_requirement():
        from gitlab.v4.objects import ProjectHook
except ImportError:
    pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
    # Need to set these to something so that we don't fail when parsing
    GitlabModuleTestCase = object
    resp_get_project = _dummy
    resp_find_project_hook = _dummy
    resp_create_project_hook = _dummy
    resp_delete_project_hook = _dummy
try:
    from httmock import with_httmock # noqa
except ImportError:
    pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
    # Decorator fallback so the decorated test methods below still parse.
    with_httmock = _dummy
class TestGitlabHook(GitlabModuleTestCase):
    """Unit tests for the GitLabHook module utility of the gitlab_hook module."""
    def setUp(self):
        super(TestGitlabHook, self).setUp()
        # Object under test, wired to the mocked ansible module and the
        # gitlab instance provided by GitlabModuleTestCase.
        self.moduleUtil = GitLabHook(module=self.mock_module, gitlab_instance=self.gitlab_instance)
    @with_httmock(resp_get_project)
    @with_httmock(resp_find_project_hook)
    def test_hook_exist(self):
        """existsHook() is True only for a hook URL present on the project."""
        project = self.gitlab_instance.projects.get(1)
        rvalue = self.moduleUtil.existsHook(project, "http://example.com/hook")
        self.assertEqual(rvalue, True)
        rvalue = self.moduleUtil.existsHook(project, "http://gitlab.com/hook")
        self.assertEqual(rvalue, False)
    @with_httmock(resp_get_project)
    @with_httmock(resp_create_project_hook)
    def test_create_hook(self):
        """createHook() returns a ProjectHook with the requested URL."""
        project = self.gitlab_instance.projects.get(1)
        hook = self.moduleUtil.createHook(project, {"url": "http://example.com/hook"})
        self.assertEqual(type(hook), ProjectHook)
        self.assertEqual(hook.url, "http://example.com/hook")
    @with_httmock(resp_get_project)
    @with_httmock(resp_find_project_hook)
    def test_update_hook(self):
        """updateHook() reports changed=True once, then False when re-applied."""
        project = self.gitlab_instance.projects.get(1)
        hook = self.moduleUtil.findHook(project, "http://example.com/hook")
        changed, newHook = self.moduleUtil.updateHook(hook, {"url": "http://gitlab.com/hook"})
        self.assertEqual(changed, True)
        self.assertEqual(type(newHook), ProjectHook)
        self.assertEqual(newHook.url, "http://gitlab.com/hook")
        # Applying the same attributes again must be a no-op.
        changed, newHook = self.moduleUtil.updateHook(hook, {"url": "http://gitlab.com/hook"})
        self.assertEqual(changed, False)
        self.assertEqual(newHook.url, "http://gitlab.com/hook")
    @with_httmock(resp_get_project)
    @with_httmock(resp_find_project_hook)
    @with_httmock(resp_delete_project_hook)
    def test_delete_hook(self):
        """deleteHook() deletes the previously looked-up hook and returns None."""
        project = self.gitlab_instance.projects.get(1)
        # existsHook() caches the found hook on the util before deletion.
        self.moduleUtil.existsHook(project, "http://example.com/hook")
        rvalue = self.moduleUtil.deleteHook()
        self.assertEqual(rvalue, None)
|
def func():
    """Print which branch a hard-coded sample value takes in a None check.

    Fix: the original body contained a stray editor artifact
    (``prin<caret>t``) which made the module a SyntaxError; restored the
    intended ``print`` call.
    """
    value = "not-none"
    if value is None:
        print("None")
    else:
        print("Not none")
|
from easydict import EasyDict
from lib.curator_action import CuratorAction
import logging
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class CuratorRunner(CuratorAction):
    """Runner that adapts keyword options into a curator action config."""
    def run(self, action=None, log_level='warn', dry_run=False, operation_timeout=600, **kwargs):
        """Entry point for a curator-based action.

        Records the requested action, folds the explicit options into the
        remaining keyword arguments, then hands control to the base-class
        command machinery (set_up_logging / do_command).
        """
        self._action = action
        # Merge the explicit options into the catch-all kwargs so a single
        # config object carries everything the action needs.
        kwargs['timeout'] = int(operation_timeout)
        kwargs['log_level'] = log_level
        kwargs['dry_run'] = dry_run
        self.config = EasyDict(kwargs)
        self.set_up_logging()
        self.do_command()
|
"""Unit tests for Web Development Style Guide checker."""
import os
import re
import sys
import unittest
# Directory containing this test file; used to build the sys.path entries.
test_dir = os.path.dirname(os.path.abspath(__file__))
# Make the checked-in tools (find_depot_tools, web_dev_style) importable.
sys.path.extend([
    os.path.normpath(os.path.join(test_dir, '..', '..', '..', 'tools')),
    os.path.join(test_dir),
])
import find_depot_tools # pylint: disable=W0611
from testing_support.super_mox import SuperMoxTestBase
from web_dev_style import css_checker, js_checker # pylint: disable=F0401
class JsStyleGuideTest(SuperMoxTestBase):
  """Tests for the JavaScript checks in web_dev_style.js_checker."""
  def setUp(self):
    SuperMoxTestBase.setUp(self)
    # The checker only uses |re| from the input API here, so loose mocks
    # are sufficient for both API objects.
    input_api = self.mox.CreateMockAnything()
    input_api.re = re
    output_api = self.mox.CreateMockAnything()
    self.checker = js_checker.JSChecker(input_api, output_api)
  def GetHighlight(self, line, error):
    """Returns the substring of |line| that is highlighted in |error|."""
    # The error text contains the offending line followed by a caret line
    # with '^' under each flagged character.
    error_lines = error.split('\n')
    highlight = error_lines[error_lines.index(line) + 1]
    return ''.join(ch1 for (ch1, ch2) in zip(line, highlight) if ch2 == '^')
  def ShouldFailConstCheck(self, line):
    """Checks that the 'const' checker flags |line| as a style error."""
    error = self.checker.ConstCheck(1, line)
    self.assertNotEqual('', error,
                        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), 'const')
  def ShouldPassConstCheck(self, line):
    """Checks that the 'const' checker doesn't flag |line| as a style error."""
    self.assertEqual('', self.checker.ConstCheck(1, line),
                     'Should not be flagged as style error: ' + line)
  def testConstFails(self):
    """Lines using the |const| keyword must be flagged."""
    lines = [
        "const foo = 'bar';",
        "  const bar = 'foo';",
        # Trying to use |const| as a variable name
        "var const = 0;",
        "var x = 5; const y = 6;",
        "for (var i=0, const e=10; i<e; i++) {",
        "for (const x=0; x<foo; i++) {",
        "while (const x = 7) {",
    ]
    for line in lines:
      self.ShouldFailConstCheck(line)
  def testConstPasses(self):
    """JsDoc tags and words merely containing 'const' must not be flagged."""
    lines = [
        # sanity check
        "var foo = 'bar'",
        # @const JsDoc tag
        "/** @const */ var SEVEN = 7;",
        # @const tag in multi-line comment
        " * @const",
        "   * @const",
        # @constructor tag in multi-line comment
        " * @constructor",
        "   * @constructor",
        # words containing 'const'
        "if (foo.constructor) {",
        "var deconstruction = 'something';",
        "var madeUpWordconst = 10;",
        # Strings containing the word |const|
        "var str = 'const at the beginning';",
        "var str = 'At the end: const';",
        # doing this one with regex is probably not practical
        #"var str = 'a const in the middle';",
    ]
    for line in lines:
      self.ShouldPassConstCheck(line)
  def ShouldFailChromeSendCheck(self, line):
    """Checks that the 'chrome.send' checker flags |line| as a style error."""
    error = self.checker.ChromeSendCheck(1, line)
    self.assertNotEqual('', error,
                        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), ', []')
  def ShouldPassChromeSendCheck(self, line):
    """Checks that the 'chrome.send' checker doesn't flag |line| as a style
    error.
    """
    self.assertEqual('', self.checker.ChromeSendCheck(1, line),
                     'Should not be flagged as style error: ' + line)
  def testChromeSendFails(self):
    """chrome.send() with an empty trailing array argument must be flagged."""
    lines = [
        "chrome.send('message', []);",
        "  chrome.send('message', []);",
    ]
    for line in lines:
      self.ShouldFailChromeSendCheck(line)
  def testChromeSendPasses(self):
    """Empty arrays nested inside a real argument must not be flagged."""
    lines = [
        "chrome.send('message', constructArgs('foo', []));",
        "  chrome.send('message', constructArgs('foo', []));",
        "chrome.send('message', constructArgs([]));",
        "  chrome.send('message', constructArgs([]));",
    ]
    for line in lines:
      self.ShouldPassChromeSendCheck(line)
  def ShouldFailGetElementByIdCheck(self, line):
    """Checks that the 'getElementById' checker flags |line| as a style
    error.
    """
    error = self.checker.GetElementByIdCheck(1, line)
    self.assertNotEqual('', error,
                        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), 'document.getElementById')
  def ShouldPassGetElementByIdCheck(self, line):
    """Checks that the 'getElementById' checker doesn't flag |line| as a style
    error.
    """
    self.assertEqual('', self.checker.GetElementByIdCheck(1, line),
                     'Should not be flagged as style error: ' + line)
  def testGetElementByIdFails(self):
    """document.getElementById() calls must be flagged."""
    lines = [
        "document.getElementById('foo');",
        "  document.getElementById('foo');",
        "var x = document.getElementById('foo');",
        "if (document.getElementById('foo').hidden) {",
    ]
    for line in lines:
      self.ShouldFailGetElementByIdCheck(line)
  def testGetElementByIdPasses(self):
    """getElementById() on other documents (doc, ownerDocument) is allowed."""
    lines = [
        "elem.ownerDocument.getElementById('foo');",
        "  elem.ownerDocument.getElementById('foo');",
        "var x = elem.ownerDocument.getElementById('foo');",
        "if (elem.ownerDocument.getElementById('foo').hidden) {",
        "doc.getElementById('foo');",
        "  doc.getElementById('foo');",
        "cr.doc.getElementById('foo');",
        "  cr.doc.getElementById('foo');",
        "var x = doc.getElementById('foo');",
        "if (doc.getElementById('foo').hidden) {",
    ]
    for line in lines:
      self.ShouldPassGetElementByIdCheck(line)
class CssStyleGuideTest(SuperMoxTestBase):
  """Tests for the CSS checks in web_dev_style.css_checker."""
  def setUp(self):
    SuperMoxTestBase.setUp(self)
    self.fake_file_name = 'fake.css'
    # Mock of a single affected file; LocalPath() always yields a fixed name.
    self.fake_file = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(self.fake_file, 'LocalPath')
    self.fake_file.LocalPath().AndReturn(self.fake_file_name)
    # Actual calls to NewContents() are defined in each test.
    self.mox.StubOutWithMock(self.fake_file, 'NewContents')
    self.input_api = self.mox.CreateMockAnything()
    self.input_api.re = re
    # NOTE(review): 'AffectedSourceFiles' is stubbed here, but the
    # expectation below is recorded on AffectedFiles() -- confirm which
    # method css_checker actually calls.
    self.mox.StubOutWithMock(self.input_api, 'AffectedSourceFiles')
    self.input_api.AffectedFiles(
        include_deletes=False, file_filter=None).AndReturn([self.fake_file])
    # Actual creations of PresubmitPromptWarning are defined in each test.
    self.output_api = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(self.output_api, 'PresubmitPromptWarning',
                             use_mock_anything=True)
    author_msg = ('Was the CSS checker useful? '
                  'Send feedback or hate mail to dbeam@chromium.org.')
    # NOTE(review): self.output_api is re-created here, discarding the
    # object whose PresubmitPromptWarning was just stubbed -- verify this
    # is intentional (a CreateMockAnything records expectations anyway).
    self.output_api = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(self.output_api, 'PresubmitNotifyResult',
                             use_mock_anything=True)
    self.output_api.PresubmitNotifyResult(author_msg).AndReturn(None)
  def VerifyContentsProducesOutput(self, contents, output):
    """Runs the CSS checker on |contents| and expects exactly |output|."""
    self.fake_file.NewContents().AndReturn(contents.splitlines())
    self.output_api.PresubmitPromptWarning(
        self.fake_file_name + ':\n' + output.strip()).AndReturn(None)
    self.mox.ReplayAll()
    css_checker.CSSChecker(self.input_api, self.output_api).RunChecks()
  def testCssAlphaWithAtBlock(self):
    self.VerifyContentsProducesOutput("""
<include src="../shared/css/cr/ui/overlay.css">
<include src="chrome://resources/totally-cool.css" />
/* A hopefully safely ignored comment and @media statement. /**/
@media print {
  div {
    display: block;
    color: red;
  }
}
.rule {
  z-index: 5;
<if expr="not is macosx">
  background-image: url(chrome://resources/BLAH); /* TODO(dbeam): Fix this. */
  background-color: rgb(235, 239, 249);
</if>
<if expr="is_macosx">
  background-color: white;
  background-image: url(chrome://resources/BLAH2);
</if>
  color: black;
}
<if expr="is_macosx">
.language-options-right {
  visibility: hidden;
  opacity: 1; /* TODO(dbeam): Fix this. */
}
</if>""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
    display: block;
    color: red;
    z-index: 5;
    color: black;""")
  def testCssAlphaWithNonStandard(self):
    self.VerifyContentsProducesOutput("""
div {
  /* A hopefully safely ignored comment and @media statement. /**/
  color: red;
  -webkit-margin-start: 5px;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
    color: red;
    -webkit-margin-start: 5px;""")
  def testCssAlphaWithLongerDashedProps(self):
    self.VerifyContentsProducesOutput("""
div {
  border-left: 5px; /* A hopefully removed comment. */
  border: 5px solid red;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
    border-left: 5px;
    border: 5px solid red;""")
  def testCssBracesHaveSpaceBeforeAndNothingAfter(self):
    self.VerifyContentsProducesOutput("""
/* Hello! */div/* Comment here*/{
  display: block;
}
blah /* hey! */
{
  rule: value;
}
.this.is { /* allowed */
  rule: value;
}""", """
- Start braces ({) end a selector, have a space before them and no rules after.
    div{
    {""")
  def testCssClassesUseDashes(self):
    self.VerifyContentsProducesOutput("""
.className,
.ClassName,
.class-name /* We should not catch this. */,
.class_name {
  display: block;
}""", """
- Classes use .dash-form.
    .className,
    .ClassName,
    .class_name {""")
  def testCssCloseBraceOnNewLine(self):
    self.VerifyContentsProducesOutput("""
@media { /* TODO(dbeam) Fix this case. */
  .rule {
    display: block;
  }}
@-webkit-keyframe blah {
  100% { height: -500px 0; }
}
  rule: value; }""", """
- Always put a rule closing brace (}) on a new line.
    rule: value; }""")
  def testCssColonsHaveSpaceAfter(self):
    self.VerifyContentsProducesOutput("""
div:not(.class):not([attr=5]), /* We should not catch this. */
div:not(.class):not([attr]) /* Nor this. */ {
  background: url(data:image/jpeg,asdfasdfsadf); /* Ignore this. */
  background: -webkit-linear-gradient(left, red,
      80% blah blee blar);
  color: red;
  display:block;
}""", """
- Colons (:) should have a space after them.
    display:block;
- Don't use data URIs in source files. Use grit instead.
    background: url(data:image/jpeg,asdfasdfsadf);""")
  def testCssFavorSingleQuotes(self):
    self.VerifyContentsProducesOutput("""
html[dir="rtl"] body,
html[dir=ltr] body /* TODO(dbeam): Require '' around rtl in future? */ {
  background: url("chrome://resources/BLAH");
  font-family: "Open Sans";
<if expr="is_macosx">
  blah: blee;
</if>
}""", """
- Use single quotes (') instead of double quotes (") in strings.
    html[dir="rtl"] body,
    background: url("chrome://resources/BLAH");
    font-family: "Open Sans";""")
  def testCssHexCouldBeShorter(self):
    self.VerifyContentsProducesOutput("""
  background-color: #336699; /* Ignore short hex rule if not gray. */
  color: #999999;
  color: #666;
}""", """
- Use abbreviated hex (#rgb) when in form #rrggbb.
    color: #999999; (replace with #999)
- Use rgb() over #hex when not a shade of gray (like #333).
    background-color: #336699; (replace with rgb(51, 102, 153))""")
  def testCssUseMillisecondsForSmallTimes(self):
    self.VerifyContentsProducesOutput("""
.transition-0s /* This is gross but may happen. */ {
  transform: one 0.2s;
  transform: two .1s;
  transform: tree 1s;
  transform: four 300ms;
}""", """
- Use milliseconds for time measurements under 1 second.
    transform: one 0.2s; (replace with 200ms)
    transform: two .1s; (replace with 100ms)""")
  def testCssNoDataUrisInSourceFiles(self):
    self.VerifyContentsProducesOutput("""
img {
  background: url( data:image/jpeg,4\/\/350|\/|3|2 );
  background: url('data:image/jpeg,4\/\/350|\/|3|2');
}""", """
- Don't use data URIs in source files. Use grit instead.
    background: url( data:image/jpeg,4\/\/350|\/|3|2 );
    background: url('data:image/jpeg,4\/\/350|\/|3|2');""")
  def testCssOneRulePerLine(self):
    self.VerifyContentsProducesOutput("""
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type,
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type ~
    input[type='checkbox']:not([hidden]),
div {
  background: url(chrome://resources/BLAH);
  rule: value; /* rule: value; */
  rule: value; rule: value;
}""", """
- One rule per line (what not to do: color: red; margin: 0;).
    rule: value; rule: value;""")
  def testCssOneSelectorPerLine(self):
    self.VerifyContentsProducesOutput("""
a,
div,a,
div,/* Hello! */ span,
  rule: value;
}
a,
div,a {
  some-other: rule here;
}""", """
- One selector per line (what not to do: a, b {}).
    div,a,
    div, span,
    div,a {""")
  def testCssRgbIfNotGray(self):
    self.VerifyContentsProducesOutput("""
  background: -webkit-linear-gradient(left, from(#abc), to(#def));
  color: #bad;
  color: #bada55;
}""", """
- Use rgb() over #hex when not a shade of gray (like #333).
    background: -webkit-linear-gradient(left, from(#abc), to(#def)); """
        """(replace with rgb(170, 187, 204), rgb(221, 238, 255))
    color: #bad; (replace with rgb(187, 170, 221))
    color: #bada55; (replace with rgb(186, 218, 85))""")
  def testCssZeroLengthTerms(self):
    self.VerifyContentsProducesOutput("""
@-webkit-keyframe anim {
  0% { /* Ignore key frames */
    width: 0px;
  }
  10% {
    width: 10px;
  }
  100% {
    width: 100px;
  }
}
.media-button.play > .state0.active,
.media-button[state='0'] > .state0.normal /* blah */, /* blee */
.media-button[state='0']:not(.disabled):hover > .state0.hover {
  -webkit-animation: anim 0s;
  -webkit-animation-duration: anim 0ms;
  -webkit-transform: scale(0%),
                     translateX(0deg),
                     translateY(0rad),
                     translateZ(0grad);
  background-position-x: 0em;
  background-position-y: 0ex;
  border-width: 0em;
  color: hsl(0, 0%, 85%); /* Shouldn't trigger error. */
  opacity: .0;
  opacity: 0.0;
  opacity: 0.;
}
@page {
  border-width: 0mm;
  height: 0cm;
  width: 0in;
}""", """
- Make all zero length terms (i.e. 0px) 0 unless inside of hsl() or part of"""
        """ @keyframe.
    width: 0px;
    -webkit-animation: anim 0s;
    -webkit-animation-duration: anim 0ms;
    -webkit-transform: scale(0%),
                       translateX(0deg),
                       translateY(0rad),
                       translateZ(0grad);
    background-position-x: 0em;
    background-position-y: 0ex;
    border-width: 0em;
    opacity: .0;
    opacity: 0.0;
    opacity: 0.;
    border-width: 0mm;
    height: 0cm;
    width: 0in;
""")
# Allow running this test file directly.
if __name__ == '__main__':
  unittest.main()
|
"""
Scrapy Shell
See documentation in docs/topics/shell.rst
"""
from threading import Thread
from scrapy.commands import ScrapyCommand
from scrapy.shell import Shell
from scrapy.http import Request
from scrapy.utils.spider import spidercls_for_request, DefaultSpider
from scrapy.utils.url import guess_scheme
class Command(ScrapyCommand):
    """The ``scrapy shell`` command: an interactive scraping console."""

    requires_project = False
    # Keep the process alive after the fetch, silence periodic log stats and
    # disable duplicate filtering so repeated fetches of one URL all work.
    default_settings = {
        'KEEP_ALIVE': True,
        'LOGSTATS_INTERVAL': 0,
        'DUPEFILTER_CLASS': 'scrapy.dupefilters.BaseDupeFilter',
    }

    def syntax(self):
        return "[url|file]"

    def short_desc(self):
        return "Interactive scraping console"

    def long_desc(self):
        return "Interactive console for scraping the given url"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-c", dest="code",
                          help="evaluate the code in the shell, print the result and exit")
        parser.add_option("--spider", dest="spider",
                          help="use this spider")

    def update_vars(self, vars):
        """Hook for subclasses: adjust the Scrapy objects exposed in the
        shell's namespace before it starts.
        """
        pass

    def run(self, args, opts):
        # The optional positional argument is the target; it may be a local
        # file path, so normalize it into a proper URL first.
        target = args[0] if args else None
        if target:
            target = guess_scheme(target)
        loader = self.crawler_process.spider_loader
        chosen_cls = DefaultSpider
        if opts.spider:
            chosen_cls = loader.load(opts.spider)
        elif target:
            chosen_cls = spidercls_for_request(loader, Request(target),
                                               chosen_cls, log_multiple=True)
        # Build the crawler by hand: the Shell drives the engine itself, so
        # the set-up done by the normal crawl() path does not apply here.
        crawler = self.crawler_process._create_crawler(chosen_cls)
        # The Shell needs an engine that persists for the whole session.
        crawler.engine = crawler._create_engine()
        crawler.engine.start()
        self._start_crawler_thread()
        console = Shell(crawler, update_vars=self.update_vars, code=opts.code)
        console.start(url=target)

    def _start_crawler_thread(self):
        # Run the reactor/crawler loop in a daemon thread so the console
        # keeps the foreground; don't stop when the first crawl finishes.
        worker = Thread(target=self.crawler_process.start,
                        kwargs={'stop_after_crawl': False})
        worker.daemon = True
        worker.start()
|
from __future__ import unicode_literals
import warnings
from django.contrib.localflavor.id.forms import (IDPhoneNumberField,
IDPostCodeField, IDNationalIdentityNumberField, IDLicensePlateField,
IDProvinceSelect, IDLicensePlatePrefixSelect)
from django.test import SimpleTestCase
class IDLocalFlavorTests(SimpleTestCase):
    """Widget and form-field tests for the Indonesian (ID) localflavor."""
    def setUp(self):
        # The id_choices module emits a RuntimeWarning on import; silence it
        # per-test and restore the filters in tearDown().
        self.save_warnings_state()
        warnings.filterwarnings(
            "ignore",
            category=RuntimeWarning,
            module='django.contrib.localflavor.id.id_choices'
        )
    def tearDown(self):
        self.restore_warnings_state()
    def test_IDProvinceSelect(self):
        """Renders every province with 'LPG' (Lampung) pre-selected."""
        f = IDProvinceSelect()
        out = '''<select name="provinces">
<option value="ACE">Aceh</option>
<option value="BLI">Bali</option>
<option value="BTN">Banten</option>
<option value="BKL">Bengkulu</option>
<option value="DIY">Yogyakarta</option>
<option value="JKT">Jakarta</option>
<option value="GOR">Gorontalo</option>
<option value="JMB">Jambi</option>
<option value="JBR">Jawa Barat</option>
<option value="JTG">Jawa Tengah</option>
<option value="JTM">Jawa Timur</option>
<option value="KBR">Kalimantan Barat</option>
<option value="KSL">Kalimantan Selatan</option>
<option value="KTG">Kalimantan Tengah</option>
<option value="KTM">Kalimantan Timur</option>
<option value="BBL">Kepulauan Bangka-Belitung</option>
<option value="KRI">Kepulauan Riau</option>
<option value="LPG" selected="selected">Lampung</option>
<option value="MLK">Maluku</option>
<option value="MUT">Maluku Utara</option>
<option value="NTB">Nusa Tenggara Barat</option>
<option value="NTT">Nusa Tenggara Timur</option>
<option value="PPA">Papua</option>
<option value="PPB">Papua Barat</option>
<option value="RIU">Riau</option>
<option value="SLB">Sulawesi Barat</option>
<option value="SLS">Sulawesi Selatan</option>
<option value="SLT">Sulawesi Tengah</option>
<option value="SLR">Sulawesi Tenggara</option>
<option value="SLU">Sulawesi Utara</option>
<option value="SMB">Sumatera Barat</option>
<option value="SMS">Sumatera Selatan</option>
<option value="SMU">Sumatera Utara</option>
</select>'''
        self.assertHTMLEqual(f.render('provinces', 'LPG'), out)
    def test_IDLicensePlatePrefixSelect(self):
        """Renders every plate prefix with 'BE' (Lampung) pre-selected."""
        f = IDLicensePlatePrefixSelect()
        out = '''<select name="codes">
<option value="A">Banten</option>
<option value="AA">Magelang</option>
<option value="AB">Yogyakarta</option>
<option value="AD">Surakarta - Solo</option>
<option value="AE">Madiun</option>
<option value="AG">Kediri</option>
<option value="B">Jakarta</option>
<option value="BA">Sumatera Barat</option>
<option value="BB">Tapanuli</option>
<option value="BD">Bengkulu</option>
<option value="BE" selected="selected">Lampung</option>
<option value="BG">Sumatera Selatan</option>
<option value="BH">Jambi</option>
<option value="BK">Sumatera Utara</option>
<option value="BL">Nanggroe Aceh Darussalam</option>
<option value="BM">Riau</option>
<option value="BN">Kepulauan Bangka Belitung</option>
<option value="BP">Kepulauan Riau</option>
<option value="CC">Corps Consulate</option>
<option value="CD">Corps Diplomatic</option>
<option value="D">Bandung</option>
<option value="DA">Kalimantan Selatan</option>
<option value="DB">Sulawesi Utara Daratan</option>
<option value="DC">Sulawesi Barat</option>
<option value="DD">Sulawesi Selatan</option>
<option value="DE">Maluku</option>
<option value="DG">Maluku Utara</option>
<option value="DH">NTT - Timor</option>
<option value="DK">Bali</option>
<option value="DL">Sulawesi Utara Kepulauan</option>
<option value="DM">Gorontalo</option>
<option value="DN">Sulawesi Tengah</option>
<option value="DR">NTB - Lombok</option>
<option value="DS">Papua dan Papua Barat</option>
<option value="DT">Sulawesi Tenggara</option>
<option value="E">Cirebon</option>
<option value="EA">NTB - Sumbawa</option>
<option value="EB">NTT - Flores</option>
<option value="ED">NTT - Sumba</option>
<option value="F">Bogor</option>
<option value="G">Pekalongan</option>
<option value="H">Semarang</option>
<option value="K">Pati</option>
<option value="KB">Kalimantan Barat</option>
<option value="KH">Kalimantan Tengah</option>
<option value="KT">Kalimantan Timur</option>
<option value="L">Surabaya</option>
<option value="M">Madura</option>
<option value="N">Malang</option>
<option value="P">Jember</option>
<option value="R">Banyumas</option>
<option value="RI">Federal Government</option>
<option value="S">Bojonegoro</option>
<option value="T">Purwakarta</option>
<option value="W">Sidoarjo</option>
<option value="Z">Garut</option>
</select>'''
        self.assertHTMLEqual(f.render('codes', 'BE'), out)
    def test_IDPhoneNumberField(self):
        """Valid numbers pass through unchanged; malformed ones raise."""
        error_invalid = ['Enter a valid phone number']
        valid = {
            '0812-3456789': '0812-3456789',
            '081234567890': '081234567890',
            '021 345 6789': '021 345 6789',
            '0213456789': '0213456789',
            '+62-21-3456789': '+62-21-3456789',
            '(021) 345 6789': '(021) 345 6789',
        }
        invalid = {
            '0123456789': error_invalid,
            '+62-021-3456789': error_invalid,
            '+62-0812-3456789': error_invalid,
            '0812345678901': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDPhoneNumberField, valid, invalid)
    def test_IDPostCodeField(self):
        """Post codes are 5 digits; surrounding whitespace is stripped."""
        error_invalid = ['Enter a valid post code']
        valid = {
            '12340': '12340',
            '25412': '25412',
            ' 12340 ': '12340',
        }
        invalid = {
            '12 3 4 0': error_invalid,
            '12345': error_invalid,
            '10100': error_invalid,
            '123456': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDPostCodeField, valid, invalid)
    def test_IDNationalIdentityNumberField(self):
        """NIK/KTP numbers are normalized to the dotted xx.xxxx.xxxxxx.xxxx form."""
        error_invalid = ['Enter a valid NIK/KTP number']
        valid = {
            ' 12.3456.010178 3456 ': '12.3456.010178.3456',
            '1234560101783456': '12.3456.010178.3456',
            '12.3456.010101.3456': '12.3456.010101.3456',
        }
        invalid = {
            '12.3456.310278.3456': error_invalid,
            '00.0000.010101.0000': error_invalid,
            '1234567890123456': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDNationalIdentityNumberField, valid, invalid)
    def test_IDLicensePlateField(self):
        """Plates are upper-cased and whitespace-trimmed; bad prefixes/shapes raise."""
        error_invalid = ['Enter a valid vehicle license plate number']
        valid = {
            ' b 1234  ab ': 'B 1234 AB',
            'B 1234 ABC': 'B 1234 ABC',
            'A 12': 'A 12',
            'DK 12345 12': 'DK 12345 12',
            'RI 10': 'RI 10',
            'CD 12 12': 'CD 12 12',
        }
        invalid = {
            'CD 10 12': error_invalid,
            'CD 1234 12': error_invalid,
            'RI 10 AB': error_invalid,
            'B 12345 01': error_invalid,
            'N 1234 12': error_invalid,
            'A 12 XYZ': error_invalid,
            'Q 1234 AB': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDLicensePlateField, valid, invalid)
|
"""Import all modules in the `ragged` package that define exported symbols.
Additionally, import ragged_dispatch (which has the side-effect of registering
dispatch handlers for many standard TF ops) and ragged_operators (which has the
side-effect of overriding RaggedTensor operators, such as RaggedTensor.__add__).
We don't import these modules from ragged/__init__.py, since we want to avoid
circular dependencies.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_batch_gather_ops
from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_dispatch
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_getitem
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_operators
from tensorflow.python.ops.ragged import ragged_squeeze_op
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_shape
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_where_op
from tensorflow.python.ops.ragged import segment_id_ops
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: switch 'Tidbit.content' to a TinyMCE HTMLField."""
    def forwards(self, orm):
        # Changing field 'Tidbit.content'
        db.alter_column('auxiliary_tidbit', 'content', self.gf('tinymce.models.HTMLField')())
    def backwards(self, orm):
        # Changing field 'Tidbit.content'
        db.alter_column('auxiliary_tidbit', 'content', self.gf('django.db.models.fields.TextField')())
    # Frozen ORM definition used by South when running this migration.
    models = {
        'auxiliary.tidbit': {
            'Meta': {'object_name': 'Tidbit'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'button_link': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'button_text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content': ('tinymce.models.HTMLField', [], {}),
            'icon': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ordering': ('django.db.models.fields.IntegerField', [], {'default': '20', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'default': "u'Did you know ?'", 'max_length': '40'})
        }
    }
    complete_apps = ['auxiliary']
|
"""
Layer on top of the upload APIs to perform many of the common client-side tasks
around uploading a content unit, such as the ability to resume a cancelled
download, uploading a file in multiple chunks instead of a single call, and
client-side tracking of upload requests on the server.
"""
import copy
import errno
import os
import pickle
from pulp.common.lock import LockFile
DEFAULT_CHUNKSIZE = 1048576 # 1 MB (2**20 bytes) per upload call
class ManagerUninitializedException(Exception):
    """The manager was used before initialize() was called."""
    pass
class MissingUploadRequestException(Exception):
    """An operation referenced an upload request that does not exist --
    either its local tracker file is missing or the server reports the
    request is no longer available.
    """
    pass
class IncompleteUploadException(Exception):
    """An attempt was made to import an upload that has not finished uploading."""
    pass
class ConcurrentUploadException(Exception):
    """An upload was started while it was already in progress."""
    pass
class UploadManager(object):
"""
Provides utilities for working with Pulp's upload content unit APIs. There
should only be one instance of this class per upload working directory (the
location used to store upload request status files).
Once instantiated, the initialize() method must be called before performing
any operations.
This class' thread safety admittedly isn't the best. The intention, at least
initially, is to be used in a CLI where there will only be a single thread
per process. As such, there are no in memory locks. The tracker files per
upload will carry some state information to prevent two processes from
concurrently modifying the same tracker.
Likewise, the working directory contents are only read once and cached. This
will be a problem if we expect an instance to be long running (i.e. the
interactive shell) and need to account for multiple instances of the shell
running at once.
In the future when we support an interactive shell interface we'll need to
revisit and add some in memory locking and a tighter integration with the
on disk state files.
"""
def __init__(self, upload_working_dir, bindings, chunk_size=DEFAULT_CHUNKSIZE):
"""
@param upload_working_dir: directory in which to store client-side files
to track upload requests; if it doesn't exist it will be created
during the initialize() call
@type upload_working_dir: str
@param bindings: server bindings from the client context
@type bindings: Bindings
@param chunk_size: size in bytes of data to upload on each call to the
server
@type chunk_size: int
"""
self.upload_working_dir = upload_working_dir
self.bindings = bindings
self.chunk_size = chunk_size
# Internal state
self.tracker_files = {}
@classmethod
def init_with_defaults(cls, context):
"""
TODO: get rid of this when we can refactor __init__ and allow a defauly
value for upload_working_dir
This initializes the class with a default upload working directory. It
uses code that had been copy-pasted into all type-specific extensions,
which allows them to eliminate that copy-pasted code.
:param context: a bunch of stuff that the whole CLI passes around
:type context: pulp.client.extensions.core.ClientContext
:return: a new UploadManager instance with a default working directory
:rtype: UploadManager
"""
upload_working_dir = os.path.join(context.config['filesystem']['upload_working_dir'],
'default')
upload_working_dir = os.path.expanduser(upload_working_dir)
return cls(upload_working_dir, context.server)
def initialize(self):
"""
TODO: delete this when we have a chance to refactor this class and the
extensions that use it
"""
pass
def initialize_upload(self, filename, repo_id, unit_type_id, unit_key, unit_metadata,
override_config=None):
"""
Called at the outset of a new upload request. This call requests the
server create a new upload request to be able to upload bits to it.
The parameters provided to this call are stored client-side and will
be sent to the server once the upload itself has completed as part of
the import_upload call.
@param filename: full path to the file on disk to upload; None if the
manager is being used to track a purely metadata unit
@type filename: str, None
@param repo_id: identifies the repository into which the unit is uploaded
@type repo_id: str
@param unit_type_id: identifies the type of unit being uploaded
@type unit_type_id: str
@param unit_key: unique key for the uploaded unit; contents will vary
based on content type
@type unit_key: dict
@param unit_metadata: any metadata about the unit to pass to the importer
when importing the unit; what is done with these values is up to
the importer's implementation
@return: upload ID used to identify this upload request in future calls
"""
# Create the working directory if it doesn't exist
if not os.path.exists(self.upload_working_dir):
os.makedirs(self.upload_working_dir)
response = self.bindings.uploads.initialize_upload().response_body
upload_id = response['upload_id']
location = response['_href']
# Build up the tracker file to track this upload
tracker_filename = self._tracker_filename(upload_id)
tracker_file = UploadTracker(tracker_filename)
tracker_file.upload_id = upload_id
tracker_file.location = location
tracker_file.offset = 0
tracker_file.repo_id = repo_id
tracker_file.unit_type_id = unit_type_id
tracker_file.unit_key = unit_key
tracker_file.unit_metadata = unit_metadata
tracker_file.override_config = override_config
tracker_file.source_filename = filename
# Save the tracker file to disk
tracker_file.save()
# Add to in memory cache
self._cache_tracker_file(tracker_file)
return upload_id
def upload(self, upload_id, callback_func=None, force=False):
"""
Begins or resumes the upload process for the given upload request.
This call will not return until the upload is complete. The other
expected exit point is a KeyboardError to kill the process. The
client-side on disk tracker files will store the current offset and
resume the upload from where it left off on the next call to this method.
The callback_func is used to get feedback on the upload process. After
each successful upload segment call to the server, this function
will be invoked with the new offset in the file and the file size
(intended to be fed into a progress indicator). As this is called
after each upload segment call, the granularity at which it is called
depends on the chunk_size value for this instance.
The callback_func should have a signature of (int, int).
This call will raise an exception if an upload is already in progress
for the given upload_id. If that isn't the case and the tracker file's
running flag is stale, the force parameter will bypass this check and
start the upload anyway.
@param upload_id: identifies the upload request
@type upload_id: str
@param callback_func: optional method to be called after each upload
call to the server
@type callback_func: func
@param force: if true will bypass the running check to prevent concurrent
uploads
@type force: bool
@raise MissingUploadRequestException: if a tracker file for upload_id
cannot be found
@raise ConcurrentUploadException: if an upload is already in progress
for upload_id
"""
tracker_file = self._get_tracker_file_by_id(upload_id)
if tracker_file is None:
raise MissingUploadRequestException()
# If the tracker state gets into a bad place, the caller can force it
# to upload anyway.
if not force and tracker_file.is_running:
raise ConcurrentUploadException()
try:
# Flag the upload request as running so other processes don't
# attempt to run it as well
tracker_file.is_running = True
tracker_file.save()
source_file_size = os.path.getsize(tracker_file.source_filename)
f = open(tracker_file.source_filename, 'r')
while True:
# Load the chunk to upload
f.seek(tracker_file.offset)
data = f.read(self.chunk_size)
if not data:
break
# Server request
self.bindings.uploads.upload_segment(upload_id, tracker_file.offset, data)
# Status update and callback notification
tracker_file.offset = min(tracker_file.offset + self.chunk_size, source_file_size)
tracker_file.save()
callback_func(tracker_file.offset, source_file_size)
tracker_file.is_finished_uploading = True
finally:
# Regardless of how this ends, it's no longer running, so make sure
# we update the tracker accordingly.
tracker_file.is_running = False
tracker_file.save()
def import_upload(self, upload_id):
"""
Once the file is finished uploading, this call will request the server
import the upload. The data provided during initialize_upload is sent
with the call. The server may raise an exception if the import fails
for some reason.
@param upload_id: identifies the upload request to import
@type upload_id: str
@raise MissingUploadRequestException: if there is no tracker file for
the given upload_id
@raise IncompleteUploadException: if the tracker file indicates the
upload has not completed
"""
tracker = self._get_tracker_file_by_id(upload_id)
if tracker is None:
raise MissingUploadRequestException()
if tracker.source_filename and not tracker.is_finished_uploading:
raise IncompleteUploadException()
response = self.bindings.uploads.import_upload(
upload_id, tracker.repo_id, tracker.unit_type_id, tracker.unit_key,
tracker.unit_metadata, tracker.override_config)
return response
def list_uploads(self):
"""
Returns all upload requests known to this instance.
@return: list of UploadTracker instances
@rtype: list
"""
# Load all tracker files from the working directory
try:
for filename in os.listdir(self.upload_working_dir):
full_filename = os.path.join(self.upload_working_dir, filename)
# If the upload requests are getting processed at this time,
# resulting in the tracker files getting deleted, we want to
# ignore the IOError and try to load as many tracker files as we can.
try:
tracker_file = UploadTracker.load(full_filename)
self.tracker_files[tracker_file.upload_id] = tracker_file
except (IOError, OSError):
pass
except OSError, e:
# 1092989: if we get ENOENT (no working dir), assume no uploads are
# taking place
if e.errno == errno.ENOENT:
return []
else:
raise
cached_trackers = self._all_tracker_files()
copies = [copy.copy(t) for t in cached_trackers] # copy for safety
return copies
def get_upload(self, upload_id):
"""
Returns a copy of the upload tracker for the given ID.
@param upload_id: upload to return
@type upload_id: str
@return: copy of the upload tracker if it exists; None otherwise
@rtype: UploadTracker
"""
tracker = self._get_tracker_file_by_id(upload_id)
if tracker:
tracker = copy.copy(tracker) # copy for safety
return tracker
def delete_upload(self, upload_id, force=False):
"""
Deletes the given upload request. Deleting a request is done both
on the server and the client-side tracking file. The server step is
performed first. If it fails, the client-side tracking file is not
deleted. If the server is in a weird state and the client-side
tracker still needs to be deleted, the force flag can be specified to
perform the client-side clean up regardless of the server response.
@param upload_id: identifies the upload request
@type upload_id: str
@param force: if true, delete the client-side knowledge of the upload
regardles of the server's response.
@type force: bool
"""
tracker = self._get_tracker_file_by_id(upload_id)
if tracker is None:
raise MissingUploadRequestException()
if not force and tracker.is_running:
raise ConcurrentUploadException()
# Try to delete the server side upload first. If that fails, the force
# option can be used to delete the client side tracker anyway.
try:
self.bindings.uploads.delete_upload(upload_id)
except Exception:
# Only raise the server side exception on a force
if not force:
raise
# Client Side Clean Up
self._uncache_tracker_file(tracker)
tracker.delete()
def _tracker_filename(self, upload_id):
return os.path.join(self.upload_working_dir, upload_id)
def _cache_tracker_file(self, tracker_file):
self.tracker_files[tracker_file.upload_id] = tracker_file
def _uncache_tracker_file(self, tracker_file):
self.tracker_files.pop(tracker_file.upload_id, None)
def _get_tracker_file_by_id(self, upload_id):
return self.tracker_files.get(upload_id, None)
    def _all_tracker_files(self):
        # Returns the dict's values() directly (a view on Python 3, a list on
        # Python 2); callers that need a stable snapshot copy the entries
        # themselves (see list_uploads).
        return self.tracker_files.values()
class UploadTracker(object):
    """
    Client-side file to carry all information related to a single upload
    request on the server.
    """

    def __init__(self, filename):
        self.filename = filename  # filename of the tracker file itself

        # Upload call information
        self.upload_id = None
        self.location = None  # URL to the upload request on the server
        self.offset = None  # start of next chunk to upload
        self.source_filename = None  # path on disk to the file to upload

        # Import call information
        self.repo_id = None
        self.unit_type_id = None
        self.unit_key = None
        self.unit_metadata = None
        # Previously set by UploadManager.initialize_upload without being
        # declared here; initialize it so unpickled/fresh instances agree.
        self.override_config = None

        # State information
        self.is_running = False
        self.is_finished_uploading = False

    def save(self):
        """
        Saves the current state of the tracker file. This will lock on the file
        to prevent multiple processes from editing it at once, even though that
        probably won't happen if the above code for upload works correctly.
        """
        # Can't lock if it doesn't exist, but this should be good enough. The
        # filenames are UUIDs and should be reasonably unique, so the chance
        # that two files with the same name are saved for the first time is
        # really remote.
        lock_file = None
        if os.path.exists(self.filename):
            lock_file = LockFile(self.filename)
            lock_file.acquire()
        try:
            # Binary mode is required for pickle payloads; the with block
            # guarantees the handle is closed even if pickling fails.
            with open(self.filename, 'wb') as f:
                pickle.dump(self, f)
        finally:
            # Release the lock even when the dump raises.
            if lock_file:
                lock_file.release()

    def delete(self):
        """Remove the tracker file from disk."""
        os.remove(self.filename)

    @classmethod
    def load(cls, filename):
        """
        Loads the given tracker file. The file must exist and be readable; the
        caller should ensure that before loading.

        @return: tracker instance
        @rtype:  UploadTracker
        """
        # Binary mode to match the pickle written by save().
        with open(filename, 'rb') as f:
            return pickle.load(f)
|
"""
Helpers methods for site configuration.
"""
from django.conf import settings
from microsite_configuration import microsite
def get_current_site_configuration():
    """
    Return the SiteConfiguration associated with the current site.

    Returns:
        (openedx.core.djangoapps.site_configuration.models.SiteConfiguration):
        SiteConfiguration instance for the current site, or None when the site
        has no configuration.
    """
    # Import is placed here to avoid circular import
    from openedx.core.djangoapps.theming.helpers import get_current_site
    # Import is placed here to avoid model import at project startup.
    from openedx.core.djangoapps.site_configuration.models import SiteConfiguration

    current_site = get_current_site()
    try:
        return getattr(current_site, "configuration", None)
    except SiteConfiguration.DoesNotExist:
        # Accessing the reverse relation raises DoesNotExist when no
        # configuration row exists for the site.
        return None
def is_site_configuration_enabled():
    """
    Tell whether the current site has an enabled SiteConfiguration.

    Returns:
        (bool): True if a SiteConfiguration is present and enabled, False otherwise
    """
    configuration = get_current_site_configuration()
    if not configuration:
        return False
    return configuration.enabled
def has_configuration_override(name):
    """
    Tell whether the current site's configuration defines the given key.

    Args:
        name (str): Name of the configuration key to look for.

    Returns:
        (bool): True if given key is present in the configuration.
    """
    configuration = get_current_site_configuration()
    return bool(configuration) and name in configuration.values
def get_configuration_value(name, default=None):
    """
    Return the configuration value for the key specified as name argument.

    Args:
        name (str): Name of the key for which to return configuration value.
        default: default value to return if key is not found in the configuration

    Returns:
        Configuration value for the given key, or ``default`` when the current
        site has no configuration.
    """
    configuration = get_current_site_configuration()
    # get_current_site_configuration can return None (site without a
    # configuration); calling get_value on it would raise AttributeError.
    if configuration is None:
        return default
    return configuration.get_value(name, default)
def get_configuration_dict(name, default=None):
    """
    Merge the current site's configuration dict for *name* over *default*.

    Args:
        name (str): Name of the configuration dict to retrieve.
        default (dict): default dict containing key-value pairs of default values.

    Returns:
        dict merged from the default and the site configuration value, or a
        copy of the default when configuration is not enabled.
    """
    merged = dict(default or {})
    site_value = get_configuration_value(name, {}) or {}
    merged.update(site_value)
    return merged
def get_value(val_name, default=None, **kwargs):
    """
    Return the configuration value for the given key, falling back to the
    microsite configuration when site configuration is disabled.

    Args:
        val_name (str): Name of the key for which to return configuration value.
        default: default value to return if key is not found in the configuration

    Returns:
        Configuration/Microsite value for the given key.
    """
    if is_site_configuration_enabled():
        # Site configuration wins when it is enabled for the current site.
        configuration_value = get_configuration_value(val_name, default=default)
    else:
        configuration_value = microsite.get_value(val_name, default=default, **kwargs)

    # When both the default and the configured value are dict-like, merge the
    # configured value over the default rather than replacing it wholesale.
    try:
        merged = dict(default)
        merged.update(configuration_value)
        return merged
    except (TypeError, ValueError, AttributeError):
        # TypeError: default is not iterable (simple value or None)
        # ValueError: default is iterable but not a dict (list, not dict)
        # AttributeError: default does not have an 'update' method
        return configuration_value
def get_dict(name, default=None):
    """
    Merge the configured dict for *name* (site or microsite) over the default.

    Args:
        name (str): Name of the configuration dict to retrieve.
        default (dict): default dict containing key-value pairs of default values.

    Returns:
        Merged dict for the given key, or `{}` if configuration not found.
    """
    defaults = default or {}
    if not is_site_configuration_enabled():
        return microsite.get_dict(name, defaults)
    return get_configuration_dict(name, defaults)
def has_override_value(name):
    """
    Tell whether the active configuration (site or microsite) defines the key.

    Args:
        name (str): Name of the configuration key to look for.

    Returns:
        (bool): True if given key is present in the configuration.
    """
    if not is_site_configuration_enabled():
        return microsite.has_override_value(name)
    return has_configuration_override(name)
def get_value_for_org(org, val_name, default=None):
    """
    Return a configuration value from whichever configuration (site or
    microsite) has an org_filter matching the given org.

    Args:
        org (str): Course org filter; used to select the matching configuration.
        val_name (str): Name of the key for which to return configuration value.
        default: default value to return if key is not present in the configuration

    Returns:
        Configuration value for the given key.
    """
    # Import is placed here to avoid model import at project startup.
    from openedx.core.djangoapps.site_configuration.models import SiteConfiguration

    # Prefer site configuration; fall back to the microsite configuration
    # when no site configuration claims this org.
    if not SiteConfiguration.has_org(org):
        return microsite.get_value_for_org(org, val_name, default)
    return SiteConfiguration.get_value_for_org(org, val_name, default)
def get_current_site_orgs():
    """
    Return the orgs configured (site or microsite) for the current site.

    Returns:
        list: organization names, or the raw falsy value when none are set.
    """
    course_org_filter = get_value('course_org_filter')
    if isinstance(course_org_filter, list) or not course_org_filter:
        return course_org_filter
    # A single org may be configured as a bare string; normalize to a list.
    return [course_org_filter]
def get_all_orgs():
    """
    Return every org known to either site configurations or the microsite
    configuration; useful e.g. for filtering.

    Returns:
        Set union of orgs from site configuration and microsite configuration.
    """
    # Import is placed here to avoid model import at project startup.
    from openedx.core.djangoapps.site_configuration.models import SiteConfiguration

    return SiteConfiguration.get_all_orgs().union(microsite.get_all_orgs())
def page_title_breadcrumbs(*crumbs, **kwargs):
    """
    Build a page title of the form:

        Specific | Less Specific | General | edX

    using the platform name configured for the current request. Pass a
    `separator` kwarg to override the default of " | ".
    """
    platform_name = get_value('platform_name', settings.PLATFORM_NAME)
    separator = kwargs.get("separator", " | ")
    # Drop None entries but keep empty strings, matching historic behavior.
    visible_crumbs = [crumb for crumb in crumbs if crumb is not None]
    if not visible_crumbs:
        return platform_name
    return u'{}{}{}'.format(separator.join(visible_crumbs), separator, platform_name)
|
from cinder.tests.functional import functional_helpers
class LoginTest(functional_helpers._FunctionalTestBase):

    def test_login(self):
        """Listing volumes only succeeds when authenticated, so a successful
        list proves the login worked."""
        volume_list = self.api.get_volumes()
        self.assertIsNotNone(volume_list)
|
from odoo import api, fields, models
from ast import literal_eval
class ResCompany(models.Model):
    _inherit = 'res.company'

    # Onboarding flag: computed, True as soon as any website theme is installed.
    website_theme_onboarding_done = fields.Boolean(
        "Onboarding website theme step done",
        compute='_compute_website_theme_onboarding_done')

    def _compute_website_theme_onboarding_done(self):
        """ The step is marked as done if one theme is installed. """
        # Reuse the domain of the existing kanban action so both stay in sync.
        action = self.env.ref('website_theme_install.theme_install_kanban_action').read()[0]
        theme_domain = literal_eval(action['domain'])
        theme_domain.append(('state', '=', 'installed'))
        done = self.env['ir.module.module'].sudo().search_count(theme_domain) > 0
        for company in self:
            company.website_theme_onboarding_done = done

    @api.model
    def action_open_website_theme_selector(self):
        """Open the theme selection kanban view in a dialog."""
        action = self.env.ref('website_theme_install.theme_install_kanban_action').read()[0]
        action['target'] = 'new'
        return action
|
import os
import sys

# Make the sibling 'perf' directory importable so chrome_telemetry_build can
# be resolved below.
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'perf'))
from chrome_telemetry_build import chromium_config

# Location of the telemetry framework for this checkout.
TELEMETRY_DIR = chromium_config.GetTelemetryDir()
# Directory containing this file; benchmark subdirs are resolved against it.
_top_level_dir = os.path.dirname(os.path.realpath(__file__))
def Config(benchmark_subdirs):
  """Build a ChromiumConfig rooted at this directory for the given subdirs."""
  benchmark_dirs = []
  for subdir in benchmark_subdirs:
    benchmark_dirs.append(os.path.join(_top_level_dir, subdir))
  return chromium_config.ChromiumConfig(
      top_level_dir=_top_level_dir, benchmark_dirs=benchmark_dirs)
|
"""
.. _on_screen_notification_tests:
:mod:`on_screen_notification_tests` -- Module for testing on-screen notifications
=================================================================================
.. automodule:: on_screen_notification_tests
.. moduleauthor:: Evgeny Fadeev <evgeny.fadeev@gmail.com>
"""
import datetime
import time
from django.test import TestCase
from askbot import models
from askbot import const
from askbot.tests.utils import create_user
def get_re_notif_after(timestamp):
    """Return the queryset of response notifications posted at or after
    ``timestamp`` - a ``datetime.datetime`` instance.
    """
    return models.Activity.objects.filter(
        activity_type__in=const.RESPONSE_ACTIVITY_TYPES_FOR_DISPLAY,
        active_at__gte=timestamp,
    )
class OnScreenUpdateNotificationTests(TestCase):
    """Test update notifications that are displayed on
    screen in the user profile responses view
    and "the red envelope"
    """
    def reset_response_counts(self):
        """Zero out response counters and drop all audit records for all users."""
        self.reload_users()
        models.ActivityAuditStatus.objects.all().delete()
        for user in self.users:
            user.new_response_count = 0
            user.seen_response_count = 0
            user.save()
    def reload_users(self):
        """Re-fetch every user from the DB so cached counter attributes are fresh."""
        self.u11 = models.User.objects.get(id=self.u11.id)
        self.u12 = models.User.objects.get(id=self.u12.id)
        self.u13 = models.User.objects.get(id=self.u13.id)
        self.u14 = models.User.objects.get(id=self.u14.id)
        self.u21 = models.User.objects.get(id=self.u21.id)
        self.u22 = models.User.objects.get(id=self.u22.id)
        self.u23 = models.User.objects.get(id=self.u23.id)
        self.u24 = models.User.objects.get(id=self.u24.id)
        self.u31 = models.User.objects.get(id=self.u31.id)
        self.u32 = models.User.objects.get(id=self.u32.id)
        self.u33 = models.User.objects.get(id=self.u33.id)
        self.u34 = models.User.objects.get(id=self.u34.id)
        # Flat list in the same uXY order used by the count-vector assertions.
        self.users = [
            self.u11,
            self.u12,
            self.u13,
            self.u14,
            self.u21,
            self.u22,
            self.u23,
            self.u24,
            self.u31,
            self.u32,
            self.u33,
            self.u34,
        ]
    def setUp(self):
        """Create 12 users and a question with two answers, each commented twice."""
        #users for the question
        self.u11 = create_user('user11', 'user11@example.com', status='m')
        self.u12 = create_user('user12', 'user12@example.com', status='m')
        self.u13 = create_user('user13', 'user13@example.com', status='m')
        self.u14 = create_user('user14', 'user14@example.com', status='m')
        #users for first answer
        self.u21 = create_user('user21', 'user21@example.com', status='m')#post answer
        self.u22 = create_user('user22', 'user22@example.com', status='m')#edit answer
        self.u23 = create_user('user23', 'user23@example.com', status='m')
        self.u24 = create_user('user24', 'user24@example.com', status='m')
        #users for second answer
        self.u31 = create_user('user31', 'user31@example.com', status='m')#post answer
        self.u32 = create_user('user32', 'user32@example.com', status='m')#edit answer
        self.u33 = create_user('user33', 'user33@example.com', status='m')
        self.u34 = create_user('user34', 'user34@example.com', status='m')
        #a hack to initialize .users list
        self.reload_users()
        #pre-populate askbot with some content
        #create a question and two answers, each post gets two comments
        #users have two digit codes. What users do in the setup code
        #is explained below (x is a variable that takes integer values of [1-3])
        #user x1 makes a post, users x2 and x3 add comments to that post
        #users 1x work on question, 2x and 3x on the answers
        #users x4 do not do anyting in the setup code
        self.thread = models.Thread.objects.create_new(
            title = 'test question',
            author = self.u11,
            added_at = datetime.datetime.now(),
            wiki = False,
            tagnames = 'test',
            text = 'hey listen up',
        )
        self.question = self.thread._question_post()
        self.comment12 = self.question.add_comment(
            user = self.u12,
            comment = 'comment12'
        )
        self.comment13 = self.question.add_comment(
            user = self.u13,
            comment = 'comment13'
        )
        self.answer1 = models.Post.objects.create_new_answer(
            thread = self.thread,
            author = self.u21,
            added_at = datetime.datetime.now(),
            text = 'answer1'
        )
        self.comment22 = self.answer1.add_comment(
            user = self.u22,
            comment = 'comment22'
        )
        self.comment23 = self.answer1.add_comment(
            user = self.u23,
            comment = 'comment23'
        )
        self.answer2 = models.Post.objects.create_new_answer(
            thread = self.thread,
            author = self.u31,
            added_at = datetime.datetime.now(),
            text = 'answer2'
        )
        self.comment32 = self.answer2.add_comment(
            user = self.u32,
            comment = 'comment32'
        )
        self.comment33 = self.answer2.add_comment(
            user = self.u33,
            comment = 'comment33'
        )
    def assertNewResponseCountsEqual(self, counts_vector):
        """Assert new_response_count for all 12 users equals counts_vector
        (ordered u11..u14, u21..u24, u31..u34)."""
        self.reload_users()
        self.assertEquals(
            [
                self.u11.new_response_count,
                self.u12.new_response_count,
                self.u13.new_response_count,
                self.u14.new_response_count,
                self.u21.new_response_count,
                self.u22.new_response_count,
                self.u23.new_response_count,
                self.u24.new_response_count,
                self.u31.new_response_count,
                self.u32.new_response_count,
                self.u33.new_response_count,
                self.u34.new_response_count,
            ],
            counts_vector
        )
    def assertSeenResponseCountsEqual(self, counts_vector):
        """Assert seen_response_count for all 12 users equals counts_vector
        (ordered u11..u14, u21..u24, u31..u34)."""
        self.reload_users()
        self.assertEquals(
            [
                self.u11.seen_response_count,
                self.u12.seen_response_count,
                self.u13.seen_response_count,
                self.u14.seen_response_count,
                self.u21.seen_response_count,
                self.u22.seen_response_count,
                self.u23.seen_response_count,
                self.u24.seen_response_count,
                self.u31.seen_response_count,
                self.u32.seen_response_count,
                self.u33.seen_response_count,
                self.u34.seen_response_count,
            ],
            counts_vector
        )
    # The following empty methods are placeholders for scenarios that are not
    # implemented yet; they intentionally do nothing.
    def post_then_delete_answer_comment(self):
        pass
    def post_then_delete_answer(self):
        pass
    def post_then_delete_question_comment(self):
        pass
    def post_mention_in_question_then_delete(self):
        pass
    def post_mention_in_answer_then_delete(self):
        pass
    def post_mention_in_question_then_edit_out(self):
        pass
    def post_mention_in_answer_then_edit_out(self):
        pass
    def test_post_mention_in_comments_then_delete(self):
        """A mention in a comment that is then deleted must leave no
        notifications and no response counts behind."""
        self.reset_response_counts()
        # sleep so the new activity timestamp is strictly after `timestamp`
        time.sleep(1)
        timestamp = datetime.datetime.now()
        comment = self.question.add_comment(
            user = self.u11,
            comment = '@user12 howyou doin?',
            added_at = timestamp
        )
        comment.delete()
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 0)
        self.assertNewResponseCountsEqual(
            [
                0, 0, 0, 0,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        comment = self.answer1.add_comment(
            user = self.u21,
            comment = 'hey @user22 blah',
            added_at = timestamp
        )
        comment.delete()
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 0)
        self.assertNewResponseCountsEqual(
            [
                0, 0, 0, 0,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
    def test_self_comments(self):
        """poster of the question or answer adds a comment
        under the corresponding question or answer"""
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.question.add_comment(
            user = self.u11,
            comment = 'self-comment',
            added_at = timestamp
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        # only the other commenters are notified, not the self-commenting author
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u12, self.u13]),
        )
        self.assertNewResponseCountsEqual(
            [
                0, 1, 1, 0,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.answer1.add_comment(
            user = self.u21,
            comment = 'self-comment 2',
            added_at = timestamp
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u22, self.u23]),
        )
        self.assertNewResponseCountsEqual(
            [
                0, 0, 0, 0,
                0, 1, 1, 0,
                0, 0, 0, 0,
            ]
        )
    def test_self_mention_not_posting_in_comment_to_question1(self):
        """A user mentioning themselves in their own question comment must not
        notify themselves."""
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.question.add_comment(
            user = self.u11,
            comment = 'self-comment @user11',
            added_at = timestamp
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u12, self.u13]),
        )
        self.assertNewResponseCountsEqual(
            [
                0, 1, 1, 0,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
    def test_self_mention_not_posting_in_comment_to_question2(self):
        """Same as above, with the self-mention mid-comment instead of trailing."""
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.question.add_comment(
            user = self.u11,
            comment = 'self-comment @user11 blah',
            added_at = timestamp
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u12, self.u13]),
        )
        self.assertNewResponseCountsEqual(
            [
                0, 1, 1, 0,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
    def test_self_mention_not_posting_in_comment_to_answer(self):
        """A user mentioning themselves in their own answer comment must not
        notify themselves."""
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.answer1.add_comment(
            user = self.u21,
            comment = 'self-comment 1 @user21',
            added_at = timestamp
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u22, self.u23]),
        )
        self.assertNewResponseCountsEqual(
            [
                0, 0, 0, 0,
                0, 1, 1, 0,
                0, 0, 0, 0,
            ]
        )
    def test_responses_clear_after_visit(self):
        """user 14 posts comment under question
        user 11, 12, 21, and 22 visit the question
        user 13 does not
        the expected outcome is that 11 and 12 have
        0 responses and 13 still has one
        remaining users still have notifications
        """
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.question.add_comment(
            user = self.u14,
            comment = 'dudududududu',
            added_at = timestamp
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u11, self.u12, self.u13])#all users are notified
        )
        self.assertNewResponseCountsEqual(
            [
                1, 1, 1, 0,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
        self.assertSeenResponseCountsEqual(
            [
                0, 0, 0, 0,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
        # visiting the question moves the notification from "new" to "seen"
        self.u11.visit_question(self.question)
        self.u12.visit_question(self.question)
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(#visitors are not notified
            set(notifications[0].recipients.all()),
            set([self.u11, self.u12, self.u13])
        )
        self.assertEqual(
            self.u11.activityauditstatus_set.all()[0].status,
            models.ActivityAuditStatus.STATUS_SEEN
        )
        self.assertEqual(
            self.u12.activityauditstatus_set.all()[0].status,
            models.ActivityAuditStatus.STATUS_SEEN
        )
        self.assertEqual(
            self.u13.activityauditstatus_set.all()[0].status,
            models.ActivityAuditStatus.STATUS_NEW
        )
        self.assertNewResponseCountsEqual(
            [
                0, 0, 1, 0,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
        self.assertSeenResponseCountsEqual(
            [
                1, 1, 0, 0,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
    def test_comments_to_post_authors(self):
        """Comments must also notify users who edited the post, in addition to
        the author and other commenters."""
        self.question.apply_edit(
            edited_by = self.u14,
            text = 'now much better',
            comment = 'improved text'
        )
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.question.add_comment(
            user = self.u12,
            comment = 'self-comment 1',
            added_at = timestamp
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u11, self.u13, self.u14]),
        )
        self.assertNewResponseCountsEqual(
            [
                1, 0, 1, 1,
                0, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
        self.answer1.apply_edit(
            edited_by = self.u24,
            text = 'now much better',
            comment = 'improved text'
        )
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.answer1.add_comment(
            user = self.u22,
            comment = 'self-comment 1',
            added_at = timestamp
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u21, self.u23, self.u24]),
        )
        self.assertNewResponseCountsEqual(
            [
                0, 0, 0, 0,
                1, 0, 1, 1,
                0, 0, 0, 0,
            ]
        )
    def test_question_edit(self):
        """when question is edited
        response receivers are question authors, commenters
        and answer authors, but not answer commenters
        """
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.question.apply_edit(
            edited_by = self.u14,
            text = 'waaay better question!',
            comment = 'improved question',
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u11, self.u12, self.u13, self.u21, self.u31])
        )
        self.assertNewResponseCountsEqual(
            [
                1, 1, 1, 0,
                1, 0, 0, 0,
                1, 0, 0, 0,
            ]
        )
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.question.apply_edit(
            edited_by = self.u31,
            text = 'waaay even better question!',
            comment = 'improved question',
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        # u31 edited, so u31 is excluded; u14 (earlier editor) is now included
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set([self.u11, self.u12, self.u13, self.u14, self.u21])
        )
        self.assertNewResponseCountsEqual(
            [
                1, 1, 1, 1,
                1, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
    def test_answer_edit(self):
        """Editing an answer notifies question participants, the answer's
        participants and the other answer's author - not its commenters.
        """
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.answer1.apply_edit(
            edited_by = self.u24,
            text = 'waaay better answer!',
            comment = 'improved answer1',
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set(
                [
                    self.u11, self.u12, self.u13,
                    self.u21, self.u22, self.u23,
                    self.u31
                ]
            )
        )
        self.assertNewResponseCountsEqual(
            [
                1, 1, 1, 0,
                1, 1, 1, 0,
                1, 0, 0, 0,
            ]
        )
    def test_new_answer(self):
        """Posting an answer notifies question participants and the other
        answer authors, excluding the poster themselves."""
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.answer3 = models.Post.objects.create_new_answer(
            thread = self.thread,
            author = self.u11,
            added_at = timestamp,
            text = 'answer3'
        )
        # NOTE(review): time_end is unused - candidate for removal
        time_end = datetime.datetime.now()
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set(
                [
                    self.u12, self.u13,
                    self.u21, self.u31
                ]
            )
        )
        self.assertNewResponseCountsEqual(
            [
                0, 1, 1, 0,
                1, 0, 0, 0,
                1, 0, 0, 0,
            ]
        )
        self.reset_response_counts()
        time.sleep(1)
        timestamp = datetime.datetime.now()
        self.answer3 = models.Post.objects.create_new_answer(
            thread = self.thread,
            author = self.u31,
            added_at = timestamp,
            text = 'answer4'
        )
        notifications = get_re_notif_after(timestamp)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(
            set(notifications[0].recipients.all()),
            set(
                [
                    self.u11, self.u12, self.u13,
                    self.u21
                ]
            )
        )
        self.assertNewResponseCountsEqual(
            [
                1, 1, 1, 0,
                1, 0, 0, 0,
                0, 0, 0, 0,
            ]
        )
|
from .ExodusSourceLineSampler import ExodusSourceLineSampler
from ..base import ChiggerResult
class ExodusResultLineSampler(ChiggerResult):
    """
    Result object that attaches one ExodusSourceLineSampler to each
    ExodusSource contained in an ExodusResult.
    """
    @staticmethod
    def getOptions():
        # No additional options beyond the base result options.
        return ChiggerResult.getOptions()

    def __init__(self, exodus_result, **kwargs):
        self._exodus_result = exodus_result
        samplers = [ExodusSourceLineSampler(src, **kwargs)
                    for src in self._exodus_result]
        super(ExodusResultLineSampler, self).__init__(
            *samplers,
            renderer=exodus_result.getVTKRenderer(),
            viewport=exodus_result.getOption('viewport'),
            **kwargs)
|
"""
Step definitions for providing notes/hints.
The note steps explain what was important in the last few steps of
this scenario (for a test reader).
"""
from __future__ import absolute_import
from behave import step
@step(u'note that "{remark}"')
def step_note_that(context, remark):
    """
    Generic step that records an explanatory remark/hint for readers of
    the scenario without performing any check.

    .. code-block:: gherkin

        Given that today is "April 1st"
        But note that "April 1st is Fools day (and beware)"
    """
    logger = getattr(context, "log", None)
    if logger:
        logger.info(u"NOTE: %s;" % remark)
|
"""Utility code for constructing importers, etc."""
from . import abc
from ._bootstrap import module_from_spec
from ._bootstrap import _resolve_name
from ._bootstrap import spec_from_loader
from ._bootstrap import _find_spec
from ._bootstrap_external import MAGIC_NUMBER
from ._bootstrap_external import _RAW_MAGIC_NUMBER
from ._bootstrap_external import cache_from_source
from ._bootstrap_external import decode_source
from ._bootstrap_external import source_from_cache
from ._bootstrap_external import spec_from_file_location
from contextlib import contextmanager
import _imp
import functools
import sys
import types
import warnings
def source_hash(source_bytes):
    "Return the hash of *source_bytes* as used in hash-based pyc files."
    # Delegates to the C-level _imp.source_hash(), keyed with the raw
    # bytecode magic number so hashes are tied to this interpreter's
    # pyc format.
    return _imp.source_hash(_RAW_MAGIC_NUMBER, source_bytes)
def resolve_name(name, package):
    """Resolve a relative module name to an absolute one."""
    if not name.startswith('.'):
        # Already absolute; nothing to do.
        return name
    if not package:
        raise ValueError(f'no package specified for {repr(name)} '
                         '(required for relative module names)')
    # The number of leading dots is the number of package levels to ascend.
    level = len(name) - len(name.lstrip('.'))
    return _resolve_name(name[level:], package, level)
def _find_spec_from_path(name, path=None):
"""Return the spec for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__spec__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable spec with the
value of 'path' given to the finders. None is returned if no spec could
be found.
Dotted names do not have their parent packages implicitly imported. You will
most likely need to explicitly import all parent packages in the proper
order for a submodule to get the correct spec.
"""
if name not in sys.modules:
return _find_spec(name, path)
else:
module = sys.modules[name]
if module is None:
return None
try:
spec = module.__spec__
except AttributeError:
raise ValueError('{}.__spec__ is not set'.format(name)) from None
else:
if spec is None:
raise ValueError('{}.__spec__ is None'.format(name))
return spec
def find_spec(name, package=None):
    """Return the spec for the specified module.

    sys.modules is consulted first: an already-imported module's
    __spec__ is returned (ValueError if it is missing or None, None if
    the module entry itself is None).  Otherwise sys.meta_path is
    searched; None is returned when no spec can be found.

    For a dotted submodule name the parent module is imported
    automatically.  *name*/*package* behave like in
    importlib.import_module(), so relative names with leading dots work.
    """
    fullname = resolve_name(name, package) if name.startswith('.') else name
    if fullname in sys.modules:
        module = sys.modules[fullname]
        if module is None:
            return None
        try:
            spec = module.__spec__
        except AttributeError:
            raise ValueError('{}.__spec__ is not set'.format(name)) from None
        if spec is None:
            raise ValueError('{}.__spec__ is None'.format(name))
        return spec
    parent_path = None
    parent_name = fullname.rpartition('.')[0]
    if parent_name:
        parent = __import__(parent_name, fromlist=['__path__'])
        try:
            parent_path = parent.__path__
        except AttributeError as e:
            raise ModuleNotFoundError(
                f"__path__ attribute not found on {parent_name!r} "
                f"while trying to find {fullname!r}", name=fullname) from e
    return _find_spec(fullname, parent_path)
@contextmanager
def _module_to_load(name):
is_reload = name in sys.modules
module = sys.modules.get(name)
if not is_reload:
# This must be done before open() is called as the 'io' module
# implicitly imports 'locale' and would otherwise trigger an
# infinite loop.
module = type(sys)(name)
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes wrong)
module.__initializing__ = True
sys.modules[name] = module
try:
yield module
except Exception:
if not is_reload:
try:
del sys.modules[name]
except KeyError:
pass
finally:
module.__initializing__ = False
def set_package(fxn):
    """Set __package__ on the returned module.

    This function is deprecated.
    """
    @functools.wraps(fxn)
    def set_package_wrapper(*args, **kwargs):
        warnings.warn('The import system now takes care of this automatically.',
                      DeprecationWarning, stacklevel=2)
        module = fxn(*args, **kwargs)
        if getattr(module, '__package__', None) is None:
            # A package is its own __package__; a plain module's package is
            # its name minus the final component.
            package = module.__name__
            if not hasattr(module, '__path__'):
                package = package.rpartition('.')[0]
            module.__package__ = package
        return module
    return set_package_wrapper
def set_loader(fxn):
    """Set __loader__ on the returned module.

    This function is deprecated.
    """
    @functools.wraps(fxn)
    def set_loader_wrapper(self, *args, **kwargs):
        warnings.warn('The import system now takes care of this automatically.',
                      DeprecationWarning, stacklevel=2)
        module = fxn(self, *args, **kwargs)
        # Only fill in __loader__ when the loader did not set it itself.
        missing_loader = getattr(module, '__loader__', None) is None
        if missing_loader:
            module.__loader__ = self
        return module
    return set_loader_wrapper
def module_for_loader(fxn):
    """Decorator to handle selecting the proper module for loaders.

    The decorated function is passed the module to use instead of the module
    name. The module passed in to the function is either from sys.modules if
    it already exists or is a new module. If the module is new, then __name__
    is set the first argument to the method, __loader__ is set to self, and
    __package__ is set accordingly (if self.is_package() is defined) will be set
    before it is passed to the decorated function (if self.is_package() does
    not work for the module it will be set post-load).

    If an exception is raised and the decorator created the module it is
    subsequently removed from sys.modules.

    The decorator assumes that the decorated function takes the module name as
    the second argument.
    """
    # Warn at decoration time: this whole decorator is deprecated.
    warnings.warn('The import system now takes care of this automatically.',
                  DeprecationWarning, stacklevel=2)
    @functools.wraps(fxn)
    def module_for_loader_wrapper(self, fullname, *args, **kwargs):
        # _module_to_load() publishes a (possibly new) module in sys.modules
        # and removes it again when the load fails.
        with _module_to_load(fullname) as module:
            module.__loader__ = self
            try:
                is_package = self.is_package(fullname)
            except (ImportError, AttributeError):
                # The loader cannot tell; leave __package__ unset here.
                pass
            else:
                if is_package:
                    module.__package__ = fullname
                else:
                    module.__package__ = fullname.rpartition('.')[0]
            # If __package__ was not set above, __import__() will do it later.
            return fxn(self, module, *args, **kwargs)
    return module_for_loader_wrapper
class _LazyModule(types.ModuleType):
    """A subclass of the module type which triggers loading upon attribute access."""

    def __getattribute__(self, attr):
        """Trigger the load of the module and return the attribute."""
        # All module metadata must be garnered from __spec__ in order to avoid
        # using mutated values.
        # Stop triggering this method.
        self.__class__ = types.ModuleType
        # Get the original name to make sure no object substitution occurred
        # in sys.modules.
        original_name = self.__spec__.name
        # Figure out exactly what attributes were mutated between the creation
        # of the module and now.
        attrs_then = self.__spec__.loader_state['__dict__']
        # NOTE(review): original_type is captured but not used below --
        # confirm whether the class snapshot is still needed.
        original_type = self.__spec__.loader_state['__class__']
        attrs_now = self.__dict__
        attrs_updated = {}
        for key, value in attrs_now.items():
            # Code that set the attribute may have kept a reference to the
            # assigned object, making identity more important than equality.
            if key not in attrs_then:
                attrs_updated[key] = value
            elif id(attrs_now[key]) != id(attrs_then[key]):
                attrs_updated[key] = value
        # Actually execute the module now that attribute access is safe
        # (self is a plain ModuleType again, so no recursion).
        self.__spec__.loader.exec_module(self)
        # If exec_module() was used directly there is no guarantee the module
        # object was put into sys.modules.
        if original_name in sys.modules:
            if id(self) != id(sys.modules[original_name]):
                raise ValueError(f"module object for {original_name!r} "
                                 "substituted in sys.modules during a lazy "
                                 "load")
        # Update after loading since that's what would happen in an eager
        # loading situation.
        self.__dict__.update(attrs_updated)
        return getattr(self, attr)

    def __delattr__(self, attr):
        """Trigger the load and then perform the deletion."""
        # To trigger the load and raise an exception if the attribute
        # doesn't exist.
        self.__getattribute__(attr)
        delattr(self, attr)
class LazyLoader(abc.Loader):
    """A loader that creates a module which defers loading until attribute access."""

    @staticmethod
    def __check_eager_loader(loader):
        # Lazy loading only works for loaders implementing exec_module().
        if not hasattr(loader, 'exec_module'):
            raise TypeError('loader must define exec_module()')

    @classmethod
    def factory(cls, loader):
        """Construct a callable which returns the eager loader made lazy."""
        cls.__check_eager_loader(loader)
        return lambda *args, **kwargs: cls(loader(*args, **kwargs))

    def __init__(self, loader):
        self.__check_eager_loader(loader)
        self.loader = loader

    def create_module(self, spec):
        # Module creation itself is delegated eagerly to the real loader.
        return self.loader.create_module(spec)

    def exec_module(self, module):
        """Make the module load lazily."""
        module.__spec__.loader = self.loader
        module.__loader__ = self.loader
        # Snapshot the module's state via copies; touching attributes on the
        # module object later (even module.__spec__.loader = None) would
        # trigger the lazy load.
        state = {'__dict__': module.__dict__.copy(),
                 '__class__': module.__class__}
        module.__spec__.loader_state = state
        module.__class__ = _LazyModule
|
# Farmer/rabbit example knowledge base: ground facts plus Horn-clause
# rules.  Syntax: '==>' is implication, '&' conjunction; lowercase terms
# are variables, capitalized terms are constants/predicates.
farmer = {
'kb': '''
Farmer(Mac)
Rabbit(Pete)
Mother(MrsMac, Mac)
Mother(MrsRabbit, Pete)
(Rabbit(r) & Farmer(f)) ==> Hates(f, r)
(Mother(m, c)) ==> Loves(m, c)
(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)
(Farmer(f)) ==> Human(f)
(Mother(m, h) & Human(h)) ==> Human(m)
''',
# Queries to run against the KB, one per line.
'queries':'''
Human(x)
Hates(x, y)
''',
}
# Classic "West is a criminal" example: an American who sells weapons to
# a hostile nation is a criminal.
weapons = {
'kb': '''
(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)
Owns(Nono, M1)
Missile(M1)
(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)
Missile(x) ==> Weapon(x)
Enemy(x, America) ==> Hostile(x)
American(West)
Enemy(Nono, America)
''',
# Queries to run against the KB, one per line.
'queries':'''
Criminal(x)
''',
}
# Drinks knowledge base.  Fixes two defects in the original data:
# * 'BloodMaryMix' was spelled inconsistently ('BloodyMaryMix' in one
#   clause), so those facts could never unify; normalized everywhere.
# * The final rule concluded Drink(m) from Liquor(l), leaving the rule
#   variable unbound; it now concludes Drink(l).
cocktails = {'kb': '''
Liquor(Vodka)
Liquor(Tequila)
Liquor(Gin)
Liquor(Whiskey)
Liquor(Rum)
Mixer(OrangeJuice)
Mixer(Coke)
Mixer(MargaritaMix)
Mixer(BloodyMaryMix)
Mixer(GingerAle)
Cocktail(Vodka, OrangeJuice)
Cocktail(Vodka, BloodyMaryMix)
Cocktail(Vodka, GingerAle)
Cocktail(Tequila, OrangeJuice)
Cocktail(Tequila, MargaritaMix)
Cocktail(Tequila, BloodyMaryMix)
Cocktail(Whiskey, Coke)
Cocktail(Whiskey, GingerAle)
Cocktail(Rum, OrangeJuice)
Cocktail(Rum, Coke)
(Liquor(l) & Mixer(m)) ==> Cocktail(l, m)
(Mixer(m)) ==> Nonalcoholic(m)
(Liquor(l)) ==> Alcoholic(l)
(Mixer(m)) ==> Drink(m)
(Liquor(l)) ==> Drink(l)
''',
'queries': '''
Liquor(x)
Cocktail(x, y)
''',
}
# Registry of example problems keyed by name.
# NOTE(review): only 'cocktails' is registered although 'farmer' and
# 'weapons' are defined above -- confirm whether that is intentional.
Examples = {
'cocktails': cocktails,
}
|
from gnuradio import gr, gr_unittest
from gnuradio import blocks, digital
import pmt
import numpy as np
import sys
def make_length_tag(offset, length):
    """Build a 'packet_len' stream tag at *offset* carrying *length*."""
    tag_dict = {'offset': offset,
                'key': pmt.intern('packet_len'),
                'value': pmt.from_long(length),
                'srcid': pmt.intern('qa_burst_shaper')}
    return gr.python_to_tag(tag_dict)
def make_tag(offset, key, value):
    """Build a stream tag at *offset* with an arbitrary *key* and pmt *value*."""
    tag_dict = {'offset': offset,
                'key': pmt.intern(key),
                'value': value,
                'srcid': pmt.intern('qa_burst_shaper')}
    return gr.python_to_tag(tag_dict)
def compare_tags(a, b):
    """Return True when two stream tags match in offset, key and value."""
    if a.offset != b.offset:
        return False
    return pmt.equal(a.key, b.key) and pmt.equal(a.value, b.value)
class qa_burst_shaper (gr_unittest.TestCase):
    """QA for burst_shaper_ff/cc: edge windowing, padding, phasing symbols
    and tag handling.

    Fix: the original used ``xrange``, which does not exist on Python 3
    (NameError); replaced with ``range`` (equivalent on Python 2 for these
    tiny loops).
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_ff(self):
        """Float burst: window halves shape the edges, zeros pad both sides."""
        prepad = 10
        postpad = 10
        length = 20
        data = np.ones(length + 10)  # need 10 more to push things through
        window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
        tags = (make_length_tag(0, length),)
        expected = np.concatenate((np.zeros(prepad), window[0:5],
                                   np.ones(length - len(window)), window[5:10],
                                   np.zeros(postpad)))
        etag = make_length_tag(0, length + prepad + postpad)

        # flowgraph
        source = blocks.vector_source_f(data, tags=tags)
        shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
                                         post_padding=postpad)
        sink = blocks.vector_sink_f()
        self.tb.connect(source, shaper, sink)
        self.tb.run()

        # checks
        self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
        self.assertTrue(compare_tags(sink.tags()[0], etag))

    def test_cc(self):
        """Complex burst: same as test_ff with complex samples."""
        prepad = 10
        postpad = 10
        length = 20
        data = np.ones(length + 10,
                       dtype=complex)  # need 10 more to push things through
        window = np.concatenate((-2.0*np.ones(5, dtype=complex),
                                 -4.0*np.ones(5, dtype=complex)))
        tags = (make_length_tag(0, length),)
        expected = np.concatenate((np.zeros(prepad, dtype=complex), window[0:5],
                                   np.ones(length - len(window), dtype=complex),
                                   window[5:10], np.zeros(postpad,
                                                          dtype=complex)))
        etag = make_length_tag(0, length + prepad + postpad)

        # flowgraph
        source = blocks.vector_source_c(data, tags=tags)
        shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
                                         post_padding=postpad)
        sink = blocks.vector_sink_c()
        self.tb.connect(source, shaper, sink)
        self.tb.run()

        # checks
        self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
        self.assertTrue(compare_tags(sink.tags()[0], etag))

    def test_ff_with_phasing(self):
        """Phasing symbols lengthen the burst by len(window) samples."""
        prepad = 10
        postpad = 10
        length = 20
        data = np.ones(length + 10)  # need 10 more to push things through
        window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
        tags = (make_length_tag(0, length),)
        phasing = np.zeros(5)
        for i in range(5):
            phasing[i] = ((-1.0)**i)
        expected = np.concatenate((np.zeros(prepad), phasing*window[0:5],
                                   np.ones(length), phasing*window[5:10],
                                   np.zeros(postpad)))
        etag = make_length_tag(0, length + prepad + postpad + len(window))

        # flowgraph
        source = blocks.vector_source_f(data, tags=tags)
        shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
                                         post_padding=postpad,
                                         insert_phasing=True)
        sink = blocks.vector_sink_f()
        self.tb.connect(source, shaper, sink)
        self.tb.run()

        # checks
        self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
        self.assertTrue(compare_tags(sink.tags()[0], etag))

    def test_cc_with_phasing(self):
        """Complex variant of the phasing test."""
        prepad = 10
        postpad = 10
        length = 20
        data = np.ones(length + 10,
                       dtype=complex)  # need 10 more to push things through
        window = np.concatenate((-2.0*np.ones(5, dtype=complex),
                                 -4.0*np.ones(5, dtype=complex)))
        tags = (make_length_tag(0, length),)
        phasing = np.zeros(5, dtype=complex)
        for i in range(5):
            phasing[i] = complex((-1.0)**i)
        expected = np.concatenate((np.zeros(prepad, dtype=complex),
                                   phasing*window[0:5],
                                   np.ones(length, dtype=complex),
                                   phasing*window[5:10],
                                   np.zeros(postpad, dtype=complex)))
        etag = make_length_tag(0, length + prepad + postpad + len(window))

        # flowgraph
        source = blocks.vector_source_c(data, tags=tags)
        shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
                                         post_padding=postpad,
                                         insert_phasing=True)
        sink = blocks.vector_sink_c()
        self.tb.connect(source, shaper, sink)
        self.tb.run()

        # checks
        self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
        self.assertTrue(compare_tags(sink.tags()[0], etag))

    def test_odd_window(self):
        """An odd-length window shares its center sample between both ramps."""
        prepad = 10
        postpad = 10
        length = 20
        data = np.ones(length + 10)  # need 10 more to push things through
        window = np.concatenate((-2.0*np.ones(5), -3.0*np.ones(1),
                                 -4.0*np.ones(5)))
        tags = (make_length_tag(0, length),)
        expected = np.concatenate((np.zeros(prepad), window[0:6],
                                   np.ones(length - len(window) - 1),
                                   window[5:11], np.zeros(postpad)))
        etag = make_length_tag(0, length + prepad + postpad)

        # flowgraph
        source = blocks.vector_source_f(data, tags=tags)
        shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
                                         post_padding=postpad)
        sink = blocks.vector_sink_f()
        self.tb.connect(source, shaper, sink)
        self.tb.run()

        # checks
        self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
        self.assertTrue(compare_tags(sink.tags()[0], etag))

    def test_short_burst(self):
        """A burst shorter than the window gets truncated ramps."""
        prepad = 10
        postpad = 10
        length = 9
        data = np.ones(length + 10)  # need 10 more to push things through
        window = np.concatenate((-2.0*np.ones(5), -3.0*np.ones(1),
                                 -4.0*np.ones(5)))
        tags = (make_length_tag(0, length),)
        expected = np.concatenate((np.zeros(prepad), window[0:4],
                                   np.ones(1), window[5:9],
                                   np.zeros(postpad)))
        etag = make_length_tag(0, length + prepad + postpad)

        # flowgraph
        source = blocks.vector_source_f(data, tags=tags)
        shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
                                         post_padding=postpad)
        sink = blocks.vector_sink_f()
        self.tb.connect(source, shaper, sink)
        self.tb.run()

        # checks
        self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
        self.assertTrue(compare_tags(sink.tags()[0], etag))

    def test_consecutive_bursts(self):
        """Back-to-back bursts are each padded and shaped independently."""
        prepad = 10
        postpad = 10
        length1 = 15
        length2 = 25
        data = np.concatenate((np.ones(length1), -1.0*np.ones(length2),
                               np.zeros(10)))  # need 10 more to push things through
        window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
        tags = (make_length_tag(0, length1), make_length_tag(length1, length2))
        expected = np.concatenate((np.zeros(prepad), window[0:5],
                                   np.ones(length1 - len(window)), window[5:10],
                                   np.zeros(postpad + prepad), -1.0*window[0:5],
                                   -1.0*np.ones(length2 - len(window)),
                                   -1.0*window[5:10], np.zeros(postpad)))
        etags = (make_length_tag(0, length1 + prepad + postpad),
                 make_length_tag(length1 + prepad + postpad,
                                 length2 + prepad + postpad))

        # flowgraph
        source = blocks.vector_source_f(data, tags=tags)
        shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
                                         post_padding=postpad)
        sink = blocks.vector_sink_f()
        self.tb.connect(source, shaper, sink)
        self.tb.run()

        # checks
        self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
        for i in range(len(etags)):
            self.assertTrue(compare_tags(sink.tags()[i], etags[i]))

    def test_tag_gap(self):
        """Samples between bursts (no length tag) are dropped."""
        prepad = 10
        postpad = 10
        length = 20
        gap_len = 5
        data = np.arange(2*length + 10,
                         dtype=float)  # need 10 more to push things through
        window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
        ewindow = window * np.array([1, -1, 1, -1, 1, 1, -1, 1, -1, 1], dtype=float)
        tags = (make_length_tag(0, length),
                make_length_tag(length + gap_len, length))
        expected = np.concatenate((np.zeros(prepad), ewindow[0:5],
                                   np.arange(0, length, dtype=float),
                                   ewindow[5:10], np.zeros(postpad),
                                   np.zeros(prepad), ewindow[0:5],
                                   np.arange(length + gap_len,
                                             2*length + gap_len, dtype=float),
                                   ewindow[5:10], np.zeros(postpad)))
        burst_len = length + len(window) + prepad + postpad
        etags = (make_length_tag(0, burst_len),
                 make_length_tag(burst_len, burst_len))

        # flowgraph
        source = blocks.vector_source_f(data, tags=tags)
        shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
                                         post_padding=postpad,
                                         insert_phasing=True)
        sink = blocks.vector_sink_f()
        self.tb.connect(source, shaper, sink)
        self.tb.run()

        # checks
        self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
        for i in range(len(etags)):
            self.assertTrue(compare_tags(sink.tags()[i], etags[i]))

    def test_tag_propagation(self):
        """Non-length tags shift with padding; tags in gaps are dropped."""
        prepad = 10
        postpad = 10
        length1 = 15
        length2 = 25
        gap_len = 5
        lentag1_offset = 0
        lentag2_offset = length1 + gap_len
        tag1_offset = 0                     # accompanies first length tag
        tag2_offset = length1 + gap_len     # accompanies second length tag
        tag3_offset = 2                     # in ramp-up state
        tag4_offset = length1 + 2           # in gap; tag will be dropped
        tag5_offset = length1 + gap_len + 7  # in copy state

        data = np.concatenate((np.ones(length1), np.zeros(gap_len),
                               -1.0*np.ones(length2), np.zeros(10)))
        window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
        tags = (make_length_tag(lentag1_offset, length1),
                make_length_tag(lentag2_offset, length2),
                make_tag(tag1_offset, 'head', pmt.intern('tag1')),
                make_tag(tag2_offset, 'head', pmt.intern('tag2')),
                make_tag(tag3_offset, 'body', pmt.intern('tag3')),
                make_tag(tag4_offset, 'body', pmt.intern('tag4')),
                make_tag(tag5_offset, 'body', pmt.intern('tag5')))
        expected = np.concatenate((np.zeros(prepad), window[0:5],
                                   np.ones(length1 - len(window)), window[5:10],
                                   np.zeros(postpad + prepad), -1.0*window[0:5],
                                   -1.0*np.ones(length2 - len(window)),
                                   -1.0*window[5:10], np.zeros(postpad)))
        elentag1_offset = 0
        elentag2_offset = length1 + prepad + postpad
        etag1_offset = 0
        etag2_offset = elentag2_offset
        etag3_offset = prepad + tag3_offset
        etag5_offset = 2*prepad + postpad + tag5_offset - gap_len
        etags = (make_length_tag(elentag1_offset, length1 + prepad + postpad),
                 make_length_tag(elentag2_offset, length2 + prepad + postpad),
                 make_tag(etag1_offset, 'head', pmt.intern('tag1')),
                 make_tag(etag2_offset, 'head', pmt.intern('tag2')),
                 make_tag(etag3_offset, 'body', pmt.intern('tag3')),
                 make_tag(etag5_offset, 'body', pmt.intern('tag5')))

        # flowgraph
        source = blocks.vector_source_f(data, tags=tags)
        shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
                                         post_padding=postpad)
        sink = blocks.vector_sink_f()
        self.tb.connect(source, shaper, sink)
        self.tb.run()

        # checks
        self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
        for x, y in zip(sorted(sink.tags(), key=gr.tag_t_offset_compare_key()),
                        sorted(etags, key=gr.tag_t_offset_compare_key())):
            self.assertTrue(compare_tags(x, y))
if __name__ == '__main__':
    # Run the QA suite; the second argument names the XML results file
    # used by the GNU Radio test runner.
    gr_unittest.run(qa_burst_shaper, "qa_burst_shaper.xml")
|
from __future__ import unicode_literals
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import no_oracle
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone
if HAS_GEOS:
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article, Event
@skipUnlessDBFeature("gis_enabled")
class RelatedGeoModelTest(TestCase):
fixtures = ['initial']
def test02_select_related(self):
    "Testing `select_related` on geographic models (see #7126)."
    querysets = [
        City.objects.order_by('id'),
        City.objects.order_by('id').select_related(),
        City.objects.order_by('id').select_related('location'),
    ]
    # Reference data for what's in the fixtures.
    cities = (
        ('Aurora', 'TX', -97.516111, 33.058333),
        ('Roswell', 'NM', -104.528056, 33.387222),
        ('Kecksburg', 'PA', -79.460734, 40.18476),
    )
    for qs in querysets:
        for (exp_name, exp_state, lon, lat), city in zip(cities, qs):
            self.assertEqual(exp_name, city.name)
            self.assertEqual(exp_state, city.state)
            self.assertEqual(Point(lon, lat), city.location.point)
@skipUnlessDBFeature("has_transform_method")
def test03_transform_related(self):
    "Testing the `transform` GeoQuerySet method on related geographic models."
    # Transformations target state plane coordinate systems using US
    # Survey Feet, so 0 decimal places means error within 1 survey foot.
    tol = 0

    def assert_point_close(ref, pnt):
        self.assertAlmostEqual(ref.x, pnt.x, tol)
        self.assertAlmostEqual(ref.y, pnt.y, tol)
        self.assertEqual(ref.srid, pnt.srid)

    # Each city transformed to the SRID of its state plane coordinate system.
    transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
                   ('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
                   ('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
                   )
    for name, srid, wkt in transformed:
        # Doing this implicitly sets `select_related` select the location.
        # TODO: Fix why this breaks on Oracle.
        cities = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
        assert_point_close(GEOSGeometry(wkt, srid), cities[0].location.point)
@skipUnlessDBFeature("supports_extent_aggr")
def test04a_related_extent_aggregate(self):
    "Testing the `extent` GeoQuerySet aggregates on related geographic models."
    aggs = City.objects.aggregate(Extent('location__point'))
    # One extent for all locations, one excluding New Mexico (Roswell).
    all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
    txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
    cases = [
        (all_extent, City.objects.extent(field_name='location__point')),
        (txpa_extent, City.objects.exclude(state='NM').extent(field_name='location__point')),
        (all_extent, aggs['location__point__extent']),
    ]
    # Four decimal places because Oracle and PostGIS compute extents
    # slightly differently.
    tol = 4
    for expected, actual in cases:
        for expected_val, actual_val in zip(expected, actual):
            self.assertAlmostEqual(expected_val, actual_val, tol)
@skipUnlessDBFeature("has_unionagg_method")
def test04b_related_union_aggregate(self):
    "Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
    aggs = City.objects.aggregate(Union('location__point'))
    # Component points of the aggregate union; each corresponds to a City PK.
    pnts = [Point(-104.528056, 33.387222),
            Point(-97.516111, 33.058333),
            Point(-79.460734, 40.18476),
            Point(-96.801611, 32.782057),
            Point(-95.363151, 29.763374)]
    p1, p2, p3, p4, p5 = pnts
    # The second reference union corresponds to a union query that has a
    # `.filter()` limiting the rows before `.unionagg()` is called.
    ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
    ref_u2 = MultiPoint(p2, p3, srid=4326)
    u1 = City.objects.unionagg(field_name='location__point')
    u2 = City.objects.exclude(
        name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth'),
    ).unionagg(field_name='location__point')
    u3 = aggs['location__point__union']
    self.assertEqual(type(u1), MultiPoint)
    self.assertEqual(type(u3), MultiPoint)
    # Point ordering within the union result is not defined and is
    # backend/GEOS dependent, so compare as sets of EWKT.
    self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u1})
    self.assertSetEqual({p.ewkt for p in ref_u2}, {p.ewkt for p in u2})
    self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u3})
def test05_select_related_fk_to_subclass(self):
    "Testing that calling select_related on a query over a model with an FK to a model subclass works"
    # Regression test for #9752.
    # Evaluating the queryset is itself the assertion: it used to raise.
    list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
    "Testing F() expressions on GeometryFields."
    # Constructing a dummy parcel border and getting the City instance for
    # assigning the FK.
    b1 = GEOSGeometry(
        'POLYGON((-97.501205 33.052520,-97.501205 33.052576,'
        '-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))',
        srid=4326
    )
    pcity = City.objects.get(name='Aurora')
    # First parcel has incorrect center point that is equal to the City;
    # it also has a second border that is different from the first as a
    # 100ft buffer around the City.
    # NOTE(review): 2276 is presumably a state plane SRID in US survey
    # feet (hence buffer(100) ~ 100 ft) -- confirm.
    c1 = pcity.location.point
    c2 = c1.transform(2276, clone=True)
    b2 = c2.buffer(100)
    Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
    # Now creating a second Parcel where the borders are the same, just
    # in different coordinate systems. The center points are also the
    # same (but in different coordinate systems), and this time they
    # actually correspond to the centroid of the border.
    c1 = b1.centroid
    c2 = c1.transform(2276, clone=True)
    Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
    # Should return the second Parcel, which has the center within the
    # border.
    qs = Parcel.objects.filter(center1__within=F('border1'))
    self.assertEqual(1, len(qs))
    self.assertEqual('P2', qs[0].name)
    if connection.features.supports_transform:
        # This time center2 is in a different coordinate system and needs
        # to be wrapped in transformation SQL.
        qs = Parcel.objects.filter(center2__within=F('border1'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P2', qs[0].name)
    # Should return the first Parcel, which has the center point equal
    # to the point in the City ForeignKey.
    qs = Parcel.objects.filter(center1=F('city__location__point'))
    self.assertEqual(1, len(qs))
    self.assertEqual('P1', qs[0].name)
    if connection.features.supports_transform:
        # This time the city column should be wrapped in transformation SQL.
        qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P1', qs[0].name)
def test07_values(self):
    "Testing values() and values_list() and GeoQuerySets."
    model_qs = Location.objects.all()
    dict_qs = Location.objects.values()
    tuple_qs = Location.objects.values_list()
    # Walk the three querysets in lockstep; geometry values must come
    # back as Geometry objects, not the raw strings the database returns.
    for model_obj, row_dict, row_tuple in zip(model_qs, dict_qs, tuple_qs):
        self.assertIsInstance(row_dict['point'], Geometry)
        self.assertIsInstance(row_tuple[1], Geometry)
        self.assertEqual(model_obj.point, row_dict['point'])
        self.assertEqual(model_obj.point, row_tuple[1])
@override_settings(USE_TZ=True)
def test_07b_values(self):
    "Testing values() and values_list() with aware datetime. See #21565."
    # Evaluating the queryset with USE_TZ on is itself the assertion;
    # it used to raise for timezone-aware datetimes.
    Event.objects.create(name="foo", when=timezone.now())
    list(Event.objects.values_list('when'))
def test08_defer_only(self):
    "Testing defer() and only() on Geographic models."
    plain_qs = Location.objects.all()
    deferred_qs = Location.objects.defer('point')
    # Accessing the deferred geometry field must load the same value.
    for loc, deferred_loc in zip(plain_qs, deferred_qs):
        self.assertEqual(loc.point, deferred_loc.point)
def test09_pk_relations(self):
    "Ensuring correct primary key column is selected across relations. See #10757."
    # The expected ID values -- notice the last two location IDs are out
    # of order: Dallas and Houston have location IDs that differ from
    # their own PKs, which verifies that the related location ID column
    # is selected rather than the city's ID column.
    expected_pairs = zip((1, 2, 3, 4, 5), (1, 2, 3, 5, 4))
    ids_qs = City.objects.order_by('id').values('id', 'location__id')
    for val_dict, (city_id, loc_id) in zip(ids_qs, expected_pairs):
        self.assertEqual(val_dict['id'], city_id)
        self.assertEqual(val_dict['location__id'], loc_id)
# TODO: fix on Oracle -- qs2 returns an empty result for an unknown reason
@no_oracle
def test10_combine(self):
    "Testing the combination of two GeoQuerySets. See #10807."
    aurora_buf = City.objects.get(name='Aurora').location.point.buffer(0.1)
    kecksburg_buf = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
    combined = (City.objects.filter(location__point__within=aurora_buf) |
                City.objects.filter(location__point__within=kecksburg_buf))
    names = [city.name for city in combined]
    self.assertEqual(2, len(names))
    self.assertIn('Aurora', names)
    self.assertIn('Kecksburg', names)
def test11_geoquery_pickle(self):
    "Ensuring GeoQuery objects are unpickled correctly. See #10839."
    import pickle
    from django.contrib.gis.db.models.sql import GeoQuery
    # Round-trip the query through pickle; the class must survive.
    query = City.objects.all().query
    restored = pickle.loads(pickle.dumps(query))
    self.assertEqual(GeoQuery, restored.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
#  ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
    "Testing `Count` aggregate use with the `GeoManager` on geo-fields."
    # 'Fort Worth' shares its location with Dallas, so the annotated
    # city count for that location must be 2.
    dallas = City.objects.get(name='Dallas')
    annotated = Location.objects.annotate(num_cities=Count('city'))
    self.assertEqual(2, annotated.get(id=dallas.location.id).num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertIsInstance(qs[0]['point'], GEOSGeometry)
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@skipUnlessDBFeature("supports_collect_aggr")
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry(
'MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,'
'-95.363151 29.763374,-96.801611 32.782057)'
)
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertTrue(ref_geom.equals(coll))
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
|
from debug_toolbar.utils.sqlparse.tokens import *
# Token-type map for SQL keywords that are *not* in KEYWORDS_COMMON.
# Built from flat name tuples so the token type for each group is stated
# exactly once.
#
# NOTE(review): several entries look like misspellings inherited from the
# vendored sqlparse ('COALSECE', 'CORRESPONTING', 'ESCEPTION',
# 'INDITCATOR', 'PARAMATER_*', 'SQLWARNINIG', 'TRANSATION*').  They are
# kept verbatim so lexing behavior does not change -- confirm against the
# bundled sqlparse version before correcting them.
_KEYWORD_NAMES = (
    'ABORT', 'ABS', 'ABSOLUTE', 'ACCESS', 'ADA', 'ADD', 'ADMIN', 'AFTER',
    'AGGREGATE', 'ALIAS', 'ALL', 'ALLOCATE', 'ANALYSE', 'ANALYZE', 'AND',
    'ANY', 'ARE', 'AS', 'ASC', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT',
    'ASYMMETRIC', 'AT', 'ATOMIC', 'AUTHORIZATION', 'AVG', 'BACKWARD',
    'BEFORE', 'BEGIN', 'BETWEEN', 'BITVAR', 'BIT_LENGTH', 'BOTH', 'BREADTH',
    'BY', 'CACHE', 'CALL', 'CALLED', 'CARDINALITY', 'CASCADE', 'CASCADED',
    'CASE', 'CAST', 'CATALOG', 'CATALOG_NAME', 'CHAIN', 'CHARACTERISTICS',
    'CHARACTER_LENGTH', 'CHARACTER_SET_CATALOG', 'CHARACTER_SET_NAME',
    'CHARACTER_SET_SCHEMA', 'CHAR_LENGTH', 'CHECK', 'CHECKED', 'CHECKPOINT',
    'CLASS', 'CLASS_ORIGIN', 'CLOB', 'CLOSE', 'CLUSTER', 'COALSECE',
    'COBOL', 'COLLATE', 'COLLATION', 'COLLATION_CATALOG', 'COLLATION_NAME',
    'COLLATION_SCHEMA', 'COLUMN', 'COLUMN_NAME', 'COMMAND_FUNCTION',
    'COMMAND_FUNCTION_CODE', 'COMMENT', 'COMMIT', 'COMMITTED', 'COMPLETION',
    'CONDITION_NUMBER', 'CONNECT', 'CONNECTION', 'CONNECTION_NAME',
    'CONSTRAINT', 'CONSTRAINTS', 'CONSTRAINT_CATALOG', 'CONSTRAINT_NAME',
    'CONSTRAINT_SCHEMA', 'CONSTRUCTOR', 'CONTAINS', 'CONTINUE',
    'CONVERSION', 'CONVERT', 'COPY', 'CORRESPONTING', 'COUNT', 'CREATEDB',
    'CREATEUSER', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_DATE',
    'CURRENT_PATH', 'CURRENT_ROLE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP',
    'CURRENT_USER', 'CURSOR', 'CURSOR_NAME', 'CYCLE', 'DATA', 'DATABASE',
    'DATETIME_INTERVAL_CODE', 'DATETIME_INTERVAL_PRECISION', 'DAY',
    'DEALLOCATE', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE',
    'DEFERRED', 'DEFINED', 'DEFINER', 'DELIMITER', 'DELIMITERS', 'DEREF',
    'DESC', 'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR',
    'DETERMINISTIC', 'DIAGNOSTICS', 'DICTIONARY', 'DISCONNECT', 'DISPATCH',
    'DISTINCT', 'DO', 'DOMAIN', 'DYNAMIC', 'DYNAMIC_FUNCTION',
    'DYNAMIC_FUNCTION_CODE', 'EACH', 'ELSE', 'ENCODING', 'ENCRYPTED',
    'END', 'END-EXEC', 'EQUALS', 'ESCAPE', 'EVERY', 'EXCEPT', 'ESCEPTION',
    'EXCLUDING', 'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING', 'EXISTS',
    'EXTERNAL', 'EXTRACT', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FOR',
    'FORCE', 'FOREIGN', 'FORTRAN', 'FORWARD', 'FOUND', 'FREE', 'FREEZE',
    'FROM', 'FULL', 'FUNCTION', 'G', 'GENERAL', 'GENERATED', 'GET',
    'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GRANTED', 'GROUP', 'GROUPING',
    'HANDLER', 'HAVING', 'HIERARCHY', 'HOLD', 'HOST', 'IDENTITY', 'IF',
    'IGNORE', 'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLEMENTATION',
    'IMPLICIT', 'IN', 'INCLUDING', 'INCREMENT', 'INDEX', 'INDITCATOR',
    'INFIX', 'INHERITS', 'INITIALIZE', 'INITIALLY', 'INNER', 'INOUT',
    'INPUT', 'INSENSITIVE', 'INSTANTIABLE', 'INSTEAD', 'INTERSECT', 'INTO',
    'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'ITERATE', 'JOIN', 'K', 'KEY',
    'KEY_MEMBER', 'KEY_TYPE', 'LANCOMPILER', 'LANGUAGE', 'LARGE', 'LAST',
    'LATERAL', 'LEADING', 'LEFT', 'LENGTH', 'LESS', 'LEVEL', 'LIKE',
    'LIMIT', 'LISTEN', 'LOAD', 'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP',
    'LOCATION', 'LOCATOR', 'LOCK', 'LOWER', 'M', 'MAP', 'MATCH', 'MAX',
    'MAXVALUE', 'MESSAGE_LENGTH', 'MESSAGE_OCTET_LENGTH', 'MESSAGE_TEXT',
    'METHOD', 'MIN', 'MINUTE', 'MINVALUE', 'MOD', 'MODE', 'MODIFIES',
    'MODIFY', 'MONTH', 'MORE', 'MOVE', 'MUMPS', 'NAMES', 'NATIONAL',
    'NATURAL', 'NCHAR', 'NCLOB', 'NEW', 'NEXT', 'NO', 'NOCREATEDB',
    'NOCREATEUSER', 'NONE', 'NOT', 'NOTHING', 'NOTIFY', 'NOTNULL', 'NULL',
    'NULLABLE', 'NULLIF', 'OBJECT', 'OCTET_LENGTH', 'OF', 'OFF', 'OFFSET',
    'OIDS', 'OLD', 'ON', 'ONLY', 'OPEN', 'OPERATION', 'OPERATOR',
    'OPTION', 'OPTIONS', 'OR', 'ORDER', 'ORDINALITY', 'OUT', 'OUTER',
    'OUTPUT', 'OVERLAPS', 'OVERLAY', 'OVERRIDING', 'OWNER', 'PAD',
    'PARAMETER', 'PARAMETERS', 'PARAMETER_MODE', 'PARAMATER_NAME',
    'PARAMATER_ORDINAL_POSITION', 'PARAMETER_SPECIFIC_CATALOG',
    'PARAMETER_SPECIFIC_NAME', 'PARAMATER_SPECIFIC_SCHEMA', 'PARTIAL',
    'PASCAL', 'PENDANT', 'PLACING', 'PLI', 'POSITION', 'POSTFIX',
    'PRECISION', 'PREFIX', 'PREORDER', 'PREPARE', 'PRESERVE', 'PRIMARY',
    'PRIOR', 'PRIVILEGES', 'PROCEDURAL', 'PROCEDURE', 'PUBLIC', 'RAISE',
    'READ', 'READS', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES',
    'REFERENCING', 'REINDEX', 'RELATIVE', 'RENAME', 'REPEATABLE',
    'REPLACE', 'RESET', 'RESTART', 'RESTRICT', 'RESULT', 'RETURN',
    'RETURNED_LENGTH', 'RETURNED_OCTET_LENGTH', 'RETURNED_SQLSTATE',
    'RETURNS', 'REVOKE', 'RIGHT', 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE',
    'ROUTINE_CATALOG', 'ROUTINE_NAME', 'ROUTINE_SCHEMA', 'ROW', 'ROWS',
    'ROW_COUNT', 'RULE', 'SAVE_POINT', 'SCALE', 'SCHEMA', 'SCHEMA_NAME',
    'SCOPE', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 'SELF', 'SENSITIVE',
    'SERIALIZABLE', 'SERVER_NAME', 'SESSION', 'SESSION_USER', 'SETOF',
    'SETS', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', 'SIZE', 'SOME',
    'SOURCE', 'SPACE', 'SPECIFIC', 'SPECIFICTYPE', 'SPECIFIC_NAME', 'SQL',
    'SQLCODE', 'SQLERROR', 'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNINIG',
    'STABLE', 'START', 'STATE', 'STATEMENT', 'STATIC', 'STATISTICS',
    'STDIN', 'STDOUT', 'STORAGE', 'STRICT', 'STRUCTURE', 'STYPE',
    'SUBCLASS_ORIGIN', 'SUBLIST', 'SUBSTRING', 'SUM', 'SYMMETRIC',
    'SYSID', 'SYSTEM', 'SYSTEM_USER', 'TABLE', 'TABLE_NAME',
    # Bug fix: this entry was "' TEMP'" -- the stray leading space meant
    # the TEMP keyword could never match an uppercased token.
    'TEMP',
    'TEMPLATE', 'TEMPORARY', 'TERMINATE', 'THAN', 'THEN', 'TIMESTAMP',
    'TIMEZONE_HOUR', 'TIMEZONE_MINUTE', 'TO', 'TOAST', 'TRAILING',
    'TRANSATION', 'TRANSACTIONS_COMMITTED', 'TRANSACTIONS_ROLLED_BACK',
    'TRANSATION_ACTIVE', 'TRANSFORM', 'TRANSFORMS', 'TRANSLATE',
    'TRANSLATION', 'TREAT', 'TRIGGER', 'TRIGGER_CATALOG', 'TRIGGER_NAME',
    'TRIGGER_SCHEMA', 'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE',
    'UNCOMMITTED', 'UNDER', 'UNENCRYPTED', 'UNION', 'UNIQUE', 'UNKNOWN',
    'UNLISTEN', 'UNNAMED', 'UNNEST', 'UNTIL', 'UPPER', 'USAGE', 'USER',
    'USER_DEFINED_TYPE_CATALOG', 'USER_DEFINED_TYPE_NAME',
    'USER_DEFINED_TYPE_SCHEMA', 'USING', 'VACUUM', 'VALID', 'VALIDATOR',
    'VALUES', 'VARIABLE', 'VERBOSE', 'VERSION', 'VIEW', 'VOLATILE',
    'WHEN', 'WHENEVER', 'WHERE', 'WITH', 'WITHOUT', 'WORK', 'WRITE',
    'YEAR', 'ZONE',
)

# Built-in SQL type names lex as Name.Builtin rather than plain keywords.
_BUILTIN_TYPE_NAMES = (
    'ARRAY', 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR',
    'CHARACTER', 'DATE', 'DEC', 'DECIMAL', 'FLOAT', 'INT', 'INTEGER',
    'INTERVAL', 'NUMBER', 'NUMERIC', 'REAL', 'SERIAL', 'SMALLINT',
    'VARCHAR', 'VARYING', 'INT8', 'SERIAL8', 'TEXT',
)

KEYWORDS = dict((name, Keyword) for name in _KEYWORD_NAMES)
KEYWORDS.update((name, Name.Builtin) for name in _BUILTIN_TYPE_NAMES)
# The most frequently seen keywords, consulted before the big KEYWORDS
# table.  DML/DDL statements carry their specialized token types; the
# remainder are generic keywords.
KEYWORDS_COMMON = dict.fromkeys(
    ('WHERE', 'FROM', 'INNER', 'JOIN', 'AND', 'OR', 'LIKE', 'ON', 'IN',
     'SET', 'BY', 'GROUP', 'ORDER', 'LEFT', 'OUTER', 'IF', 'END', 'THEN',
     'LOOP', 'AS', 'ELSE', 'FOR', 'CASE', 'WHEN', 'MIN', 'MAX',
     'DISTINCT'),
    Keyword)
KEYWORDS_COMMON.update({
    'SELECT': Keyword.DML,
    'INSERT': Keyword.DML,
    'DELETE': Keyword.DML,
    'UPDATE': Keyword.DML,
    'DROP': Keyword.DDL,
    'CREATE': Keyword.DDL,
    'ALTER': Keyword.DDL,
})
|
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Directory holding this script; the package name is derived from it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Parsed command-line options; populated in main().
PARAMETERS = None
# Session-bus environment required to run xwalkctl as the "app" user.
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
# Device-side locations the test content is pushed to.
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
    # Commands touching xwalkctl must run as the "app" user with the
    # session bus exported; everything else passes through untouched.
    if "xwalkctl" not in cmd:
        return cmd
    return "su - app -c '%s;%s'" % (XW_ENV, cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('xwalkctl'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('xwalkctl'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_app_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 1:
continue
name = pkg_infos[1]
if pkg_name == name:
test_app_id = pkg_infos[0]
print test_app_id
break
return test_app_id
def doRemoteCMD(cmd=None):
    # Route the command to the device through sdb shell or ssh,
    # depending on the configured mode.
    if PARAMETERS.mode == "SDB":
        full_cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
    else:
        full_cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
    return doCMD(full_cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the device; return True on success.

    Uses ``sdb push`` in SDB mode and ``scp -r`` otherwise, then syncs
    the device filesystem.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    doRemoteCMD("sync")
    # Bug fix: the success flag was inverted -- True was returned when the
    # copy command FAILED (return_code != 0), while every caller treats a
    # True result as success (`if not doRemoteCopy(...): action_status =
    # False`).  Return True only when the command exited cleanly.
    return return_code == 0
def uninstPKGs():
    # Uninstall every .wgt widget found beside this script, then remove
    # the copied content directory from the device.
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if not file.endswith(".wgt"):
                continue
            pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
            if not pkg_id:
                action_status = False
                continue
            (return_code, output) = doRemoteCMD(
                "xwalkctl -u %s" % pkg_id)
            for line in output:
                if "Failure" in line:
                    action_status = False
                    break
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    # Push and install every .wgt widget beside this script, then copy
    # the remaining test content to the device.
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if not file.endswith(".wgt"):
                continue
            if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                action_status = False
            (return_code, output) = doRemoteCMD(
                "xwalkctl -i %s/%s" % (SRC_DIR, file))
            doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
            for line in output:
                if "Failure" in line:
                    action_status = False
                    break
    # Everything except widgets and this installer script is plain
    # content to be copied verbatim.
    for item in glob.glob("%s/*" % SCRIPT_DIR):
        if item.endswith(".wgt") or item.endswith("inst.py"):
            continue
        item_name = os.path.basename(item)
        if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
            action_status = False
    return action_status
def main():
    """Parse command-line options and run the install/uninstall action."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    # Default to SDB mode; in SDB mode auto-pick the first attached
    # device when none was given on the command line.
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                # "sdb devices" lines look like "<serial>\tdevice\t<name>".
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # -i and -u are mutually exclusive.
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    # Default action (no -u) is install.
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
# Script entry point: main() exits non-zero itself on failure, so reaching
# this line means success.
if __name__ == "__main__":
    main()
    sys.exit(0)
|
"""Merging of policies."""
from typing import Dict, List, Set, cast
from .types import CategoryType, PolicyType
def merge_policies(policies: List[PolicyType]) -> PolicyType:
    """Merge multiple policies into a single policy.

    Each category that appears in any input policy is merged across all
    of the policies (missing entries contribute None, i.e. "no opinion").
    """
    new_policy: Dict[str, CategoryType] = {}
    seen: Set[str] = set()
    for policy in policies:
        for category in policy:
            if category in seen:
                continue
            seen.add(category)
            new_policy[category] = _merge_policies(
                [policy.get(category) for policy in policies]
            )
    # Bug fix: the cast result was previously discarded as a bare
    # expression statement; return the cast value so the declared
    # PolicyType return type is actually honored.
    return cast("PolicyType", new_policy)


def _merge_policies(sources: List[CategoryType]) -> CategoryType:
    """Merge a single category from multiple sources.

    When merging policies, the most permissive source wins, ordered:
    True > Dict > None.

    True: allow everything
    Dict: specify more granular permissions
    None: no opinion

    If there are multiple sources with a dict as policy, each key is
    merged recursively across the sources.
    """
    policy: CategoryType = None
    seen: Set[str] = set()
    for source in sources:
        if source is None:
            continue
        # A source that's True will always win. Shortcut return.
        if source is True:
            return True
        assert isinstance(source, dict)
        if policy is None:
            policy = cast("CategoryType", {})
        assert isinstance(policy, dict)
        for key in source:
            if key in seen:
                continue
            seen.add(key)
            # Only dict sources can have an opinion on this key.
            policy[key] = _merge_policies(
                [src.get(key) for src in sources if isinstance(src, dict)]
            )
    return policy
|
# Sinusoidal-model frequency-scaling example (sms-tools): analyze
# orchestra.wav, scale the partial frequencies over time, resynthesize,
# and plot input/output waveforms with sine tracks over spectrograms.
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): nothing below uses these scipy.signal names (np.hamming is
# used instead), and they were removed from scipy.signal in SciPy >= 1.13;
# confirm nothing else relies on this import before dropping it.
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
import sineTransformations as SMT

# Analysis parameters.
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(801)       # analysis window
N = 2048                  # FFT size
t = -90                   # magnitude threshold in dB
minSineDur = .005         # minimum sine-track duration (seconds)
maxnSines = 150           # maximum simultaneous sine tracks
freqDevOffset = 20        # allowed frequency deviation at 0 Hz
freqDevSlope = 0.02       # deviation growth with frequency
Ns = 512                  # synthesis FFT size
# Bug fix: floor division keeps H an int under Python 3 as well; a plain
# '/' would yield a float and break downstream frame indexing.  Under
# Python 2 integer '/' and '//' are identical here.
H = Ns // 4               # hop size

# Analyze the input: spectrogram plus sinusoidal tracks.
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)

# Scale track frequencies over time (x0.8 at start -> x1.2 at the end).
freqScaling = np.array([0, .8, 1, 1.2])
ytfreq = SMT.sineFreqScaling(tfreq, freqScaling)

# Resynthesize from the scaled tracks and write the result.
y = SM.sineModelSynth(ytfreq, tmag, np.array([]), Ns, H, fs)
mY, pY = STFT.stftAnal(y, fs, w, N, H)
UF.wavwrite(y, fs, 'sineModelFreqScale-orchestra.wav')

# Plotting.
maxplotfreq = 4000.0
plt.figure(1, figsize=(9.5, 7))

# 1) input waveform
plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (orchestra.wav)')

# 2) original sine tracks over the input spectrogram
plt.subplot(4,1,2)
numFrames = int(tfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan        # hide empty track slots
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)

# 3) frequency-scaled sine tracks over the output spectrogram
plt.subplot(4,1,3)
numFrames = int(ytfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = ytfreq*np.less(ytfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('freq-scaled sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:,:maxplotbin+1]))
plt.autoscale(tight=True)

# 4) output waveform
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')

plt.tight_layout()
plt.savefig('sineModelFreqScale-orchestra.png')
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.