gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
"""
Installs and configures heat
"""
import uuid
import logging
import os
from packstack.installer import utils
from packstack.installer import validators
from packstack.modules.ospluginutils import (getManifestTemplate,
manifestfiles,
appendManifestFile)
controller = None
# Plugin name
PLUGIN_NAME = "OS-HEAT"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
    """Register the Heat option groups with the Packstack controller.

    Declares three groups of CLI/config options: the core Heat options
    (host, MySQL and Keystone passwords, which optional APIs to install),
    plus one group each for the CloudWatch and CloudFormation API hosts.
    The latter two groups are only activated when their corresponding
    *_INSTALL option was answered 'y'.
    """
    global controller
    controller = controllerObject
    logging.debug("Adding OpenStack Heat configuration")

    # Core Heat options, asked when CONFIG_HEAT_INSTALL == 'y'.
    parameters = [
        {"CMD_OPTION": "heat-host",
         "USAGE": ('The IP address of the server on which '
                   'to install Heat service'),
         "PROMPT": 'Enter the IP address of the Heat service',
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_ssh],
         "DEFAULT_VALUE": utils.get_localhost_ip(),
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_HEAT_HOST",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        # Random 16-hex-char password generated at plugin load time;
        # USE_DEFAULT is True so it is taken silently unless overridden.
        {"CMD_OPTION": "heat-mysql-password",
         "USAGE": 'The password used by Heat user to authenticate against MySQL',
         "PROMPT": "Enter the password for the Heat MySQL user",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": uuid.uuid4().hex[:16],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_HEAT_DB_PW",
         "USE_DEFAULT": True,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        # Keystone service password for Heat, generated the same way.
        {"CMD_OPTION": "heat-ks-passwd",
         "USAGE": "The password to use for the Heat to authenticate with Keystone",
         "PROMPT": "Enter the password for the Heat Keystone access",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": uuid.uuid4().hex[:16],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_HEAT_KS_PW",
         "USE_DEFAULT": True,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        # y/n toggle that gates the "Heat CloudWatch API" group below.
        {"CMD_OPTION": "os-heat-cloudwatch-install",
         "USAGE": ("Set to 'y' if you would like Packstack to "
                   "install Heat CloudWatch API"),
         "PROMPT": "Should Packstack install Heat CloudWatch API",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "n",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_HEAT_CLOUDWATCH_INSTALL",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        # y/n toggle that gates the "Heat CloudFormation API" group below.
        {"CMD_OPTION": "os-heat-cfn-install",
         "USAGE": ("Set to 'y' if you would like Packstack to "
                   "install Heat CloudFormation API"),
         "PROMPT": "Should Packstack install Heat CloudFormation API",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "n",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_HEAT_CFN_INSTALL",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
    ]
    group = {"GROUP_NAME": "Heat",
             "DESCRIPTION": "Heat Config parameters",
             "PRE_CONDITION": "CONFIG_HEAT_INSTALL",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, parameters)

    # CloudWatch API host, asked only when CONFIG_HEAT_CLOUDWATCH_INSTALL == 'y'.
    parameters = [
        {"CMD_OPTION": "heat-api-cloudwatch-host",
         "USAGE": ('The IP address of the server on which '
                   'to install Heat CloudWatch API service'),
         "PROMPT": ('Enter the IP address of the Heat CloudWatch API '
                    'server'),
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_ssh],
         "DEFAULT_VALUE": utils.get_localhost_ip(),
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_HEAT_CLOUDWATCH_HOST",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
    ]
    group = {"GROUP_NAME": "Heat CloudWatch API",
             "DESCRIPTION": "Heat CloudWatch API config parameters",
             "PRE_CONDITION": "CONFIG_HEAT_CLOUDWATCH_INSTALL",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, parameters)

    # CloudFormation API host, asked only when CONFIG_HEAT_CFN_INSTALL == 'y'.
    parameters = [
        {"CMD_OPTION": "heat-api-cfn-host",
         "USAGE": ('The IP address of the server on which '
                   'to install Heat CloudFormation API service'),
         "PROMPT": ('Enter the IP address of the Heat CloudFormation '
                    'API server'),
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_ssh],
         "DEFAULT_VALUE": utils.get_localhost_ip(),
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_HEAT_CFN_HOST",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
    ]
    group = {"GROUP_NAME": "Heat CloudFormation API",
             "DESCRIPTION": "Heat CloudFormation API config parameters",
             "PRE_CONDITION": "CONFIG_HEAT_CFN_INSTALL",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, parameters)
def initSequences(controller):
    """Queue the Heat installation steps when Heat install was requested.

    Always adds the main Heat and Keystone manifest steps; the CloudWatch
    and CloudFormation steps are appended only when their install flags
    were answered 'y'.
    """
    if controller.CONF['CONFIG_HEAT_INSTALL'] != 'y':
        return

    steps = [
        {'title': 'Adding Heat manifest entries',
         'functions': [create_manifest]},
        {'title': 'Adding Heat Keystone manifest entries',
         'functions': [create_keystone_manifest]},
    ]

    # Optional steps, each gated by its own y/n config flag.
    optional_steps = (
        ('CONFIG_HEAT_CLOUDWATCH_INSTALL',
         'Adding Heat CloudWatch API manifest entries',
         create_cloudwatch_manifest),
        ('CONFIG_HEAT_CFN_INSTALL',
         'Adding Heat CloudFormation API manifest entries',
         create_cfn_manifest),
    )
    for conf_key, title, step_function in optional_steps:
        if controller.CONF.get(conf_key, 'n') == 'y':
            steps.append({'title': title, 'functions': [step_function]})

    controller.addSequence("Installing Heat", [], [], steps)
def create_manifest(config):
    """Resolve the watch/metadata endpoints and append the main Heat manifest.

    The CloudWatch and CloudFormation endpoints fall back to the main Heat
    host when their dedicated API service was not selected for installation.
    """
    if config['CONFIG_HEAT_CLOUDWATCH_INSTALL'] == 'y':
        watch_host = config['CONFIG_HEAT_CLOUDWATCH_HOST']
    else:
        watch_host = config['CONFIG_HEAT_HOST']
    config['CONFIG_HEAT_WATCH_HOST'] = watch_host

    if config['CONFIG_HEAT_CFN_INSTALL'] == 'y':
        metadata_host = config['CONFIG_HEAT_CFN_HOST']
    else:
        metadata_host = config['CONFIG_HEAT_HOST']
    config['CONFIG_HEAT_METADATA_HOST'] = metadata_host

    manifestfile = "%s_heat.pp" % controller.CONF['CONFIG_HEAT_HOST']
    appendManifestFile(manifestfile, getManifestTemplate("heat.pp"))
def create_keystone_manifest(config):
    """Append the Heat Keystone setup to the Keystone host's manifest."""
    keystone_host = controller.CONF['CONFIG_KEYSTONE_HOST']
    appendManifestFile("%s_keystone.pp" % keystone_host,
                       getManifestTemplate("keystone_heat.pp"))
def create_cloudwatch_manifest(config):
    """Append the Heat CloudWatch API manifest for its configured host."""
    cloudwatch_host = controller.CONF['CONFIG_HEAT_CLOUDWATCH_HOST']
    appendManifestFile("%s_heatcw.pp" % cloudwatch_host,
                       getManifestTemplate("heat_cloudwatch.pp"),
                       marker='heat')
def create_cfn_manifest(config):
    """Append the Heat CloudFormation API manifest for its configured host."""
    # NOTE(review): the filename says 'heatcnf' rather than 'heatcfn' --
    # looks like a transposition, but the name is only an internal marker
    # so it is kept as-is.
    cfn_host = controller.CONF['CONFIG_HEAT_CFN_HOST']
    appendManifestFile("%s_heatcnf.pp" % cfn_host,
                       getManifestTemplate("heat_cfn.pp"),
                       marker='heat')
| |
#!/usr/local/bin/python2.7-32
"""
@brief Joystick control for the QuickBot.
@description This program is used to drive the QuickBot via a joystick (actually gamepad).
Currently setup for tank drive.
@author Rowland O'Flaherty (rowlandoflaherty.com)
@date 12/10/2013
@note Does not work with 64-bit python
This code was modified from http://www.pygame.org/docs/ref/joystick.html
@version: 1.0
@copyright: Copyright (C) 2014, Georgia Tech Research Corporation see the LICENSE file included with this software (see LINENSE file)
"""
import numpy as np
import pygame
import socket
import sys
# Parameters
sendFlag = True     # set False to drive the UI without sending UDP commands
pwmMinVal = 45      # smallest PWM magnitude used when outside the dead-zone
pwmMaxVal = 100     # PWM magnitude at full stick deflection
axisMinVal = 0.2    # stick dead-zone: |axis| below this maps to PWM 0

# Get input arguments: the robot IP may be passed as the single CLI argument.
HOST = "192.168.1.101"   # default robot address
PORT = 5005              # robot UDP command port
if len(sys.argv) > 2:
    print 'Invalid number of command line arguments.'
    print 'Proper syntax:'
    print '>> joystickControl.py robotIP'
    print 'Example:'
    print '>> QuickBotRun.py ', HOST
    sys.exit()
if len(sys.argv) == 2:
    HOST = sys.argv[1]
if sendFlag:
    # UDP socket used to push "$PWM=left,right*" command strings to the robot.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Helper functions
def mapAxisToPWM(axisVal, pwmMin=None, pwmMax=None, axisMin=None):
    """Map a joystick axis value in [-1, 1] to a signed motor PWM command.

    The mapping is linear outside a dead-zone, with the sign inverted so
    that full negative deflection gives full positive PWM:
        axis == -1         ->  +pwmMax
        |axis| == axisMin  ->  +/-pwmMin (edge of the dead-zone)
        |axis| <  axisMin  ->  0

    The thresholds default to the module-level tuning constants
    (pwmMinVal, pwmMaxVal, axisMinVal), so existing one-argument calls
    behave exactly as before; passing them explicitly makes the mapping
    reusable and testable with other calibrations.

    Returns the PWM value truncated to int.
    """
    # Fall back to the script's global calibration when not supplied.
    if pwmMin is None:
        pwmMin = pwmMinVal
    if pwmMax is None:
        pwmMax = pwmMaxVal
    if axisMin is None:
        axisMin = axisMinVal

    if np.abs(axisVal) < axisMin:
        return 0
    # Linear gain from the dead-zone edge to full deflection.
    scaling = (pwmMax - pwmMin) / (1.0 - axisMin)
    pwm = (-scaling * (axisVal - np.sign(axisVal) * axisMin)
           - np.sign(axisVal) * pwmMin)
    return int(pwm)
# Define some colors (RGB tuples) used when rendering the status text.
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputing the
# information.
class TextPrint:
    """Render successive lines of text onto a pygame surface.

    Keeps a simple cursor: ``x`` is the current indent column, ``y``
    advances by ``line_height`` after every printed line. Purely a
    display helper -- it knows nothing about joysticks.
    """

    def __init__(self):
        self.reset()
        self.font = pygame.font.Font(None, 20)

    def printScreen(self, screen, textString):
        """Draw one line of text at the current cursor and advance it."""
        rendered = self.font.render(textString, True, BLACK)
        screen.blit(rendered, [self.x, self.y])
        self.y += self.line_height

    def reset(self):
        """Return the cursor to the top-left starting position."""
        self.x, self.y = 10, 10
        self.line_height = 15

    def indent(self):
        """Shift subsequent lines 10 pixels to the right."""
        self.x += 10

    def unindent(self):
        """Undo one level of indent()."""
        self.x -= 10
pygame.init()

# Set the width and height of the screen [width, height].
size = [500, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("QuickBot Joystick Control")

# Loop until the user clicks the close button (or presses Escape).
done = False

# Used to manage how fast the screen updates.
clock = pygame.time.Clock()

# Initialize the joysticks.
pygame.joystick.init()

# Get ready to print.
textPrint = TextPrint()
# -------- Main Program Loop -----------
while done==False:
    # EVENT PROCESSING STEP
    for event in pygame.event.get():  # User did something
        if event.type == pygame.QUIT:  # If user clicked close
            done=True  # Flag that we are done so we exit this loop
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                done=True
        # Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
        # if event.type == pygame.JOYBUTTONDOWN:
        #     print("Joystick button pressed.")
        # if event.type == pygame.JOYBUTTONUP:
        #     print("Joystick button released.")

    # DRAWING STEP
    # First, clear the screen to white. Don't put other drawing commands
    # above this, or they will be erased with this command.
    screen.fill(WHITE)
    textPrint.reset()

    # Get count of joysticks
    joystick_count = pygame.joystick.get_count()
    textPrint.printScreen(screen, "Number of joysticks: {}".format(joystick_count) )
    textPrint.indent()
    # Holds up to four axis readings; entries keep their previous value
    # when fewer axes are reported.
    axis = [0]*4

    # For each joystick:
    for i in range(joystick_count):
        joystick = pygame.joystick.Joystick(i)
        joystick.init()
        textPrint.printScreen(screen, "Joystick {}".format(i) )
        textPrint.indent()

        # Get the name from the OS for the controller/joystick
        name = joystick.get_name()
        textPrint.printScreen(screen, "Joystick name: {}".format(name) )

        # Usually axis run in pairs, up/down for one, and left/right for
        # the other.
        axes = joystick.get_numaxes()
        textPrint.printScreen(screen, "Number of axes: {}".format(axes) )
        textPrint.indent()

        # Only the first joystick drives the robot (tank drive).
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # joystick index for the rest of this iteration -- confirm intended.
        if i == 0:
            for i in range( axes ):
                axis[i] = joystick.get_axis( i )
                textPrint.printScreen(screen, "Axis {} value: {:>6.3f}".format(i, axis[i]) )
                pwm = mapAxisToPWM(axis[i])
                # Presumably axis 1 = left stick vertical, axis 3 = right
                # stick vertical -- TODO confirm against the gamepad layout.
                if i == 1:
                    pwm_left = pwm
                if i == 3:
                    pwm_right = pwm
        textPrint.unindent()
        textPrint.unindent()

        # NOTE(review): pwm_left/pwm_right are only assigned inside the
        # axis loop above; with fewer than 4 axes this raises NameError
        # on first use -- confirm.
        cmdStr = "QuickBot Command:"
        textPrint.printScreen(screen, cmdStr)
        cmdStr = "$PWM=" + str(pwm_left) + "," + str(pwm_right) + "*\n"
        textPrint.printScreen(screen, cmdStr)
        textPrint.unindent()

        if sendFlag:
            sock.sendto(cmdStr, (HOST, PORT))

        # buttons = joystick.get_numbuttons()
        # textPrint.printScreen(screen, "Number of buttons: {}".format(buttons) )
        # textPrint.indent()
        # for i in range( buttons ):
        #     button = joystick.get_button( i )
        #     textPrint.printScreen(screen, "Button {:>2} value: {}".format(i,button) )
        # textPrint.unindent()

        # # Hat switch. All or nothing for direction, not like joysticks.
        # # Value comes back in an array.
        # hats = joystick.get_numhats()
        # textPrint.printScreen(screen, "Number of hats: {}".format(hats) )
        # textPrint.indent()
        # for i in range( hats ):
        #     hat = joystick.get_hat( i )
        #     textPrint.printScreen(screen, "Hat {} value: {}".format(i, str(hat)) )

        # NOTE(review): indent/unindent bookkeeping looks unbalanced after
        # the button/hat sections were commented out -- display only.
        textPrint.unindent()
        textPrint.unindent()

    # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT

    # Go ahead and update the screen with what we've drawn.
    pygame.display.flip()

    # Limit to 20 frames per second
    clock.tick(20)

# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
| |
from markdown import markdown
from keras_autodoc import autogen
from keras_autodoc import get_methods
import pytest
import sys
import pathlib
from typing import Union, Optional, Tuple
from .dummy_package import dummy_module
from . import dummy_package
test_doc1 = {
"doc": """Base class for recurrent layers.
# Arguments
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
in which cases the cells get stacked on after the other in the RNN,
implementing an efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shape
3D tensor with shape `(batch_size, timesteps, input_dim)`.
# Output shape
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
Note that
One: You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`.
Two: The value of `initial_state` should be a tensor or list of
tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by:
One: calling `reset_states`
- With the keyword argument `states`.
- The value of
`states` should be a numpy array or
list of numpy arrays representing
the initial state of the RNN layer.
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword: argument of `RNN.__call__` (as well as `RNN.call`) method.
This: requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
# Examples
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
""",
"result": """Base class for recurrent layers.
__Arguments__
- __cell__: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
in which cases the cells get stacked on after the other in the RNN,
implementing an efficient stacked RNN.
- __return_sequences__: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
- __return_state__: Boolean. Whether to return the last state
in addition to the output.
- __go_backwards__: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
- __stateful__: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
- __unroll__: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
- __input_dim__: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
- __input_length__: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
__Input shape__
3D tensor with shape `(batch_size, timesteps, input_dim)`.
__Output shape__
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
__Masking__
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
__Note on using statefulness in RNNs__
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
__Note on specifying the initial state of RNNs__
Note that
One: You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`.
Two: The value of `initial_state` should be a tensor or list of
tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by:
One: calling `reset_states`
- With the keyword argument `states`.
- The value of
`states` should be a numpy array or
list of numpy arrays representing
the initial state of the RNN layer.
__Note on passing external constants to RNNs__
You can pass "external" constants to the cell using the `constants`
keyword: argument of `RNN.__call__` (as well as `RNN.call`) method.
This: requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
__Examples__
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
""",
}
test_doc_with_arguments_as_last_block = {
"doc": """Base class for recurrent layers.
# Arguments
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
""",
"result": """Base class for recurrent layers.
__Arguments__
- __return_sequences__: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
- __return_state__: Boolean. Whether to return the last state
in addition to the output.
""",
}
@pytest.mark.parametrize(
    "docs_descriptor", [test_doc_with_arguments_as_last_block, test_doc1]
)
def test_doc_lists(docs_descriptor):
    """The processed docstring must render to the same HTML as the fixture."""
    processed = autogen.process_docstring(docs_descriptor["doc"])
    expected = docs_descriptor["result"]
    # Compare rendered HTML so insignificant markdown whitespace is ignored.
    assert markdown(processed) == markdown(expected)
dummy_docstring = """Multiplies 2 tensors (and/or variables) and returns a *tensor*.
When attempting to multiply a nD tensor
with a nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
# Examples
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
# Numpy implementation
```python
def dot(x, y):
return dot(x, y)
```
"""
def test_doc_multiple_sections_code():
    """Code blocks must survive processing in more than one section."""
    rendered = autogen.process_docstring(dummy_docstring)
    for snippet in ("# Theano-like behavior example", "def dot(x, y):"):
        assert snippet in rendered
def check_against_expected(elements):
    """Render *elements* and compare with dummy_package/expected.md.

    The comparison is done on the rendered HTML so blank lines or other
    differences that are not meaningful in markdown do not cause failures.
    """
    generator = autogen.DocumentationGenerator(
        project_url='www.dummy.com/my_project'
    )
    rendered = ''.join(generator._render(element) for element in elements)

    expected_file = (pathlib.Path(__file__).resolve().parent
                     / 'dummy_package' / 'expected.md')
    assert markdown(rendered) == markdown(expected_file.read_text())
def test_generate_markdown():
    """Rendering live objects (classes, methods, function) matches expected.md."""
    elements = [
        dummy_module.Dense,
        dummy_module.ImageDataGenerator,
        *get_methods(dummy_module.ImageDataGenerator),
        dummy_module.to_categorical,
    ]
    check_against_expected(elements)
def test_generate_markdown_from_string():
    """Rendering dotted import paths gives the same result as live objects."""
    prefix = 'tests.dummy_package.dummy_module.'
    elements = [prefix + suffix for suffix in (
        'Dense',
        'ImageDataGenerator',
        'ImageDataGenerator.flow',
        'ImageDataGenerator.flow_from_directory',
        'to_categorical',
    )]
    check_against_expected(elements)
@pytest.mark.parametrize('element', [
    'tests.dummy_package.DataGenerator',
    'tests.dummy_package.to_categorical'
])
def test_aliases_class_function(element):
    """The rendered signature uses the alias the element was addressed by."""
    rendered = autogen.DocumentationGenerator()._render(element)
    assert element + '(' in rendered
@pytest.mark.parametrize(['element', 'expected'], [
    ('tests.dummy_package.DataGenerator.flow', '\nDataGenerator.flow('),
    ('tests.dummy_package.DataGenerator.flow_from_directory',
     '\nDataGenerator.flow_from_directory('),
])
def test_aliases_methods(element, expected):
    """Aliased methods render with the class-qualified short name."""
    rendered = autogen.DocumentationGenerator()._render(element)
    assert expected in rendered
expected_dodo = """ dodo
```python
tests.dummy_package.dummy_module2.dodo(x)
```
Some dodo
----
"""
@pytest.mark.parametrize("titles_size", ["###", "##"])
def test_aliases_in_hints(titles_size):
    """Rendering honors the configured title size and page aliases."""
    pages = {'dod.md': ['tests.dummy_package.DataGenerator',
                        'tests.dummy_package.dummy_module2.dodo']}
    generator = autogen.DocumentationGenerator(
        pages=pages, titles_size=titles_size)
    rendered = generator._render('tests.dummy_package.dummy_module2.dodo')
    assert rendered == titles_size + expected_dodo
class A:
    # Fixture parent class: its method carries the docstring that the
    # documentation generator is expected to pick up for subclasses whose
    # override has no docstring of its own.
    def dodo(self):
        """Some docstring."""
        pass
class B(A):
    # Fixture child class: overrides dodo WITHOUT a docstring, so the
    # generator must fall back to A.dodo's docstring.
    def dodo(self):
        pass
def test_get_docstring_of_super_class():
    """B.dodo has no docstring of its own, so A.dodo's must be used."""
    rendered = autogen.DocumentationGenerator()._render(B.dodo)
    assert 'Some docstring' in rendered
def water_plant(
self, amount: Union[int, float], fertilizer_type: Optional[str] = None
):
"""Give your plant some water.
# Arguments
amount: How much water to give.
fertilizer_type: What kind of fertilizer to add.
"""
pass
def test_types_in_docstring():
    """Type annotations are lifted into the rendered argument list."""
    rendered = autogen.DocumentationGenerator()._render(water_plant)
    expected_snippets = (
        "water_plant(self, amount, fertilizer_type=None)",
        "- __amount__ `Union[int, float]`: How much",
        "- __fertilizer_type__ `Optional[str]`: What",
    )
    for snippet in expected_snippets:
        assert snippet in rendered
def hard_method(self, arg: Union[int, Tuple[int, int]], arg2: int = 0) -> int:
"""Can we parse this?
# Arguments
arg: One or two integers.
arg2: One integer.
"""
pass
def test_hard_method():
    """Nested generic annotations must be rendered intact."""
    rendered = autogen.DocumentationGenerator()._render(hard_method)
    for snippet in ("- __arg__ `Union[int, Tuple[int, int]]`: One or",
                    "- __arg2__ `int`: One integer."):
        assert snippet in rendered
def doing_things(an_argument: dummy_package.DataGenerator):
"""A function
# Arguments
an_argument: Some generator
"""
def test_rendinging_with_extra_alias():
    """A list of extra_aliases makes the full dotted path usable in hints."""
    generator = autogen.DocumentationGenerator(
        extra_aliases=["tests.dummy_package.DataGenerator"])
    rendered = generator._render(doing_things)
    assert "- __an_argument__ `tests.dummy_package.DataGenerator`: Some" in rendered
def test_rendinging_with_extra_alias_custom_alias():
    """A dict extra_aliases maps a real import path to a display alias."""
    aliases = {"tests.dummy_package.dummy_module.ImageDataGenerator":
               "some.new.Thing"}
    generator = autogen.DocumentationGenerator(extra_aliases=aliases)
    rendered = generator._render(doing_things)
    assert "- __an_argument__ `some.new.Thing`: Some" in rendered
@pytest.mark.skipif(
    sys.version_info < (3, 7),
    reason="the __future__ annotations only works with py37+."
)
def test_future_annotations():
    """Same alias test, but in a module using `from __future__ import annotations`."""
    # Imported lazily so older interpreters never parse the module.
    from . import autogen_future
    autogen_future.test_rendinging_with_extra_alias()
# Allow running this test module directly; pytest normally discovers it.
if __name__ == "__main__":
    pytest.main([__file__])
| |
import unittest
import mock
from sure import expect
from simpleflow import activity, format, futures
from simpleflow.swf.executor import Executor
from swf.models.history import builder
from swf.responses import Response
from tests.data import DOMAIN, BaseTestWorkflow, increment
from tests.utils import MockSWFTestCase
# Activity fixture whose priority (32) is set at the decorator level; the
# tests below check it wins over the workflow default but loses to an
# explicit __priority passed to submit().
# NOTE(review): the first parameter is named `self` although this is a
# module-level function -- presumably how the activity runner invokes it;
# confirm against the simpleflow activity API.
@activity.with_attributes(task_priority=32)
def increment_high_priority(self, x):
    return x + 1
class ExampleWorkflow(BaseTestWorkflow):
    """
    Example workflow definition used in tests below.

    Submits five activities covering every way a task priority can be
    sourced: workflow default, explicit value, explicit None, decorator
    value, and decorator value overridden at submit time.
    """

    @property
    def task_priority(self):
        """
        Sets a default task priority as a dynamic value. We could also have used
        task_priority = <num> on the class directly.
        """
        return 12

    def run(self):
        a = self.submit(increment, 3)                  # workflow default (12)
        b = self.submit(increment, 3, __priority=5)    # explicit value
        c = self.submit(increment, 3, __priority=None) # explicitly no priority
        d = self.submit(increment_high_priority, 3)    # decorator value (32)
        e = self.submit(increment_high_priority, 3, __priority=30)  # override
        futures.wait(a, b, c, d, e)
class TestSimpleflowSwfExecutor(MockSWFTestCase):
    # Verifies priority precedence for the five submits in
    # ExampleWorkflow.run: explicit __priority > decorator task_priority >
    # workflow default; __priority=None means no priority at all.
    def test_submit_resolves_priority(self):
        self.start_workflow_execution()

        decisions = self.build_decisions(ExampleWorkflow).decisions
        # One decision per submit() in ExampleWorkflow.run.
        expect(decisions).to.have.length_of(5)

        def get_task_priority(decision):
            # taskPriority appears as a string in the decision attributes,
            # or is absent entirely (hence .get) when no priority was set.
            return decision["scheduleActivityTaskDecisionAttributes"].get(
                "taskPriority"
            )

        # default priority for the whole workflow
        expect(get_task_priority(decisions[0])).to.equal("12")

        # priority passed explicitly
        expect(get_task_priority(decisions[1])).to.equal("5")

        # priority == None
        expect(get_task_priority(decisions[2])).to.be.none

        # priority set at decorator level
        expect(get_task_priority(decisions[3])).to.equal("32")

        # priority set at decorator level but overridden in self.submit()
        expect(get_task_priority(decisions[4])).to.equal("30")
class TestCaseNotNeedingDomain(unittest.TestCase):
    # Builds a synthetic workflow history containing one signal, one marker
    # and one timer, replays it, then checks get_event_details() for each
    # event type -- both for a name that exists and one that does not.
    # The asserted event_ids (4, 5, 6, 7) follow from the order the events
    # are appended to the history below.
    def test_get_event_details(self):
        history = builder.History(ExampleWorkflow, input={})
        signal_input = {"x": 42, "foo": "bar", "__propagate": False}
        marker_details = {"baz": "bae"}
        history.add_signal("a_signal", signal_input)
        history.add_marker("a_marker", marker_details)
        history.add_timer_started("a_timer", 1, decision_id=2)
        history.add_timer_fired("a_timer")

        executor = Executor(DOMAIN, ExampleWorkflow)
        executor.replay(Response(history=history, execution=None))

        # Signal: present...
        details = executor.get_event_details("signal", "a_signal")
        # timestamp is nondeterministic, so drop it before comparing.
        del details["timestamp"]
        expect(details).to.equal(
            {
                "type": "signal",
                "state": "signaled",
                "name": "a_signal",
                "input": signal_input,
                "event_id": 4,
                "external_initiated_event_id": 0,
                "external_run_id": None,
                "external_workflow_id": None,
            }
        )
        # ...and absent.
        details = executor.get_event_details("signal", "another_signal")
        expect(details).to.be.none

        # Marker: present and absent.
        details = executor.get_event_details("marker", "a_marker")
        del details["timestamp"]
        expect(details).to.equal(
            {
                "type": "marker",
                "state": "recorded",
                "name": "a_marker",
                "details": marker_details,
                "event_id": 5,
            }
        )
        details = executor.get_event_details("marker", "another_marker")
        expect(details).to.be.none

        # Timer: present (started then fired) and absent.
        details = executor.get_event_details("timer", "a_timer")
        del details["started_event_timestamp"]
        del details["fired_event_timestamp"]
        expect(details).to.equal(
            {
                "type": "timer",
                "state": "fired",
                "id": "a_timer",
                "decision_task_completed_event_id": 2,
                "start_to_fire_timeout": 1,
                "started_event_id": 6,
                "fired_event_id": 7,
                "control": None,
            }
        )
        details = executor.get_event_details("timer", "another_timer")
        expect(details).to.be.none
@activity.with_attributes(raises_on_failure=True)
def print_me_n_times(s, n, raises=False):
    """Return ``s`` repeated ``n`` times, or raise a ValueError carrying
    that (potentially huge) string when ``raises`` is true."""
    repeated = s * n
    if raises:
        raise ValueError("Number: {}".format(repeated))
    return repeated
class ExampleJumboWorkflow(BaseTestWorkflow):
    """Workflow used by the jumbo-fields tests below."""

    def run(self, s, n, raises=False):
        future = self.submit(print_me_n_times, s, n, raises=raises)
        futures.wait(future)
        return future.result
class TestSimpleflowSwfExecutorWithJumboFields(MockSWFTestCase):
    """Checks that oversized ("jumbo") fields are offloaded to the bucket
    named by SIMPLEFLOW_JUMBO_FIELDS_BUCKET and show up in the SWF history
    as "simpleflow+s3://<bucket>/<uuid> <size>" pointers."""
    @mock.patch.dict("os.environ", {"SIMPLEFLOW_JUMBO_FIELDS_BUCKET": "jumbo-bucket"})
    def test_jumbo_fields_are_replaced_correctly(self):
        """A large activity result is replaced by an s3 pointer."""
        # prepare
        self.register_activity_type(
            "tests.test_simpleflow.swf.test_executor.print_me_n_times", "default"
        )
        # start execution; the activity result will be 9 chars * 10000.
        self.start_workflow_execution(input='{"args": ["012345679", 10000]}')
        # decider part
        result = self.build_decisions(ExampleJumboWorkflow)
        assert len(result.decisions) == 1
        self.take_decisions(result.decisions, result.execution_context)
        # worker part
        self.process_activity_task()
        # now check the history
        events = self.get_workflow_execution_history()["events"]
        activity_result_evt = events[-2]
        assert activity_result_evt["eventType"] == "ActivityTaskCompleted"
        result = activity_result_evt["activityTaskCompletedEventAttributes"]["result"]
        # 90002 = 9 * 10000 payload plus serialization overhead.
        expect(result).to.match(r"^simpleflow\+s3://jumbo-bucket/[a-z0-9-]+ 90002$")
    @mock.patch.dict("os.environ", {"SIMPLEFLOW_JUMBO_FIELDS_BUCKET": "jumbo-bucket"})
    def test_jumbo_fields_in_task_failed_is_decoded(self):
        """Jumbo reason/details on a failed task decode transparently and
        propagate into the eventual workflow failure."""
        # prepare execution
        self.register_activity_type(
            "tests.test_simpleflow.swf.test_executor.print_me_n_times", "default"
        )
        # start execution; raises=true makes the activity raise ValueError.
        self.start_workflow_execution(
            input='{"args": ["012345679", 10000], "kwargs": {"raises": true}}',
        )
        # decider part
        result = self.build_decisions(ExampleJumboWorkflow)
        assert len(result.decisions) == 1
        self.take_decisions(result.decisions, result.execution_context)
        # worker part
        self.process_activity_task()
        # now check the history
        events = self.get_workflow_execution_history()["events"]
        activity_result_evt = events[-2]
        assert activity_result_evt["eventType"] == "ActivityTaskFailed"
        attrs = activity_result_evt["activityTaskFailedEventAttributes"]
        expect(attrs["reason"]).to.match(
            r"simpleflow\+s3://jumbo-bucket/[a-z0-9-]+ 9\d{4}"
        )
        expect(attrs["details"]).to.match(
            r"simpleflow\+s3://jumbo-bucket/[a-z0-9-]+ 9\d{4}"
        )
        # format.decode() resolves the s3 pointer back to the payload.
        details = format.decode(attrs["details"])
        expect(details["error"]).to.equal("ValueError")
        expect(len(details["message"])).to.be.greater_than(9 * 10000)
        # decide again (should lead to workflow failure)
        result = self.build_decisions(ExampleJumboWorkflow)
        assert len(result.decisions) == 1
        assert result.decisions[0]["decisionType"] == "FailWorkflowExecution"
        self.take_decisions(result.decisions, result.execution_context)
        # now check history again
        events = self.get_workflow_execution_history()["events"]
        event = events[-1]
        assert event["eventType"] == "WorkflowExecutionFailed"
        attrs = event["workflowExecutionFailedEventAttributes"]
        details = format.decode(attrs["details"], use_proxy=False)
        expect(details).to.be.a("dict")
        expect(details["message"]).to.match(r"^Number: 012345.*")
        reason = format.decode(attrs["reason"], use_proxy=False)
        expect(reason).to.match(
            r"^Workflow execution error in activity-tests.test_simpleflow.swf."
            r'test_executor.print_me_n_times: "ValueError: Number: 012345679\d+"$'
        )
| |
#!/usr/bin/env python3
# reproduce_character_models.py
# Reproduce predictive modeling of characters.
# This script assumes that you have subset.tar.gz
# in the parent directory of the /train_models
# directory. It also expects to have a /temp directory
# as a sibling (at the same level as /train_models).
# When it's asked to create a model it extracts
# characters from the tar.gz file and puts them
# in temp. Inefficient? Yes! But it means I can
# use versatiletrainer without having to edit it
# to take data from a tarfile.
# It also assumes that character metadata is in
# /metadata/balanced_character_subset.csv.
# Finally, it wants a folder '../models', again
# placed as a sibling, where it can put various
# intermediate lexicons and metadata.
import csv, os, sys, pickle, math, tarfile
import versatiletrainer as train
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
def select_subset_to_model(modelname, metadatapath, numexamples, startdate, enddate):
    '''
    Creates metadata for a model of gender trained on a balanced
    sample of the whole timeline.

    In keeping with Python practice, the date range is inclusive at the
    bottom, but not the top.

    Returns the path to the metadata created, plus the docids sampled.
    '''
    allmeta = pd.read_csv(metadatapath)
    inrange = (allmeta.firstpub >= startdate) & (allmeta.firstpub < enddate)
    timeslice = allmeta[inrange]
    # Equal numbers of male and female characters, in that order.
    samples = [timeslice[timeslice.gender == g].sample(n = numexamples) for g in ('m', 'f')]
    general_sample = pd.concat(samples)
    outpath = '../models/' + modelname + '_meta.csv'
    general_sample.to_csv(outpath)
    return outpath, general_sample.docid
def authgender_subset_to_model(modelname, agender, metadatapath, numexamples, startdate, enddate):
    '''
    Creates metadata for a subset of characters drawn only from books
    written by authors of a specified gender (agender).

    Returns the path to the metadata created, plus the docids sampled.
    '''
    allmeta = pd.read_csv(metadatapath)
    mask = (allmeta.authgender == agender) & (allmeta.firstpub >= startdate) & (allmeta.firstpub < enddate)
    timeslice = allmeta[mask]
    # Balance the sample across character genders, male first.
    samples = [timeslice[timeslice.gender == g].sample(n = numexamples) for g in ('m', 'f')]
    general_sample = pd.concat(samples)
    outpath = '../models/' + modelname + '_meta.csv'
    general_sample.to_csv(outpath)
    return outpath, general_sample.docid
def subset_to_predict_authgender(modelname, metadatapath, num, startdate, enddate):
    '''
    Creates metadata that can be used to actually predict authgender.

    Returns the path to the metadata created, plus the mean firstpub
    date of the sample.
    '''
    allmeta = pd.read_csv(metadatapath)
    timeslice = allmeta[(allmeta.firstpub >= startdate) & (allmeta.firstpub < enddate)]
    # num examples for each cell of a 2x2 contingency matrix of
    # author gender x character gender, drawn in a fixed order.
    cells = []
    for agender, cgender in [('m', 'm'), ('m', 'f'), ('f', 'm'), ('f', 'f')]:
        cell = timeslice[(timeslice.authgender == agender) & (timeslice.gender == cgender)]
        cells.append(cell.sample(n = num))
    general_sample = pd.concat(cells)
    outpath = '../models/' + modelname + '_meta.csv'
    general_sample['tags'] = general_sample.authgender
    # that's the line that actually ensures we are predicting
    # author gender rather than character gender
    general_sample.to_csv(outpath)
    return outpath, np.mean(general_sample.firstpub)
def refresh_temp(list_of_docids):
    '''
    Empties the temporary folder and restocks it, using a list
    of docids that are in subset.tar.gz.
    '''
    folder = '../temp'
    # Clear out whatever the previous model left behind.
    for filename in os.listdir(folder):
        path = os.path.join(folder, filename)
        try:
            if os.path.isfile(path):
                os.unlink(path)
        except Exception as e:
            print(e)
    # Copy the requested character files out of the tarball.
    with tarfile.open('../subset.tar.gz', 'r:gz') as tar:
        for docid in list_of_docids:
            member = 'charactersubset/' + docid + '.tsv'
            text = tar.extractfile(member).read().decode('utf-8')
            with open('../temp/' + docid + '.tsv', mode = 'w', encoding = 'utf-8') as outfile:
                outfile.write(text)
def gridsearch_a_model(metadatapath, sourcefolder, c_range, ftstart, ftend, ftstep, positive_tags = ['f'], negative_tags = ['m']):
    ''' Function does a gridsearch to identify an optimal number of features and setting of
    the regularization constant; then produces that model. Note that we do not use this for
    models of specific decades. Just initially for model selection.

    metadatapath: csv produced by select_subset_to_model() and friends.
    sourcefolder: folder holding the .tsv feature files.
    c_range: list of L2 regularization constants to try.
    ftstart, ftend, ftstep: range of feature counts to try.
    '''
    # Recover the bare model name from the metadata path. (This used to
    # strip the prefix './/models/', which never matched the '../models/'
    # paths built elsewhere in this script, so the printed name kept the
    # directory prefix.)
    modelname = metadatapath.replace('../models/', '').replace('_meta.csv', '')
    extension = '.tsv'
    vocabpath = metadatapath.replace('_meta', '_vocab')
    if os.path.exists(vocabpath):
        print('Vocabulary for ' + modelname + ' already exists. Using it.')
    outputpath = metadatapath.replace('_meta', '')

    ## EXCLUSIONS. # not used in this project
    excludeif = dict()
    excludeifnot = dict()
    excludeabove = dict()
    excludebelow = dict()
    sizecap = 2000

    # CLASSIFY CONDITIONS # not used in this project
    testconditions = set()
    datetype = "firstpub"
    numfeatures = ftend
    regularization = .000075
    # linting the code would get rid of regularization, which is at this
    # point an unused dummy parameter

    paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
    exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
    classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
    # 12-fold crossvalidation with a logistic model.
    modelparams = 'logistic', 12, ftstart, ftend, ftstep, c_range
    matrix, rawaccuracy, allvolumes, coefficientuples = train.tune_a_model(paths, exclusions, classifyconditions, modelparams)
    print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
    tiltaccuracy = train.diachronic_tilt(allvolumes, 'linear', [])
    print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy))
def crossvalidate_one_model(metadatapath, sourcefolder, c_range, ftstart, ftend, ftstep, positive_tags = ['f'], negative_tags = ['m']):
    ''' Crossvalidates a single model of character gender and returns its
    raw accuracy. The vocabulary is deleted up front so each sample of
    characters gets a freshly built lexicon.
    '''
    # (Removed an unused 'modelname' local whose path-stripping pattern
    # './/models/' never matched real paths anyway.)
    extension = '.tsv'
    vocabpath = metadatapath.replace('_meta', '_vocab')
    if os.path.exists(vocabpath):
        os.unlink(vocabpath)
        # we rebuild vocab each time
    outputpath = metadatapath.replace('_meta', '')

    ## EXCLUSIONS. # not used in this project
    excludeif = dict()
    excludeifnot = dict()
    excludeabove = dict()
    excludebelow = dict()
    sizecap = 2000

    # CLASSIFY CONDITIONS # not used in this project
    testconditions = set()
    datetype = "firstpub"
    numfeatures = ftend
    regularization = .000075
    # linting the code would get rid of regularization, which is at this
    # point an unused dummy parameter

    paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
    exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
    classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
    # 12-fold crossvalidation with a logistic model.
    modelparams = 'logistic', 12, ftstart, ftend, ftstep, c_range
    rawaccuracy, allvolumes, coefficientuples = train.crossvalidate_single_model(paths, exclusions, classifyconditions, modelparams)
    print(rawaccuracy)
    return rawaccuracy
def crossvalidate_across_L2_range(metadatapath, sourcefolder, c_range, ftstart, ftend, ftstep, positive_tags = ['f'], negative_tags = ['m']):
    '''
    For a given set of characters, crossvalidates a model at multiple
    L2 settings, and returns all the accuracies as a dict keyed by the
    L2 constant.
    '''
    # (Removed an unused 'modelname' local whose path-stripping pattern
    # './/models/' never matched real paths anyway.)
    extension = '.tsv'
    vocabpath = metadatapath.replace('_meta', '_vocab')
    if os.path.exists(vocabpath):
        os.unlink(vocabpath)
        # we rebuild vocab each time
    outputpath = metadatapath.replace('_meta', '')

    ## EXCLUSIONS. # not used in this project
    excludeif = dict()
    excludeifnot = dict()
    excludeabove = dict()
    excludebelow = dict()
    sizecap = 2000

    # CLASSIFY CONDITIONS # not used in this project
    testconditions = set()
    datetype = "firstpub"
    numfeatures = ftend
    regularization = .000075
    # linting the code would get rid of regularization, which is at this
    # point an unused dummy parameter

    paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
    exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
    classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
    accuracydict = dict()
    for c_setting in c_range:
        cparam = [c_setting]
        # 10-fold crossvalidation here (vs 12 in the single-model path).
        modelparams = 'logistic', 10, ftstart, ftend, ftstep, cparam
        rawaccuracy, allvolumes, coefficientuples = train.crossvalidate_single_model(paths, exclusions, classifyconditions, modelparams)
        accuracydict[c_setting] = rawaccuracy
    return accuracydict
def applymodel(modelpath, metapath, outpath):
    ''' This function applies a specified model (modelpath) to a specified
    metadata set (metapath), and sends the results to outpath.
    '''
    sourcefolder = '/Users/tunder/data/character_subset/'
    extension = '.tsv'
    # Use the metadata set the caller asked for. (This line previously
    # reassigned metapath to the hard-coded balanced_character_subset.csv,
    # silently ignoring the argument and contradicting the docstring.)
    metadatapath = metapath
    newmetadict = train.apply_pickled_model(modelpath, sourcefolder, extension, metadatapath)
    print('Got predictions for that model.')
    newmetadict.to_csv(outpath)
def correlate_models(firstpath, secondpath):
    '''Pearson correlation between the 'logistic' predictions in two
    model-output csvs, matched on docid. Rows missing from either file
    are dropped before correlating.'''
    first = pd.read_csv(firstpath, index_col = 'docid')
    second = pd.read_csv(secondpath, index_col = 'docid')
    paired = pd.concat([first['logistic'], second['logistic']], axis=1, keys=['one', 'two'])
    paired.dropna(inplace = True)
    r, p = pearsonr(paired.one, paired.two)
    return r
def comparison(selfmodel, othermodel, modelname):
    '''Counts how often predictions in othermodel (column modelname,
    indexed by docid-as-string) fall on the same side of 0.5 as the
    true classes in selfmodel's 'realclass' column.

    Returns (number of volumes examined, number of agreements).
    Values exactly equal to 0.5 never count as agreement.
    '''
    total = 0
    agreed = 0
    for docid in selfmodel.index:
        truth = selfmodel.loc[docid, 'realclass']
        # othermodel's index holds stringified docids.
        prediction = othermodel.loc[str(docid), modelname]
        if truth > 0.5 and prediction > 0.5:
            agreed += 1
        elif truth < 0.5 and prediction < 0.5:
            agreed += 1
        total += 1
    return total, agreed
def getacc(filelist):
    '''Pooled accuracy across several model-output csvs: the fraction of
    rows whose 'logistic' prediction lands on the same side of 0.5 as
    the 'realclass' column.'''
    seen = 0
    correct = 0
    for path in filelist:
        df = pd.read_csv(path)
        rowcount = len(df.realclass)
        # Confusion-matrix cells; together they must cover every row.
        tp = sum((df.realclass > 0.5) & (df.logistic > 0.5))
        tn = sum((df.realclass <= 0.5) & (df.logistic <= 0.5))
        fp = sum((df.realclass <= 0.5) & (df.logistic > 0.5))
        fn = sum((df.realclass > 0.5) & (df.logistic <= 0.5))
        assert rowcount == (tp + fp + tn + fn)
        seen += rowcount
        correct += (tp + tn)
    return correct / seen
if __name__ == '__main__':

    args = sys.argv
    # Guard: a friendly message instead of an IndexError when the script
    # is run with no command at all.
    if len(args) < 2:
        print('Please provide a command.')
        sys.exit(0)
    command = args[1]

    # Defaults shared by most commands; several branches override them.
    metapath = '../metadata/balanced_character_subset.csv'
    sourcefolder = '/Users/tunder/data/character_subset/'

    if command == 'optimize_general_model':
        c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
        featurestart = 1000
        featureend = 3200
        featurestep = 100
        generalmetapath, general_docids = select_subset_to_model('wholetimeline', metapath,
            numexamples = 800, startdate = 1780, enddate = 2010)
        gridsearch_a_model(generalmetapath, sourcefolder, c_range,
            featurestart, featureend, featurestep)

    elif command == 'optimize_fifty_years':
        # (This branch used a bare 'if', so after a successful
        # 'optimize_general_model' run the trailing 'else' below still
        # printed "I don't know that command." Converted to 'elif'.)
        # this option creates a model that can be used for comparison to
        # the model of fictional prestige, which spans only 1850-1950
        c_range = [.0001]
        featurestart = 2450
        featureend = 2700
        featurestep = 50
        generalmetapath, general_docids = select_subset_to_model('fiftypost1950', metapath, numexamples = 1500, startdate = 1950, enddate = 2050)
        # The number of examples is higher here, because we want this model to be maximally
        # accurate, and we're not trying to use this as a guide for other 800-character
        # models.
        gridsearch_a_model(generalmetapath, sourcefolder, c_range,
            featurestart, featureend, featurestep)

    elif command == 'test_decades':
        c_range = [.0004]
        featurestart = 2300
        featureend = 2300
        featurestep = 100
        with open('../dataforR/speechlessdecademodels.tsv', mode = 'w', encoding = 'utf-8') as f:
            f.write('decade\taccuracy\n')
            for dec in range(1790, 2010, 10):
                # The 1790 "decade" covers twenty years, 1780-1800.
                if dec == 1790:
                    floor = 1780
                    ceiling = 1800
                else:
                    floor = dec
                    ceiling = dec + 10
                modelname = 'decade' + str(dec)
                # Fifteen resamplings per decade, to average out sampling noise.
                for i in range(15):
                    decademetapath, docids = select_subset_to_model(modelname, metapath, numexamples = 800,
                        startdate = floor, enddate = ceiling)
                    accuracy = crossvalidate_one_model(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
                    f.write(str(dec) + '\t' + str(accuracy) + '\n')

    elif command == 'optimize_20c':
        c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
        featurestart = 1100
        featureend = 3000
        featurestep = 100
        generalmetapath, general_docids = select_subset_to_model('wholetwentieth', metapath,
            numexamples = 800, startdate = 1900, enddate = 2000)
        gridsearch_a_model(generalmetapath, sourcefolder, c_range,
            featurestart, featureend, featurestep)

    elif command == 'optimize_19c':
        c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
        featurestart = 1100
        featureend = 3000
        featurestep = 100
        generalmetapath, general_docids = select_subset_to_model('wholenineteenth', metapath,
            numexamples = 800, startdate = 1800, enddate = 1900)
        gridsearch_a_model(generalmetapath, sourcefolder, c_range,
            featurestart, featureend, featurestep)

    elif command == 'optimize_thirty':
        # Optimizes a model on a thirty-year window anchored on the decade
        # passed as the second command-line argument.
        decade = int(args[2])
        c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
        featurestart = 1900
        featureend = 3000
        featurestep = 100
        modelname = 'optimalthirty' + str(decade)
        generalmetapath, general_docids = select_subset_to_model(modelname, metapath,
            numexamples = 1500, startdate = decade - 10, enddate = decade + 20)
        gridsearch_a_model(generalmetapath, sourcefolder, c_range,
            featurestart, featureend, featurestep)

    elif command == 'decade_grid':
        # This is the function I finally used. Keeps the number of features
        # fixed at 2200, but generates a new lexicon for each decade (and each
        # sample of 800 characters within the decade). Tests each decade at
        # multiple L2 settings, and records them all, so we can take the
        # optimal setting but also figure out how much of a difference that's
        # making.
        c_range = [.00003, .0001, .0003, .001]
        featurestart = 2200
        featureend = 2200
        featurestep = 100
        with open('../dataforR/decadegrid.tsv', mode = 'w', encoding = 'utf-8') as f:
            f.write('decade\tL2\taccuracy\titer\n')
            for dec in range(1790, 2010, 10):
                # The 1790 "decade" covers twenty years, 1780-1800.
                if dec == 1790:
                    floor = 1780
                    ceiling = 1800
                else:
                    floor = dec
                    ceiling = dec + 10
                modelname = 'decade' + str(dec)
                for i in range(15):
                    decademetapath, docids = select_subset_to_model(modelname, metapath, numexamples = 800,
                        startdate = floor, enddate = ceiling)
                    accuracydict = crossvalidate_across_L2_range(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
                    for L2setting, accuracy in accuracydict.items():
                        f.write(str(dec) + '\t' + str(L2setting) + '\t' + str(accuracy) + '\t' + str(i) + '\n')

    elif command == 'decade_grid_for_differentiation_plot':
        # Crossvalidates one thirty-year model per decade at a single fixed
        # L2 setting and prints the accuracies. (The comment previously here
        # was copy-pasted from 'decade_grid' and described a different
        # procedure.)
        c_range = [.0001]
        featurestart = 2300
        featureend = 2300
        featurestep = 100
        for dec in range(1790, 2010, 10):
            floor = dec - 10
            ceiling = dec + 20
            modelname = 'thirty' + str(dec)
            decademetapath, docids = select_subset_to_model(modelname, metapath, numexamples = 1500,
                startdate = floor, enddate = ceiling)
            accuracy = crossvalidate_one_model(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
            print(str(dec) + '\t' + str(accuracy) + '\n')

    elif command == 'auth_specific_charpredict_grid':
        # Crossvalidates character-gender models separately for books by
        # male and female authors, in twenty-year windows, across a grid
        # of L2 settings.
        c_range = [.00003, .0001, .0003, .001]
        featurestart = 2200
        featureend = 2200
        featurestep = 100
        metapath = '../metadata/balanced_authgender_subset.csv'
        sourcefolder = '/Users/tunder/data/authgender_subset/'
        with open('../dataforR/auth_specific_charpredict.tsv', mode = 'w', encoding = 'utf-8') as f:
            f.write('decade\tauthgender\tL2\taccuracy\titer\n')
            for dec in range(1800, 2000, 20):
                # NOTE(review): unreachable -- the range above starts at
                # 1800, so dec never equals 1790; kept to preserve the
                # original shape.
                if dec == 1790:
                    floor = 1780
                    ceiling = 1800
                else:
                    floor = dec
                    ceiling = dec + 20
                for agender in ['m', 'f']:
                    modelname = agender + 'author' + '_' + str(dec)
                    for i in range(5):
                        decademetapath, docids = authgender_subset_to_model(modelname, agender, metapath, numexamples = 800,
                            startdate = floor, enddate = ceiling)
                        accuracydict = crossvalidate_across_L2_range(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
                        for L2setting, accuracy in accuracydict.items():
                            f.write(str(dec) + '\t' + agender + '\t' + str(L2setting) + '\t' + str(accuracy) + '\t' + str(i) + '\n')

    elif command == 'predict_authgender':
        # Predicts *author* gender (rather than character gender) in
        # seventeen-year windows, across a grid of L2 settings.
        c_range = [.0001, .0003, .001, .003]
        featurestart = 2500
        featureend = 2500
        featurestep = 100
        metapath = '../metadata/balanced_authgender_subset.csv'
        sourcefolder = '/Users/tunder/data/authgender_subset/'
        with open('../dataforR/authgender_predictions.tsv', mode = 'w', encoding = 'utf-8') as f:
            f.write('meandate\tL2\taccuracy\titer\n')
            for dec in range(1795, 2010, 17):
                # NOTE(review): unreachable -- range(1795, 2010, 17) never
                # yields 1790; kept to preserve the original shape.
                if dec == 1790:
                    floor = 1780
                    ceiling = 1800
                else:
                    floor = dec
                    ceiling = dec + 17
                modelname = 'predict_authgender' + '_' + str(dec)
                for i in range(9):
                    decademetapath, meandate = subset_to_predict_authgender(modelname, metapath, num = 400,
                        startdate = floor, enddate = ceiling)
                    # note that in this case num is not the total number of male or female examples,
                    # but the number for each cell of a 2x2 contingency matrix of author gender
                    # versus character gender so 400 produces 1600 total instances
                    accuracydict = crossvalidate_across_L2_range(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
                    for L2setting, accuracy in accuracydict.items():
                        f.write(str(meandate) + '\t' + str(L2setting) + '\t' + str(accuracy) + '\t' + str(i) + '\n')

    elif command == 'optimize_authgender':
        c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008, .03, 1]
        featurestart = 800
        featureend = 3600
        featurestep = 100
        metapath = '../metadata/balanced_authgender_subset.csv'
        sourcefolder = '/Users/tunder/data/authgender_subset/'
        generalmetapath, general_docids = subset_to_predict_authgender('general_authgender', metapath,
            num = 400, startdate = 1780, enddate = 2010)
        gridsearch_a_model(generalmetapath, sourcefolder, c_range,
            featurestart, featureend, featurestep)

    elif command == 'onlywomenwriters':
        c_range = [.0003]
        featurestart = 2500
        featureend = 2600
        featurestep = 100
        womensmetapath, docids = authgender_subset_to_model('onlywomenwritersC', 'f', metapath, numexamples = 1500, startdate = 1800, enddate = 2000)
        gridsearch_a_model(womensmetapath, sourcefolder, c_range, featurestart, featureend, featurestep)

    elif command == 'onlymalewriters':
        c_range = [.0003]
        featurestart = 2500
        featureend = 2600
        featurestep = 100
        womensmetapath, docids = authgender_subset_to_model('onlymalewritersC', 'm', metapath, numexamples = 1500, startdate = 1800, enddate = 2000)
        gridsearch_a_model(womensmetapath, sourcefolder, c_range, featurestart, featureend, featurestep)

    elif command == 'compare_models':
        # Applies three models trained on male-authored books and three on
        # female-authored books to a shared test set, then reports mean
        # prediction correlations within and between the two groups.
        men = ['onlymalewriters', 'onlymalewritersB', 'onlymalewritersC']
        women = ['onlywomenwriters', 'onlywomenwritersB', 'onlywomenwritersC']
        # test_subset_path, test_docids = select_subset_to_model('test_metadata', metapath, numexamples = 1000, startdate = 1800, enddate = 2000)
        test_subset_path = '../models/test_metadata_meta.csv'
        generaloutpath = '/Users/tunder/Dropbox/python/character/future_work/appliedmodels/'
        masculineperspective = []
        feminineperspective = []
        for m in men:
            modelpath = '../models/' + m + '.pkl'
            outpath = generaloutpath + m + '.results'
            # Only apply the model if cached results aren't already there.
            if not os.path.exists(outpath):
                applymodel(modelpath, test_subset_path, outpath)
            masculineperspective.append(outpath)
        for w in women:
            modelpath = '../models/' + w + '.pkl'
            outpath = generaloutpath + w + '.results'
            if not os.path.exists(outpath):
                applymodel(modelpath, test_subset_path, outpath)
            feminineperspective.append(outpath)
        print('among men:')
        r = []
        r.append(correlate_models(masculineperspective[0], masculineperspective[1]))
        r.append(correlate_models(masculineperspective[1], masculineperspective[2]))
        r.append(correlate_models(masculineperspective[0], masculineperspective[2]))
        print(sum(r) / len(r))
        print('among women:')
        r = []
        r.append(correlate_models(feminineperspective[0], feminineperspective[1]))
        r.append(correlate_models(feminineperspective[1], feminineperspective[2]))
        r.append(correlate_models(feminineperspective[0], feminineperspective[2]))
        print(sum(r) / len(r))
        print('between genders:')
        # NOTE(review): six of the nine possible cross-gender pairs; kept
        # as-is to preserve the original output.
        r = []
        r.append(correlate_models(masculineperspective[0], feminineperspective[0]))
        r.append(correlate_models(masculineperspective[1], feminineperspective[0]))
        r.append(correlate_models(masculineperspective[1], feminineperspective[1]))
        r.append(correlate_models(masculineperspective[1], feminineperspective[2]))
        r.append(correlate_models(masculineperspective[0], feminineperspective[2]))
        r.append(correlate_models(masculineperspective[2], feminineperspective[2]))
        print(sum(r) / len(r))

    else:
        print("I don't know that command.")
| |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells RPCAPI
"""
import six
from nova.cells import rpcapi as cells_rpcapi
import nova.conf
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests import uuidsentinel as uuids
CONF = nova.conf.CONF
class CellsAPITestCase(test.NoDBTestCase):
"""Test case for cells.api interfaces."""
    def setUp(self):
        """Build a CellsAPI pointed at a fake topic/context for each test."""
        super(CellsAPITestCase, self).setUp()
        self.fake_topic = 'fake_topic'
        self.fake_context = 'fake_context'
        # Enable cells and point the RPC API at our fake topic.
        self.flags(topic=self.fake_topic, enable=True, group='cells')
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def _stub_rpc_method(self, rpc_method, result):
        """Stub the RPC client method named by rpc_method ('call'/'cast').

        Returns a dict the fakes record into: the context, method name,
        kwargs, and (when passed to prepare()) the message version.
        """
        call_info = {}
        orig_prepare = self.cells_rpcapi.client.prepare
        def fake_rpc_prepare(**kwargs):
            # Capture the version the API requested, then return the same
            # client so the chained RPC call hits fake_rpc_method below.
            if 'version' in kwargs:
                call_info['version'] = kwargs.pop('version')
            return self.cells_rpcapi.client
        def fake_csv(version):
            # Delegate version-compatibility checks to the real client.
            return orig_prepare(version).can_send_version()
        def fake_rpc_method(ctxt, method, **kwargs):
            # Record everything and return the canned result.
            call_info['context'] = ctxt
            call_info['method'] = method
            call_info['args'] = kwargs
            return result
        self.stubs.Set(self.cells_rpcapi.client, 'prepare', fake_rpc_prepare)
        self.stubs.Set(self.cells_rpcapi.client, 'can_send_version', fake_csv)
        self.stubs.Set(self.cells_rpcapi.client, rpc_method, fake_rpc_method)
        return call_info
    def _check_result(self, call_info, method, args, version=None):
        """Assert the stubbed RPC saw the expected topic, context, method
        name, kwargs, and (optionally) message version."""
        self.assertEqual(self.fake_topic,
                         self.cells_rpcapi.client.target.topic)
        self.assertEqual(self.fake_context, call_info['context'])
        self.assertEqual(method, call_info['method'])
        self.assertEqual(args, call_info['args'])
        if version is not None:
            self.assertIn('version', call_info)
            # Versions must travel as strings, e.g. '1.34'.
            self.assertIsInstance(call_info['version'], six.string_types,
                                  msg="Message version %s is not a string" %
                                      call_info['version'])
            self.assertEqual(version, call_info['version'])
        else:
            self.assertNotIn('version', call_info)
    def test_cast_compute_api_method(self):
        """Casting wraps the method info and passes call=False."""
        fake_cell_name = 'fake_cell_name'
        fake_method = 'fake_method'
        fake_method_args = (1, 2)
        fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
        expected_method_info = {'method': fake_method,
                                'method_args': fake_method_args,
                                'method_kwargs': fake_method_kwargs}
        expected_args = {'method_info': expected_method_info,
                         'cell_name': fake_cell_name,
                         'call': False}
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.cast_compute_api_method(self.fake_context,
                fake_cell_name, fake_method,
                *fake_method_args, **fake_method_kwargs)
        self._check_result(call_info, 'run_compute_api_method',
                           expected_args)
def test_call_compute_api_method(self):
fake_cell_name = 'fake_cell_name'
fake_method = 'fake_method'
fake_method_args = (1, 2)
fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
fake_response = 'fake_response'
expected_method_info = {'method': fake_method,
'method_args': fake_method_args,
'method_kwargs': fake_method_kwargs}
expected_args = {'method_info': expected_method_info,
'cell_name': fake_cell_name,
'call': True}
call_info = self._stub_rpc_method('call', fake_response)
result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
fake_cell_name, fake_method,
*fake_method_args, **fake_method_kwargs)
self._check_result(call_info, 'run_compute_api_method',
expected_args)
self.assertEqual(fake_response, result)
    def test_build_instances(self):
        """build_instances forwards all kwargs under 'build_inst_kwargs'
        at message version 1.34."""
        call_info = self._stub_rpc_method('cast', None)
        instances = [objects.Instance(id=1),
                     objects.Instance(id=2)]
        self.cells_rpcapi.build_instances(
                self.fake_context, instances=instances,
                image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
        expected_args = {'build_inst_kwargs': {'instances': instances,
                                               'image': {'fake': 'image'},
                                               'arg1': 1,
                                               'arg2': 2,
                                               'arg3': 3}}
        self._check_result(call_info, 'build_instances',
                           expected_args, version='1.34')
def test_get_capacities(self):
capacity_info = {"capacity": "info"}
call_info = self._stub_rpc_method('call',
result=capacity_info)
result = self.cells_rpcapi.get_capacities(self.fake_context,
cell_name="name")
self._check_result(call_info, 'get_capacities',
{'cell_name': 'name'}, version='1.9')
self.assertEqual(capacity_info, result)
    def test_instance_update_at_top(self):
        """instance_update_at_top casts the full instance object (v1.35)."""
        fake_info_cache = objects.InstanceInfoCache(
            instance_uuid=uuids.instance)
        fake_sys_metadata = {'key1': 'value1',
                             'key2': 'value2'}
        fake_attrs = {'id': 2,
                      'cell_name': 'fake',
                      'metadata': {'fake': 'fake'},
                      'info_cache': fake_info_cache,
                      'system_metadata': fake_sys_metadata}
        fake_instance = objects.Instance(**fake_attrs)
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.instance_update_at_top(
                self.fake_context, fake_instance)
        expected_args = {'instance': fake_instance}
        self._check_result(call_info, 'instance_update_at_top',
                           expected_args, version='1.35')
def test_instance_destroy_at_top(self):
fake_instance = objects.Instance(uuid=uuids.instance)
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_destroy_at_top(
self.fake_context, fake_instance)
expected_args = {'instance': fake_instance}
self._check_result(call_info, 'instance_destroy_at_top',
expected_args, version='1.35')
def test_instance_delete_everywhere(self):
instance = fake_instance.fake_instance_obj(self.fake_context)
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_delete_everywhere(
self.fake_context, instance,
'fake-type')
expected_args = {'instance': instance,
'delete_type': 'fake-type'}
self._check_result(call_info, 'instance_delete_everywhere',
expected_args, version='1.27')
def test_instance_fault_create_at_top(self):
fake_instance_fault = {'id': 2,
'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_fault_create_at_top(
self.fake_context, fake_instance_fault)
expected_args = {'instance_fault': fake_instance_fault}
self._check_result(call_info, 'instance_fault_create_at_top',
expected_args)
def test_bw_usage_update_at_top(self):
update_args = ('fake_uuid', 'fake_mac', 'fake_start_period',
'fake_bw_in', 'fake_bw_out', 'fake_ctr_in',
'fake_ctr_out')
update_kwargs = {'last_refreshed': 'fake_refreshed'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bw_usage_update_at_top(
self.fake_context, *update_args, **update_kwargs)
bw_update_info = {'uuid': 'fake_uuid',
'mac': 'fake_mac',
'start_period': 'fake_start_period',
'bw_in': 'fake_bw_in',
'bw_out': 'fake_bw_out',
'last_ctr_in': 'fake_ctr_in',
'last_ctr_out': 'fake_ctr_out',
'last_refreshed': 'fake_refreshed'}
expected_args = {'bw_update_info': bw_update_info}
self._check_result(call_info, 'bw_usage_update_at_top',
expected_args)
def test_get_cell_info_for_neighbors(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.get_cell_info_for_neighbors(
self.fake_context)
self._check_result(call_info, 'get_cell_info_for_neighbors', {},
version='1.1')
self.assertEqual('fake_response', result)
def test_sync_instances(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.sync_instances(self.fake_context,
project_id='fake_project', updated_since='fake_time',
deleted=True)
expected_args = {'project_id': 'fake_project',
'updated_since': 'fake_time',
'deleted': True}
self._check_result(call_info, 'sync_instances', expected_args,
version='1.1')
def test_service_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
fake_filters = {'key1': 'val1', 'key2': 'val2'}
result = self.cells_rpcapi.service_get_all(self.fake_context,
filters=fake_filters)
expected_args = {'filters': fake_filters}
self._check_result(call_info, 'service_get_all', expected_args,
version='1.2')
self.assertEqual('fake_response', result)
def test_service_get_by_compute_host(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.service_get_by_compute_host(
self.fake_context, host_name='fake-host-name')
expected_args = {'host_name': 'fake-host-name'}
self._check_result(call_info, 'service_get_by_compute_host',
expected_args,
version='1.2')
self.assertEqual('fake_response', result)
def test_get_host_uptime(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.get_host_uptime(
self.fake_context, host_name='fake-host-name')
expected_args = {'host_name': 'fake-host-name'}
self._check_result(call_info, 'get_host_uptime',
expected_args,
version='1.17')
self.assertEqual('fake_response', result)
def test_service_update(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.service_update(
self.fake_context, host_name='fake-host-name',
binary='nova-api', params_to_update={'disabled': True})
expected_args = {
'host_name': 'fake-host-name',
'binary': 'nova-api',
'params_to_update': {'disabled': True}}
self._check_result(call_info, 'service_update',
expected_args,
version='1.7')
self.assertEqual('fake_response', result)
def test_service_delete(self):
call_info = self._stub_rpc_method('call', None)
cell_service_id = 'cell@id'
result = self.cells_rpcapi.service_delete(
self.fake_context, cell_service_id=cell_service_id)
expected_args = {'cell_service_id': cell_service_id}
self._check_result(call_info, 'service_delete',
expected_args, version='1.26')
self.assertIsNone(result)
def test_proxy_rpc_to_manager(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.proxy_rpc_to_manager(
self.fake_context, rpc_message='fake-msg',
topic='fake-topic', call=True, timeout=-1)
expected_args = {'rpc_message': 'fake-msg',
'topic': 'fake-topic',
'call': True,
'timeout': -1}
self._check_result(call_info, 'proxy_rpc_to_manager',
expected_args,
version='1.2')
self.assertEqual('fake_response', result)
def test_task_log_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.task_log_get_all(self.fake_context,
task_name='fake_name',
period_beginning='fake_begin',
period_ending='fake_end',
host='fake_host',
state='fake_state')
expected_args = {'task_name': 'fake_name',
'period_beginning': 'fake_begin',
'period_ending': 'fake_end',
'host': 'fake_host',
'state': 'fake_state'}
self._check_result(call_info, 'task_log_get_all', expected_args,
version='1.3')
self.assertEqual('fake_response', result)
def test_compute_node_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
hypervisor_match='fake-match')
expected_args = {'hypervisor_match': 'fake-match'}
self._check_result(call_info, 'compute_node_get_all', expected_args,
version='1.4')
self.assertEqual('fake_response', result)
def test_compute_node_stats(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_stats(self.fake_context)
expected_args = {}
self._check_result(call_info, 'compute_node_stats',
expected_args, version='1.4')
self.assertEqual('fake_response', result)
def test_compute_node_get(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get(self.fake_context,
'fake_compute_id')
expected_args = {'compute_id': 'fake_compute_id'}
self._check_result(call_info, 'compute_node_get',
expected_args, version='1.4')
self.assertEqual('fake_response', result)
def test_actions_get(self):
fake_instance = {'uuid': uuids.instance, 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.actions_get(self.fake_context,
fake_instance)
expected_args = {'cell_name': 'region!child',
'instance_uuid': fake_instance['uuid']}
self._check_result(call_info, 'actions_get', expected_args,
version='1.5')
self.assertEqual('fake_response', result)
def test_actions_get_no_cell(self):
fake_instance = {'uuid': uuids.instance, 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.actions_get, self.fake_context,
fake_instance)
def test_action_get_by_request_id(self):
fake_instance = {'uuid': uuids.instance, 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.action_get_by_request_id(self.fake_context,
fake_instance,
'req-fake')
expected_args = {'cell_name': 'region!child',
'instance_uuid': fake_instance['uuid'],
'request_id': 'req-fake'}
self._check_result(call_info, 'action_get_by_request_id',
expected_args, version='1.5')
self.assertEqual('fake_response', result)
def test_action_get_by_request_id_no_cell(self):
fake_instance = {'uuid': uuids.instance, 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_get_by_request_id,
self.fake_context, fake_instance, 'req-fake')
def test_action_events_get(self):
fake_instance = {'uuid': uuids.instance, 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.action_events_get(self.fake_context,
fake_instance,
'fake-action')
expected_args = {'cell_name': 'region!child',
'action_id': 'fake-action'}
self._check_result(call_info, 'action_events_get', expected_args,
version='1.5')
self.assertEqual('fake_response', result)
def test_action_events_get_no_cell(self):
fake_instance = {'uuid': uuids.instance, 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_events_get,
self.fake_context, fake_instance, 'fake-action')
def test_consoleauth_delete_tokens(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context,
uuids.instance)
expected_args = {'instance_uuid': uuids.instance}
self._check_result(call_info, 'consoleauth_delete_tokens',
expected_args, version='1.6')
def test_validate_console_port(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.validate_console_port(self.fake_context,
uuids.instance, 'fake-port', 'fake-type')
expected_args = {'instance_uuid': uuids.instance,
'console_port': 'fake-port',
'console_type': 'fake-type'}
self._check_result(call_info, 'validate_console_port',
expected_args, version='1.6')
self.assertEqual('fake_response', result)
def test_bdm_update_or_create_at_top(self):
fake_bdm = {'id': 2, 'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bdm_update_or_create_at_top(
self.fake_context, fake_bdm, create='fake-create')
expected_args = {'bdm': fake_bdm, 'create': 'fake-create'}
self._check_result(call_info, 'bdm_update_or_create_at_top',
expected_args, version='1.28')
def test_bdm_destroy_at_top(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bdm_destroy_at_top(self.fake_context,
uuids.instance,
device_name='fake-device',
volume_id='fake-vol')
expected_args = {'instance_uuid': uuids.instance,
'device_name': 'fake-device',
'volume_id': 'fake-vol'}
self._check_result(call_info, 'bdm_destroy_at_top',
expected_args, version='1.10')
def test_get_migrations(self):
call_info = self._stub_rpc_method('call', None)
filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
self.cells_rpcapi.get_migrations(self.fake_context, filters)
expected_args = {'filters': filters}
self._check_result(call_info, 'get_migrations', expected_args,
version="1.11")
def test_instance_update_from_api(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_update_from_api(
self.fake_context, 'fake-instance',
expected_vm_state='exp_vm',
expected_task_state='exp_task',
admin_state_reset='admin_reset')
expected_args = {'instance': 'fake-instance',
'expected_vm_state': 'exp_vm',
'expected_task_state': 'exp_task',
'admin_state_reset': 'admin_reset'}
self._check_result(call_info, 'instance_update_from_api',
expected_args, version='1.16')
def test_start_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.start_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'start_instance',
expected_args, version='1.12')
def test_stop_instance_cast(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=True,
clean_shutdown=True)
expected_args = {'instance': 'fake-instance',
'do_cast': True,
'clean_shutdown': True}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.31')
def test_stop_instance_call(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=False,
clean_shutdown=True)
expected_args = {'instance': 'fake-instance',
'do_cast': False,
'clean_shutdown': True}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.31')
self.assertEqual('fake_response', result)
def test_cell_create(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_create(self.fake_context, 'values')
expected_args = {'values': 'values'}
self._check_result(call_info, 'cell_create',
expected_args, version='1.13')
self.assertEqual('fake_response', result)
def test_cell_update(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_update(self.fake_context,
'cell_name', 'values')
expected_args = {'cell_name': 'cell_name',
'values': 'values'}
self._check_result(call_info, 'cell_update',
expected_args, version='1.13')
self.assertEqual('fake_response', result)
def test_cell_delete(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_delete(self.fake_context,
'cell_name')
expected_args = {'cell_name': 'cell_name'}
self._check_result(call_info, 'cell_delete',
expected_args, version='1.13')
self.assertEqual('fake_response', result)
def test_cell_get(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_get(self.fake_context,
'cell_name')
expected_args = {'cell_name': 'cell_name'}
self._check_result(call_info, 'cell_get',
expected_args, version='1.13')
self.assertEqual('fake_response', result)
def test_reboot_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.reboot_instance(
self.fake_context, 'fake-instance',
block_device_info='ignored', reboot_type='HARD')
expected_args = {'instance': 'fake-instance',
'reboot_type': 'HARD'}
self._check_result(call_info, 'reboot_instance',
expected_args, version='1.14')
def test_pause_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.pause_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'pause_instance',
expected_args, version='1.19')
def test_unpause_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.unpause_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'unpause_instance',
expected_args, version='1.19')
def test_suspend_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.suspend_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'suspend_instance',
expected_args, version='1.15')
def test_resume_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.resume_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'resume_instance',
expected_args, version='1.15')
def test_terminate_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.terminate_instance(self.fake_context,
'fake-instance', [],
delete_type='delete')
expected_args = {'instance': 'fake-instance',
'delete_type': 'delete'}
self._check_result(call_info, 'terminate_instance',
expected_args, version='1.36')
def test_soft_delete_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.soft_delete_instance(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'soft_delete_instance',
expected_args, version='1.18')
def test_resize_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.resize_instance(self.fake_context,
'fake-instance',
dict(cow='moo'),
'fake-hint',
'fake-flavor',
'fake-reservations',
clean_shutdown=True)
expected_args = {'instance': 'fake-instance',
'flavor': 'fake-flavor',
'extra_instance_updates': dict(cow='moo'),
'clean_shutdown': True}
self._check_result(call_info, 'resize_instance',
expected_args, version='1.33')
def test_resize_instance_not_passing_request_spec(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.resize_instance(self.fake_context,
'fake-instance',
dict(cow='moo'),
'fake-hint',
'fake-flavor',
'fake-reservations',
clean_shutdown=True,
request_spec='fake-spec')
expected_args = {'instance': 'fake-instance',
'flavor': 'fake-flavor',
'extra_instance_updates': dict(cow='moo'),
'clean_shutdown': True}
self._check_result(call_info, 'resize_instance',
expected_args, version='1.33')
def test_live_migrate_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.live_migrate_instance(self.fake_context,
'fake-instance',
'fake-host',
'fake-block',
'fake-commit')
expected_args = {'instance': 'fake-instance',
'block_migration': 'fake-block',
'disk_over_commit': 'fake-commit',
'host_name': 'fake-host'}
self._check_result(call_info, 'live_migrate_instance',
expected_args, version='1.20')
def test_live_migrate_instance_not_passing_request_spec(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.live_migrate_instance(self.fake_context,
'fake-instance',
'fake-host',
'fake-block',
'fake-commit',
'fake-spec')
expected_args = {'instance': 'fake-instance',
'block_migration': 'fake-block',
'disk_over_commit': 'fake-commit',
'host_name': 'fake-host'}
self._check_result(call_info, 'live_migrate_instance',
expected_args, version='1.20')
def test_rebuild_instance_not_passing_request_spec(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.rebuild_instance(self.fake_context,
'fake-instance',
'fake-pass',
'fake-files',
'fake-image_ref',
'fake-orig_image_ref',
'fake-orig_sys_metadata',
'fake-bdms',
recreate=False,
on_shared_storage=False,
host=None,
preserve_ephemeral=False,
request_spec='fake-spec',
kwargs=None)
expected_args = {'instance': 'fake-instance',
'image_href': 'fake-image_ref',
'admin_password': 'fake-pass',
'files_to_inject': 'fake-files',
'preserve_ephemeral': False,
'kwargs': None}
self._check_result(call_info, 'rebuild_instance',
expected_args, version='1.25')
def test_revert_resize(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.revert_resize(self.fake_context,
'fake-instance',
'fake-migration',
'fake-dest',
'resvs')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'revert_resize',
expected_args, version='1.21')
def test_confirm_resize(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.confirm_resize(self.fake_context,
'fake-instance',
'fake-migration',
'fake-source',
'resvs')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'confirm_resize',
expected_args, version='1.21')
def test_reset_network(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.reset_network(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'reset_network',
expected_args, version='1.22')
def test_inject_network_info(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.inject_network_info(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'inject_network_info',
expected_args, version='1.23')
def test_snapshot_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.snapshot_instance(self.fake_context,
'fake-instance',
'image-id')
expected_args = {'instance': 'fake-instance',
'image_id': 'image-id'}
self._check_result(call_info, 'snapshot_instance',
expected_args, version='1.24')
def test_backup_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.backup_instance(self.fake_context,
'fake-instance',
'image-id',
'backup-type',
'rotation')
expected_args = {'instance': 'fake-instance',
'image_id': 'image-id',
'backup_type': 'backup-type',
'rotation': 'rotation'}
self._check_result(call_info, 'backup_instance',
expected_args, version='1.24')
def test_set_admin_password(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.set_admin_password(self.fake_context,
'fake-instance', 'fake-password')
expected_args = {'instance': 'fake-instance',
'new_pass': 'fake-password'}
self._check_result(call_info, 'set_admin_password',
expected_args, version='1.29')
def test_get_keypair_at_top(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.get_keypair_at_top(self.fake_context,
'fake_user_id', 'fake_name')
expected_args = {'user_id': 'fake_user_id',
'name': 'fake_name'}
self._check_result(call_info, 'get_keypair_at_top',
expected_args, version='1.37')
self.assertEqual(result, 'fake_response')
def test_get_keypair_at_top_with_not_found(self):
call_info = self._stub_rpc_method('call', None)
self.assertRaises(exception.KeypairNotFound,
self.cells_rpcapi.get_keypair_at_top,
self.fake_context, 'fake_user_id', 'fake_name')
expected_args = {'user_id': 'fake_user_id',
'name': 'fake_name'}
self._check_result(call_info, 'get_keypair_at_top',
expected_args, version='1.37')
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
# Signature of the optional ``cls`` response hook accepted by the operations
# below: (pipeline_response, deserialized_model, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class EventRoutesOperations:
"""EventRoutesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.digitaltwins.core.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list(
        self,
        event_routes_list_options: Optional["_models.EventRoutesListOptions"] = None,
        **kwargs
    ) -> AsyncIterable["_models.EventRouteCollection"]:
        """Retrieves all event routes.
        Status codes:
        * 200 OK.
        :param event_routes_list_options: Parameter group.
        :type event_routes_list_options: ~azure.digitaltwins.core.models.EventRoutesListOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either EventRouteCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.digitaltwins.core.models.EventRouteCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.EventRouteCollection"]
        # Map well-known HTTP failures to typed azure-core exceptions; callers
        # may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Flatten the optional parameter group into the individual values that
        # become request headers below.
        _traceparent = None
        _tracestate = None
        _max_items_per_page = None
        if event_routes_list_options is not None:
            _traceparent = event_routes_list_options.traceparent
            _tracestate = event_routes_list_options.tracestate
            _max_items_per_page = event_routes_list_options.max_items_per_page
        api_version = "2020-10-31"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for either the first page (service URL +
            # api-version) or a continuation page (opaque next_link).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            if _traceparent is not None:
                header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
            if _tracestate is not None:
                header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
            if _max_items_per_page is not None:
                header_parameters['max-items-per-page'] = self._serialize.header("max_items_per_page", _max_items_per_page, 'int')
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The service-supplied next link already encodes its query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Pull one page's items and the link to the following page.
            deserialized = self._deserialize('EventRouteCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                # NOTE(review): the error body is deserialized before map_error
                # here, while get_by_id/add call map_error first -- this matches
                # the AutoRest paging template; changes are lost on regeneration.
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/eventroutes'} # type: ignore
    async def get_by_id(
        self,
        id: str,
        event_routes_get_by_id_options: Optional["_models.EventRoutesGetByIdOptions"] = None,
        **kwargs
    ) -> "_models.DigitalTwinsEventRoute":
        """Retrieves an event route.
        Status codes:
        * 200 OK
        * 404 Not Found
        * EventRouteNotFound - The event route was not found.
        :param id: The id for an event route. The id is unique within event routes and case sensitive.
        :type id: str
        :param event_routes_get_by_id_options: Parameter group.
        :type event_routes_get_by_id_options: ~azure.digitaltwins.core.models.EventRoutesGetByIdOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DigitalTwinsEventRoute, or the result of cls(response)
        :rtype: ~azure.digitaltwins.core.models.DigitalTwinsEventRoute
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DigitalTwinsEventRoute"]
        # Map well-known HTTP failures to typed azure-core exceptions; callers
        # may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Flatten the optional parameter group into individual header values.
        _traceparent = None
        _tracestate = None
        if event_routes_get_by_id_options is not None:
            _traceparent = event_routes_get_by_id_options.traceparent
            _tracestate = event_routes_get_by_id_options.tracestate
        api_version = "2020-10-31"
        accept = "application/json"
        # Construct URL
        url = self.get_by_id.metadata['url'] # type: ignore
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        if _traceparent is not None:
            header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
        if _tracestate is not None:
            header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # map_error raises the mapped typed exception for 401/404/409;
            # any other status falls through to the generic HttpResponseError.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('DigitalTwinsEventRoute', pipeline_response)
        if cls:
            # Hand the raw pipeline response to the caller-supplied hook.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_by_id.metadata = {'url': '/eventroutes/{id}'} # type: ignore
    async def add(
        self,
        id: str,
        event_route: Optional["_models.DigitalTwinsEventRoute"] = None,
        event_routes_add_options: Optional["_models.EventRoutesAddOptions"] = None,
        **kwargs
    ) -> None:
        """Adds or replaces an event route.
        Status codes:
        * 204 No Content
        * 400 Bad Request
        * EventRouteEndpointInvalid - The endpoint provided does not exist or is not active.
        * EventRouteFilterInvalid - The event route filter is invalid.
        * EventRouteIdInvalid - The event route id is invalid.
        * LimitExceeded - The maximum number of event routes allowed has been reached.
        :param id: The id for an event route. The id is unique within event routes and case sensitive.
        :type id: str
        :param event_route: The event route data.
        :type event_route: ~azure.digitaltwins.core.models.DigitalTwinsEventRoute
        :param event_routes_add_options: Parameter group.
        :type event_routes_add_options: ~azure.digitaltwins.core.models.EventRoutesAddOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known HTTP failures to typed azure-core exceptions; callers
        # may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Flatten the optional parameter group into individual header values.
        _traceparent = None
        _tracestate = None
        if event_routes_add_options is not None:
            _traceparent = event_routes_add_options.traceparent
            _tracestate = event_routes_add_options.tracestate
        api_version = "2020-10-31"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.add.metadata['url'] # type: ignore
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        if _traceparent is not None:
            header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
        if _tracestate is not None:
            header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the optional body; a missing event_route sends an empty
        # body (PUT semantics: add or replace).
        body_content_kwargs = {} # type: Dict[str, Any]
        if event_route is not None:
            body_content = self._serialize.body(event_route, 'DigitalTwinsEventRoute')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [204]:
            # map_error raises the mapped typed exception for 401/404/409;
            # any other status falls through to the generic HttpResponseError.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)
        if cls:
            # Caller-supplied hook receives the raw pipeline response.
            return cls(pipeline_response, None, {})
    add.metadata = {'url': '/eventroutes/{id}'} # type: ignore
async def delete(
    self,
    id: str,
    event_routes_delete_options: Optional["_models.EventRoutesDeleteOptions"] = None,
    **kwargs
) -> None:
    """Deletes an event route.

    Status codes:

    * 204 No Content
    * 404 Not Found

      * EventRouteNotFound - The event route was not found.

    :param id: The id for an event route. The id is unique within event routes and case sensitive.
    :type id: str
    :param event_routes_delete_options: Parameter group.
    :type event_routes_delete_options: ~azure.digitaltwins.core.models.EventRoutesDeleteOptions
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map documented failure codes onto azure-core exception types; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Distributed-tracing headers arrive through the optional parameter group.
    if event_routes_delete_options is None:
        _traceparent = _tracestate = None
    else:
        _traceparent = event_routes_delete_options.traceparent
        _tracestate = event_routes_delete_options.tracestate

    api_version = "2020-10-31"
    accept = "application/json"

    # Construct URL
    url = self.delete.metadata['url']  # type: ignore
    url = self._client.format_url(url, id=self._serialize.url("id", id, 'str'))

    # Construct parameters
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    if _traceparent is not None:
        header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
    if _tracestate is not None:
        header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # The service replies 204 on success; anything else is raised as an error.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    if cls:
        return cls(pipeline_response, None, {})
delete.metadata = {'url': '/eventroutes/{id}'}  # type: ignore
| |
# -*- coding: utf-8 -*-
# This script is designed to be run as a Web2Py application:
# python web2py.py -S eden -M -R applications/eden/modules/tests/suite.py
# or
# python web2py.py -S eden -M -R applications/eden/modules/tests/suite.py -A testscript
import argparse
import unittest
from gluon import current
from gluon.storage import Storage
current.data = Storage()
# @ToDo: Load these only when running Selenium tests
# (shouldn't be required for Smoke tests)
# (means removing the *)
from selenium import webdriver
from tests.asset import *
from tests.inv import *
from tests.member import *
from tests.org import *
from tests.project import *
from tests.staff import *
from tests.volunteer import *
from tests.helpers import *
from tests.event import *
def loadAllTests():
    """Build the full Selenium test suite for the active template.

    Executes private/templates/&lt;current_template&gt;/tests.py (falling back to
    the "default" template) to populate current.selenium_tests with test
    class names, then assembles a suite from those classes.

    @return: a unittest.TestSuite containing every listed test class
    """
    # Run the file private/templates/<current_template>/tests.py to get tests list.
    path = os.path.join(request.folder,
                        "private", "templates",
                        settings.get_template(),
                        "tests.py")
    if not os.path.exists(path):
        # Fallback to the default template tests.
        path = os.path.join(request.folder,
                            "private", "templates",
                            "default",
                            "tests.py")
    settings.exec_template(path)

    tests_list = current.selenium_tests
    loadTests = unittest.TestLoader().loadTestsFromTestCase

    # The test classes were star-imported at module level, so resolve them
    # by name via globals() instead of exec()ing generated source, which is
    # fragile, unsafe, and cannot bind a local variable on Python 3.
    suite = unittest.TestSuite()
    for class_name in tests_list:
        suite.addTests(loadTests(globals()[class_name]))
    return suite
# Set up the command line arguments
desc = "Script to run the Sahana Eden test suite."
parser = argparse.ArgumentParser(description = desc)
parser.add_argument("-C", "--class",
help = "Name of class to run")
method_desc = """Name of method to run, this is used in conjunction with the
class argument or with the name of the class followed by the name of the method
separated with a period, class.method.
"""
parser.add_argument("-M",
"--method",
"--test",
help = method_desc)
parser.add_argument("-A",
"--auth",
help = "web2py default argument feed")
parser.add_argument("-V", "--verbose",
type = int,
default = 2,
help = "The level of verbose reporting")
parser.add_argument("--nohtml",
action='store_const',
const=True,
help = "Disable HTML reporting.")
parser.add_argument("--html-path",
help = "Path where the HTML report will be saved.",
default = "")
parser.add_argument("--html-name-date",
action='store_const',
const=True,
help = "Include just the date in the name of the HTML report.")
suite_desc = """This will execute a standard testing schedule. The valid values
are, smoke, quick, complete and full. If a method or class options is selected
the the suite will be ignored.
The suite options can be described as follows:
smoke: This will run the broken link test
quick: This will run all the tests marked as essential
complete: This will run all tests except those marked as long
full: This will run all tests
"""
parser.add_argument("--suite",
help = suite_desc,
choices = ["smoke", "roles", "quick", "complete", "full"],
default = "quick")
parser.add_argument("--link-depth",
type = int,
default = 16,
help = "The recursive depth when looking for links")
desc = """This will record the timings in a spreadsheet file. The data
will be accumulated over time holding a maximum of 100 results, The file will
automatically rotated. This will hold details for another program to analyse.
The file will be written to the same location as the HTML report.
"""
parser.add_argument("-r",
"--record-timings",
action='store_const',
const=True,
help = desc)
up_desc = """The user name and password, separated by a /. Multiple user name
and passwords can be added by separating them with a comma. If multiple user
name and passwords are provided then the same test will be run sequentially
using the given user in each case.
"""
parser.add_argument("--user-password",
default = "admin@example.com/testing",
help = up_desc)
parser.add_argument("--keep-browser-open",
help = "Keep the browser open once the tests have finished running",
action='store_const',
const = True)
parser.add_argument("--browser",
help = "Set the browser to use (Firefox/Chrome)",
action = "store",
default = "Firefox")
desc = """Run the smoke tests even if debug is set to true.
With debug on it can add up to a second per link and given that a full run
of the smoke tests will include thousands of links the difference of having
this setting on can be measured in hours.
"""
parser.add_argument("--force-debug",
action='store_const',
const=True,
help = desc)
desc = """Set a threshold in seconds.
If in the smoke tests it takes longer than this to get the link then it will be reported.
"""
parser.add_argument("--threshold",
type = int,
default = 10,
help = desc)
desc = """Smoke test report only.
Don't actually run the smoke tests but rebuild the smoke test report.
"""
parser.add_argument("--smoke-report",
action='store_const',
const=True,
help = desc)
# Parse the command line and keep all options as a plain dict
argsObj = parser.parse_args()
args = argsObj.__dict__

# Map the --browser option (case-insensitive) to a Selenium driver class
active_driver = {'firefox': webdriver.Firefox,
                 'chrome': webdriver.Chrome}[args['browser'].lower()]

# Read Settings
settings = current.deployment_settings
public_url = settings.get_base_public_url()
base_url = "%s/%s" % (public_url, current.request.application)
system_name = settings.get_system_name()

# Store these to be available to modules
config = current.test_config = Storage()
config.system_name = system_name
config.timeout = 5  # seconds
config.url = base_url

base_dir = os.path.join(os.getcwd(), "applications", current.request.application)
test_dir = os.path.join(base_dir, "modules", "tests")
config.base_dir = base_dir

# Selenium tests interact with forms, so the "navigate away" confirmation
# dialog would block them.
if args["suite"] != "smoke" and settings.get_ui_navigate_away_confirm():
    print("The tests will fail unless you have settings.ui.navigate_away_confirm = False in models/000_config.py")
    exit()

# Smoke runs are impractically slow with debug enabled, so refuse unless
# explicitly forced.
if args["suite"] in ("smoke", "complete"):
    if settings.get_base_debug() and not args["force_debug"]:
        print("settings.base.debug is set to True in 000_config.py, either set it to False or use the --force-debug switch")
        exit()

config.record_timings = args["record_timings"]
if config.record_timings:
    # Timing spreadsheets are written alongside the HTML report
    path = args["html_path"]
    config.record_timings_filename = os.path.join(path, "Sahana-Eden-record-timings.xls")
    config.record_summary_filename = os.path.join(path, "Sahana-Eden-record-summary.xls")
config.verbose = args["verbose"]

# Set to True as soon as a Selenium driver is launched, so cleanup knows
# whether there is anything to close.
browser_open = False
# @todo test with invalid class and methods passed as CLA

def _open_browser():
    """Start the configured Selenium driver, register it in config and return it."""
    browser = config.browser = active_driver()
    browser.implicitly_wait(config.timeout)
    return browser

def _build_smoke_test():
    """Build the broken-link (smoke) test, or return None if it is unavailable."""
    try:
        from tests.smoke import BrokenLinkTest
        broken_links = BrokenLinkTest()
        broken_links.setReportOnly(args["smoke_report"])
        broken_links.setDepth(args["link_depth"])
        broken_links.setThreshold(args["threshold"])
        broken_links.setUser(args["user_password"])
        return broken_links
    except (ImportError, NameError) as msg:
        from s3 import s3_debug
        s3_debug("%s, unable to run the smoke tests." % msg)
        return None

if args["method"]:
    # Run a single test method
    browser = _open_browser()
    browser_open = True
    if args["class"]:
        class_name, method_name = args["class"], args["method"]
    else:
        # --method also accepts the documented "Class.method" form; the
        # original code crashed here when --class was omitted.
        class_name, _sep, method_name = args["method"].rpartition(".")
        if not class_name:
            print("--method requires --class, or the Class.method form")
            exit()
    # The test classes were star-imported above, so resolve by name
    suite = unittest.TestLoader().loadTestsFromName(method_name,
                                                    globals()[class_name])
elif args["class"]:
    # Run a single Selenium test class
    browser = _open_browser()
    browser_open = True
    suite = unittest.TestLoader().loadTestsFromTestCase(globals()[args["class"]])
elif args["suite"] == "smoke":
    # Run Smoke tests only (no browser needed)
    suite = unittest.TestSuite()
    smoke_test = _build_smoke_test()
    if smoke_test is not None:
        suite.addTest(smoke_test)
elif args["suite"] == "roles":
    # Run Roles tests
    from tests.roles.test_roles import *
    suite = test_roles()
elif args["suite"] == "complete":
    # Run all Selenium Tests & Smoke Tests
    browser = _open_browser()
    browser_open = True
    suite = loadAllTests()
    smoke_test = _build_smoke_test()
    if smoke_test is not None:
        suite.addTest(smoke_test)
else:
    # Run all Selenium Tests
    browser = _open_browser()
    browser_open = True
    suite = loadAllTests()

config.html = False
if args["nohtml"]:
    unittest.TextTestRunner(verbosity=config.verbose).run(suite)
else:
    try:
        # Import the HTML runner *before* opening the report file, so a
        # missing runner cannot leak an open file handle.
        from tests.runner import EdenHTMLTestRunner
        path = args["html_path"]
        if args["html_name_date"]:
            filename = "Sahana-Eden-%s.html" % current.request.now.date()
        else:
            filename = "Sahana-Eden-%s.html" % current.request.now
        # Windows compatibility
        filename = filename.replace(":", "-")
        fullname = os.path.join(path, filename)
        fp = open(fullname, "wb")
        try:
            config.html = True
            runner = EdenHTMLTestRunner(stream = fp,
                                        title = "Sahana Eden",
                                        verbosity = config.verbose,
                                        )
            runner.run(suite)
        finally:
            # The original version leaked this handle
            fp.close()
    except ImportError:
        config.html = False
        unittest.TextTestRunner(verbosity=config.verbose).run(suite)

# Cleanup
if browser_open and not args["keep_browser_open"]:
    browser.close()

# END =========================================================================
| |
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import IECoreScene
import imath
class ShaderNetworkTest( unittest.TestCase ) :

    """Unit tests for IECoreScene.ShaderNetwork : Parameter and Connection
    value types, shader/connection accessors, output management, hashing,
    equality, copying, serialisation and attribute substitutions."""

    def testParameter( self ) :

        # Default construction yields empty shader/name strings.
        p = IECoreScene.ShaderNetwork.Parameter()
        self.assertEqual( p.shader, "" )
        self.assertEqual( p.name, "" )
        self.assertIsInstance( p.shader, str )
        self.assertIsInstance( p.name, str )

        # One-argument construction fills only the shader handle.
        p = IECoreScene.ShaderNetwork.Parameter( "x" )
        self.assertEqual( p.shader, "x" )
        self.assertEqual( p.name, "" )

        p = IECoreScene.ShaderNetwork.Parameter( "x", "diffuse" )
        self.assertEqual( p.shader, "x" )
        self.assertEqual( p.name, "diffuse" )

        # Fields are mutable after construction.
        p.shader = "y"
        self.assertEqual( p.shader, "y" )

        p.name = "specular"
        self.assertEqual( p.name, "specular" )

        self.assertEqual(
            repr( p ),
            'Parameter( "y", "specular" )'
        )

    def testParameterAsBool( self ) :

        # A Parameter is falsy only when both fields are empty.
        self.assertFalse( IECoreScene.ShaderNetwork.Parameter() )
        self.assertTrue( IECoreScene.ShaderNetwork.Parameter( "x", "" ) )
        self.assertTrue( IECoreScene.ShaderNetwork.Parameter( "x", "y" ) )
        self.assertTrue( IECoreScene.ShaderNetwork.Parameter( "", "y" ) )

    def testConnection( self ) :

        # Default connection has empty source and destination parameters.
        c = IECoreScene.ShaderNetwork.Connection()
        self.assertEqual( c.source.shader, "" )
        self.assertEqual( c.source.name, "" )
        self.assertEqual( c.destination.shader, "" )
        self.assertEqual( c.destination.name, "" )

        p1 = IECoreScene.ShaderNetwork.Parameter( "x", "out" )
        p2 = IECoreScene.ShaderNetwork.Parameter( "y", "in" )
        c = IECoreScene.ShaderNetwork.Connection( p1, p2 )
        self.assertEqual( c.source, p1 )
        self.assertEqual( c.destination, p2 )

        # The nested parameters are mutable in place.
        c.source.name = "out.r"
        self.assertEqual( c.source.name, "out.r" )

        self.assertEqual(
            repr( c ),
            'Connection( Parameter( "x", "out.r" ), Parameter( "y", "in" ) )'
        )

    def testConstructor( self ) :

        # Networks can be built in one call from shaders, connections and output.
        shaders = {
            "out" : IECoreScene.Shader( "flat", "surface" ),
            "texture" : IECoreScene.Shader( "noise", "shader" ),
        }
        connections = [
            IECoreScene.ShaderNetwork.Connection(
                IECoreScene.ShaderNetwork.Parameter(
                    "texture", "outColor"
                ),
                IECoreScene.ShaderNetwork.Parameter(
                    "out", "Cs"
                )
            )
        ]

        n = IECoreScene.ShaderNetwork(
            shaders = shaders,
            connections = connections,
            output = IECoreScene.ShaderNetwork.Parameter( "out" )
        )

        self.assertEqual( n.shaders(), shaders )
        self.assertEqual( n.inputConnections( "out" ), connections )
        self.assertEqual( n.outputConnections( "texture" ), connections )
        self.assertEqual( n.getOutput(), IECoreScene.ShaderNetwork.Parameter( "out" ) )

    def testShaderAccessors( self ) :

        n = IECoreScene.ShaderNetwork()
        s1 = IECoreScene.Shader()
        s2 = IECoreScene.Shader()

        n.addShader( "s1", s1 )
        # The network stores a copy : identity differs but value compares equal.
        self.assertFalse( n.getShader( "s1" ).isSame( s1 ) )
        self.assertEqual( n.getShader( "s1" ), s1 )
        self.assertEqual( n.shaders(), { "s1" : s1 } )

        n.addShader( "s2", s2 )
        self.assertEqual( n.shaders(), { "s1" : s1, "s2" : s2 } )
        self.assertFalse( n.getShader( "s2" ).isSame( s2 ) )
        self.assertEqual( n.getShader( "s2" ), s2 )

        n.removeShader( "s1" )
        # Removing an absent handle is an error.
        with self.assertRaises( RuntimeError ) :
            n.removeShader( "s1" )

        self.assertEqual( n.shaders(), { "s2" : s2 } )
        self.assertEqual( n.getShader( "s1" ), None )

    def testOutput( self ) :

        n = IECoreScene.ShaderNetwork()
        self.assertEqual( n.getOutput(), n.Parameter() )

        s1 = IECoreScene.Shader( "constant" )
        s2 = IECoreScene.Shader( "noise" )

        # Adding shaders alone does not designate an output.
        n.addShader( "s1", s1 )
        self.assertEqual( n.getOutput(), n.Parameter() )
        self.assertEqual( n.outputShader(), None )

        n.addShader( "s2", s2 )
        self.assertEqual( n.getOutput(), n.Parameter() )
        self.assertEqual( n.outputShader(), None )

        n.setOutput( n.Parameter( "s1", "" ) )
        self.assertEqual( n.getOutput(), n.Parameter( "s1", "" ) )
        self.assertEqual( n.outputShader(), s1 )

        # Removing the output shader resets the output to the default.
        n.removeShader( "s1" )
        self.assertEqual( n.getOutput(), n.Parameter() )
        self.assertEqual( n.outputShader(), None )

        n.setOutput( n.Parameter( "s2", "" ) )
        self.assertEqual( n.getOutput(), n.Parameter( "s2", "" ) )
        self.assertEqual( n.outputShader(), s2 )

        # The output must refer to a shader present in the network.
        with self.assertRaisesRegexp( RuntimeError, "Output shader \"s1\" not in network" ) :
            n.setOutput( n.Parameter( "s1", "" ) )

    def testAddAndRemoveConnection( self ) :

        n = IECoreScene.ShaderNetwork()
        s1 = IECoreScene.Shader()
        s2 = IECoreScene.Shader()

        c = IECoreScene.ShaderNetwork.Connection(
            IECoreScene.ShaderNetwork.Parameter(
                "s1", "out",
            ),
            IECoreScene.ShaderNetwork.Parameter(
                "s2", "in",
            ),
        )

        # Both endpoint shaders must exist before a connection can be made.
        with self.assertRaisesRegexp( RuntimeError, "Source shader \"s1\" not in network" ) :
            n.addConnection( c )

        n.addShader( "s1", s1 )
        self.assertEqual( n.inputConnections( "s1" ), [] )
        self.assertEqual( n.outputConnections( "s1" ), [] )

        with self.assertRaisesRegexp( RuntimeError, "Destination shader \"s2\" not in network" ) :
            n.addConnection( c )

        n.addShader( "s2", s2 )
        self.assertEqual( n.inputConnections( "s2" ), [] )
        self.assertEqual( n.outputConnections( "s2" ), [] )
        self.assertFalse( n.input( c.destination ) )

        n.addConnection( c )
        self.assertEqual( n.inputConnections( "s2" ), [ c ] )
        self.assertEqual( n.outputConnections( "s1" ), [ c ] )
        self.assertEqual( n.input( c.destination ), c.source )

        n.removeConnection( c )
        self.assertEqual( n.inputConnections( "s2" ), [] )
        self.assertEqual( n.outputConnections( "s1" ), [] )
        self.assertFalse( n.input( c.destination ) )

        # Removing a connection that is not present is an error.
        with self.assertRaisesRegexp( RuntimeError, "Connection \"s1.out -> s2.in\" not in network" ) :
            n.removeConnection( c )

    def testRemovingSourceShaderRemovesConnections( self ) :

        n = IECoreScene.ShaderNetwork()
        n.addShader( "s1", IECoreScene.Shader() )
        n.addShader( "s2", IECoreScene.Shader() )

        c = n.Connection( n.Parameter( "s1", "out" ), n.Parameter( "s2", "in" ) )
        n.addConnection( c )
        self.assertEqual( n.inputConnections( "s2" ), [ c ] )
        self.assertEqual( n.outputConnections( "s1" ), [ c ] )

        # Dropping the source shader also drops its outgoing connections.
        n.removeShader( "s1" )
        self.assertEqual( n.inputConnections( "s2" ), [] )

    def testRemovingDestinationShaderRemovesConnections( self ) :

        n = IECoreScene.ShaderNetwork()
        n.addShader( "s1", IECoreScene.Shader() )
        n.addShader( "s2", IECoreScene.Shader() )

        c = n.Connection( n.Parameter( "s1", "out" ), n.Parameter( "s2", "in" ) )
        n.addConnection( c )
        self.assertEqual( n.inputConnections( "s2" ), [ c ] )
        self.assertEqual( n.outputConnections( "s1" ), [ c ] )

        # Dropping the destination shader also drops its incoming connections.
        n.removeShader( "s2" )
        self.assertEqual( n.outputConnections( "s1" ), [] )

    def testHash( self ) :

        hashes = set()
        def assertHashUnique( s ) :
            # Every structural change must yield a hash not seen before.
            h = s.hash()
            self.assertNotIn( h, hashes )
            hashes.add( h )

        n = IECoreScene.ShaderNetwork()
        assertHashUnique( n )

        s1 = IECoreScene.Shader()
        s2 = IECoreScene.Shader()

        n.addShader( "s1", s1 )
        assertHashUnique( n )

        n.addShader( "s2", s2 )
        assertHashUnique( n )

        n.addConnection(
            IECoreScene.ShaderNetwork.Connection(
                IECoreScene.ShaderNetwork.Parameter(
                    "s2", "out",
                ),
                IECoreScene.ShaderNetwork.Parameter(
                    "s1", "in",
                ),
            )
        )
        assertHashUnique( n )

        n.setOutput( n.Parameter( "s2" ) )
        assertHashUnique( n )

    def testHashIsIndependentOfConstructionOrder( self ) :

        s1 = IECoreScene.Shader( "constant", "surface" )
        s2 = IECoreScene.Shader( "noise", "shader" )

        n1 = IECoreScene.ShaderNetwork()
        n2 = IECoreScene.ShaderNetwork()
        self.assertEqual( n1.hash(), n2.hash() )

        n1.addShader( "s1", s1 )
        n2.addShader( "s2", s2 )
        self.assertNotEqual( n1.hash(), n2.hash() )

        # Same content added in a different order hashes identically.
        n1.addShader( "s2", s2 )
        n2.addShader( "s1", s1 )
        self.assertEqual( n1.hash(), n2.hash() )

        n2.setOutput( n2.Parameter( "s1" ) )
        self.assertNotEqual( n1.hash(), n2.hash() )

        n1.setOutput( n1.Parameter( "s1" ) )
        self.assertEqual( n1.hash(), n2.hash() )

        c = IECoreScene.ShaderNetwork.Connection(
            IECoreScene.ShaderNetwork.Parameter(
                "s2", "",
            ),
            IECoreScene.ShaderNetwork.Parameter(
                "s1", "Cs",
            ),
        )
        n1.addConnection( c )
        self.assertNotEqual( n1.hash(), n2.hash() )

        n2.addConnection( c )
        self.assertEqual( n1.hash(), n2.hash() )

    def testHashRepeatability( self ) :

        # Identical networks must hash identically across instances.
        s = [
            IECoreScene.ShaderNetwork( shaders = { "flat" : IECoreScene.Shader( "flat" ) } )
            for i in range( 0, 10 )
        ]

        self.assertEqual( len( { x.hash().toString() for x in s } ), 1 )

    def testEquality( self ) :

        s1 = IECoreScene.Shader( "constant", "surface" )
        s2 = IECoreScene.Shader( "noise", "shader" )

        n1 = IECoreScene.ShaderNetwork()
        n2 = IECoreScene.ShaderNetwork()
        self.assertEqual( n1, n2 )

        n1.addShader( "s1", s1 )
        n2.addShader( "s2", s2 )
        self.assertNotEqual( n1, n2 )

        # Equality depends on content, not on insertion order.
        n1.addShader( "s2", s2 )
        n2.addShader( "s1", s1 )
        self.assertEqual( n1, n2 )

        n2.setOutput( n2.Parameter( "s1" ) )
        self.assertNotEqual( n1, n2 )

        n1.setOutput( n2.Parameter( "s1" ) )
        self.assertEqual( n1, n2 )

        c = IECoreScene.ShaderNetwork.Connection(
            IECoreScene.ShaderNetwork.Parameter(
                "s2", "",
            ),
            IECoreScene.ShaderNetwork.Parameter(
                "s1", "Cs",
            ),
        )
        n1.addConnection( c )

        # Ensure equality is order independent, as we compare lists,
        # we need to ensure n2 being a super-set of n1 doesn't pass.
        self.assertNotEqual( n1, n2 )
        self.assertNotEqual( n2, n1 )

        n2.addConnection( c )
        self.assertEqual( n1, n2 )

    def testEqualityIgnoresShaderIdentity( self ) :

        # Two distinct but value-equal shaders produce equal networks.
        s1 = IECoreScene.Shader( "constant", "surface" )
        s2 = IECoreScene.Shader( "constant", "surface" )
        self.assertEqual( s1, s2 )

        n1 = IECoreScene.ShaderNetwork( { "s" : s1 } )
        n2 = IECoreScene.ShaderNetwork( { "s" : s2 } )
        self.assertEqual( n1, n2 )

    def testCopy( self ) :

        n1 = IECoreScene.ShaderNetwork(
            shaders = {
                "s1" : IECoreScene.Shader( "constant", "surface" ),
                "s2" : IECoreScene.Shader( "noise", "shader" ),
            },
            connections = [
                IECoreScene.ShaderNetwork.Connection(
                    IECoreScene.ShaderNetwork.Parameter(
                        "s2", "",
                    ),
                    IECoreScene.ShaderNetwork.Parameter(
                        "s1", "Cs",
                    ),
                )
            ]
        )

        n2 = n1.copy()
        self.assertEqual( n1, n2 )

    def testSerialisation( self ) :

        n = IECoreScene.ShaderNetwork(
            shaders = {
                "s1" : IECoreScene.Shader( "constant", "surface" ),
                "s2" : IECoreScene.Shader( "noise", "shader" ),
            },
            connections = [
                IECoreScene.ShaderNetwork.Connection(
                    IECoreScene.ShaderNetwork.Parameter(
                        "s2", "",
                    ),
                    IECoreScene.ShaderNetwork.Parameter(
                        "s1", "Cs",
                    ),
                )
            ],
            output = IECoreScene.ShaderNetwork.Parameter( "s1" )
        )

        # Round-trip through an in-memory IndexedIO file.
        m = IECore.MemoryIndexedIO( IECore.CharVectorData(), [], IECore.IndexedIO.OpenMode.Append )
        n.save( m, "test" )

        self.assertEqual( IECore.Object.load( m, "test" ), n )

    def testPassParameterAsTuple( self ) :

        # ( shader, name ) tuples are implicitly converted to Parameters.
        c = IECoreScene.ShaderNetwork.Connection( ( "fromShader", "fromName" ), ( "toShader", "toName" ) )
        self.assertEqual( c.source, IECoreScene.ShaderNetwork.Parameter( "fromShader", "fromName" ) )
        self.assertEqual( c.destination, IECoreScene.ShaderNetwork.Parameter( "toShader", "toName" ) )

    def testPassConnectionAsTuple( self ) :

        # Nested tuples are accepted for connections : construction must not throw.
        n = IECoreScene.ShaderNetwork(
            shaders = {
                "s1" : IECoreScene.Shader( "constant", "surface" ),
                "s2" : IECoreScene.Shader( "noise", "shader" ),
            },
            connections = [
                ( ( "s2", "" ), ( "s1", "Cs" ) ),
            ],
            output = IECoreScene.ShaderNetwork.Parameter( "s1" )
        )

    def testShaderImmutability( self ) :

        s = IECoreScene.Shader( "constant", "surface" )
        n = IECoreScene.ShaderNetwork(
            shaders = {
                "s" : s
            }
        )

        def assertImmutable( network, handle, shader ) :
            # Mutating the caller's shader must not affect the stored copy.
            self.assertFalse( network.getShader( handle ).isSame( shader ) )
            self.assertEqual( network.getShader( handle ), shader )
            shader.parameters["Cs"] = imath.Color3f( 1, 0, 0 )
            self.assertFalse( network.getShader( handle ).isSame( shader ) )
            self.assertNotEqual( network.getShader( handle ), shader )

        # The ShaderNetwork should have taken a copy of the shader,
        # so that we can no longer modify the network by modifying s.
        assertImmutable( n, "s", s )

        # The same applies when adding shaders via `addShader()` and
        # `setShader()`.
        s2 = IECoreScene.Shader( "constant", "surface" )
        n.addShader( "s2", s2 )
        assertImmutable( n, "s2", s2 )

        s3 = IECoreScene.Shader( "constant", "surface" )
        n.setShader( "s3", s3 )
        assertImmutable( n, "s3", s3 )

    def testAddShaderReturnValue( self ) :

        s1 = IECoreScene.Shader()
        s2 = IECoreScene.Shader()

        n = IECoreScene.ShaderNetwork()
        n.setShader( "s", s1 )

        # addShader() never overwrites : a clashing handle gets a numeric
        # suffix, and the handle actually used is returned.
        h = n.addShader( "s", s2 )
        self.assertEqual( h, "s1" )
        self.assertIsInstance( h, str )

    def testMove( self ) :

        # Delegates to a binding-level test implemented in IECoreScene.
        IECoreScene.testShaderNetworkMove()

    def testUniqueHandles( self ) :

        # Repeatedly adding under the same handle generates "test", "test1", ...
        n = IECoreScene.ShaderNetwork()
        for i in range( 0, 20 ) :
            n.addShader( "test", IECoreScene.Shader() )

        self.assertEqual(
            set( n.shaders().keys() ),
            { "test" } | { "test{0}".format( x ) for x in range( 1, 20 ) }
        )

    def testSubstitutions( self ):

        def runSubstitutionTest( shader, attributes ):
            # NOTE(review) : builds the network from the enclosing `s`, not
            # the `shader` argument — works because callers rebind `s`
            # before each call; confirm before relying on the parameter.
            n = IECoreScene.ShaderNetwork( shaders = { "s" : s } )
            a = IECore.CompoundObject( attributes )
            h = IECore.MurmurHash()
            n.hashSubstitutions( a, h )
            nSubst = n.copy()
            nSubst.applySubstitutions( a )
            return ( h, nSubst.getShader("s") )

        # A shader with no <attr:...> tokens : substitution is a no-op.
        s = IECoreScene.Shader( "test", "surface",IECore.CompoundData( {
            "a" : IECore.StringData( "foo" ),
            "b" : IECore.FloatData( 42.42 ),
            "c" : IECore.StringVectorData( [ "foo", "bar" ] ),
        } ) )
        ( h, sSubst ) = runSubstitutionTest( s, { "unused" : IECore.StringData( "blah" ) } )
        self.assertEqual( h, IECore.MurmurHash() )
        self.assertEqual( s, sSubst )

        s = IECoreScene.Shader( "test", "surface",IECore.CompoundData( {
            "a" : IECore.StringData( "pre<attr:fred>post" ),
            "b" : IECore.FloatData( 42.42 ),
            "c" : IECore.StringVectorData( [ "<attr:bob>", "pre<attr:carol>", "<attr:fred>post", "<attr:bob><attr:carol> <attr:fred>" ] ),
        } ) )
        ( h, sSubst ) = runSubstitutionTest( s, { "unused" : IECore.StringData( "blah" ) } )
        # Now that we've got substitutions, the hash should be non-default
        self.assertNotEqual( h, IECore.MurmurHash() )
        # Everything gets substituted to empty, because no matching attributes provided
        self.assertNotEqual( s, sSubst )
        self.assertEqual( sSubst.parameters["a"].value, "prepost" )
        self.assertEqual( sSubst.parameters["c"][0], "" )
        self.assertEqual( sSubst.parameters["c"][1], "pre" )
        self.assertEqual( sSubst.parameters["c"][2], "post" )
        self.assertEqual( sSubst.parameters["c"][3], " " )

        ( h2, sSubst2 ) = runSubstitutionTest( s, { "unused" : IECore.StringData( "blah2" ) } )
        # The attribute being changed has no impact
        self.assertEqual( h, h2 )
        self.assertEqual( sSubst, sSubst2 )

        ( h3, sSubst3 ) = runSubstitutionTest( s, { "fred" : IECore.StringData( "CAT" ) } )
        self.assertNotEqual( h, h3 )
        self.assertNotEqual( s, sSubst3 )
        self.assertEqual( sSubst3.parameters["a"].value, "preCATpost" )
        self.assertEqual( sSubst3.parameters["c"][0], "" )
        self.assertEqual( sSubst3.parameters["c"][1], "pre" )
        self.assertEqual( sSubst3.parameters["c"][2], "CATpost" )
        self.assertEqual( sSubst3.parameters["c"][3], " CAT" )

        ( h4, sSubst4 ) = runSubstitutionTest( s, { "fred" : IECore.StringData( "FISH" ) } )
        self.assertNotEqual( h3, h4 )
        self.assertEqual( sSubst4.parameters["c"][2], "FISHpost" )

        allAttributes = {
            "fred" : IECore.StringData( "FISH" ),
            "bob" : IECore.StringData( "CAT" ),
            "carol" : IECore.StringData( "BIRD" )
        }
        ( h5, sSubst5 ) = runSubstitutionTest( s, allAttributes )
        self.assertNotEqual( h4, h5 )
        self.assertEqual( sSubst5.parameters["a"].value, "preFISHpost" )
        self.assertEqual( sSubst5.parameters["c"][0], "CAT" )
        self.assertEqual( sSubst5.parameters["c"][1], "preBIRD" )
        self.assertEqual( sSubst5.parameters["c"][2], "FISHpost" )
        self.assertEqual( sSubst5.parameters["c"][3], "CATBIRD FISH" )

        # Support a variety of different ways of using backslashes to escape substitutions
        s = IECoreScene.Shader( "test", "surface",IECore.CompoundData( {
            "a" : IECore.StringData( "pre\<attr:fred\>post" ),
            "b" : IECore.FloatData( 42.42 ),
            "c" : IECore.StringVectorData( [ "\<attr:bob\>", "\<attr:carol>", "<attr:fred\>" ] ),
        } ) )
        ( h6, sSubst6 ) = runSubstitutionTest( s, {} )
        ( h7, sSubst7 ) = runSubstitutionTest( s, allAttributes )
        # Escaped tokens are never substituted — only the backslashes are removed.
        self.assertEqual( h6, h7 )
        self.assertEqual( sSubst6, sSubst7 )
        self.assertEqual( sSubst6.parameters["a"].value, "pre<attr:fred>post" )
        self.assertEqual( sSubst6.parameters["c"][0], "<attr:bob>" )
        self.assertEqual( sSubst6.parameters["c"][1], "<attr:carol>" )
        self.assertEqual( sSubst6.parameters["c"][2], "<attr:fred>" )
# Allow the tests to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| |
"""Vendoring script, python 3.5 with requests needed"""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import os
import re
import shutil
import tarfile
import zipfile
from pathlib import Path
import invoke
import requests
# Tag used in log() prefixes for this invoke task.
TASK_NAME = 'update'

# Support files that clean_vendor() must leave in place.
FILE_WHITE_LIST = (
    'Makefile',
    'vendor.txt',
    '__init__.py',
    'README.rst',
)

# libraries that have directories with different names
LIBRARY_DIRNAMES = {
    'setuptools': 'pkg_resources',
    'msgpack-python': 'msgpack',
}

# from time to time, remove the no longer needed ones
HARDCODED_LICENSE_URLS = {
    'pytoml': 'https://github.com/avakar/pytoml/raw/master/LICENSE',
    'webencodings': 'https://github.com/SimonSapin/python-webencodings/raw/'
                    'master/LICENSE',
}
def drop_dir(path, **kwargs):
    """Recursively delete the directory *path* (str or Path accepted)."""
    # shutil.rmtree() on Python 3.5 requires a plain string path.
    target = str(path)
    shutil.rmtree(target, **kwargs)
def remove_all(paths):
    """Delete every entry in *paths*: directory trees recursively, files directly."""
    for entry in paths:
        if entry.is_dir():
            # Inlined drop_dir(): rmtree needs a str path on Python 3.5.
            shutil.rmtree(str(entry))
        else:
            entry.unlink()
def log(msg):
    """Print *msg* prefixed with the vendoring task tag."""
    prefix = '[vendoring.%s]' % TASK_NAME
    print('%s %s' % (prefix, msg))
def _get_vendor_dir(ctx):
git_root = ctx.run('git rev-parse --show-toplevel', hide=True).stdout
return Path(git_root.strip()) / 'src' / 'pip' / '_vendor'
def clean_vendor(ctx, vendor_dir):
    """Wipe *vendor_dir* down to the whitelisted support files."""
    # Old _vendor cleanup
    remove_all(vendor_dir.glob('*.pyc'))
    log('Cleaning %s' % vendor_dir)
    for entry in vendor_dir.iterdir():
        if entry.is_dir():
            shutil.rmtree(str(entry))
        elif entry.name in FILE_WHITE_LIST:
            # Support files (Makefile, vendor.txt, ...) are kept.
            log('Skipping %s' % entry)
        else:
            entry.unlink()
def detect_vendored_libs(vendor_dir):
    """Return the names of vendored packages and modules found in *vendor_dir*."""
    found = []
    for entry in vendor_dir.iterdir():
        name = entry.name
        if entry.is_dir():
            # A directory is a vendored package.
            found.append(name)
            continue
        # Type stubs and license texts are not importable modules.
        if name.endswith(".pyi") or "LICENSE" in name or "COPYING" in name:
            continue
        if name not in FILE_WHITE_LIST:
            # Single-file module: strip the ".py" suffix.
            found.append(name[:-3])
    return found
def rewrite_imports(package_dir, vendored_libs):
    """Recursively rewrite imports in every .py file under *package_dir*."""
    for child in package_dir.iterdir():
        if child.is_dir():
            rewrite_imports(child, vendored_libs)
        elif child.name.endswith('.py'):
            rewrite_file_imports(child, vendored_libs)
def rewrite_file_imports(item, vendored_libs):
    """Rewrite 'import xxx' and 'from xxx import' for vendored_libs."""
    source = item.read_text(encoding='utf-8')
    # Revendor pkg_resources.extern first, before per-library rewrites.
    source = re.sub(r'pkg_resources\.extern', r'pip._vendor', source)
    source = re.sub(r'from \.extern', r'from pip._vendor', source)
    for lib in vendored_libs:
        rewrites = (
            # "import lib" -> "from pip._vendor import lib"
            (r'(\n\s*|^)import %s(\n\s*)' % lib,
             r'\1from pip._vendor import %s\2' % lib),
            # "from lib..." -> "from pip._vendor.lib..."
            (r'(\n\s*|^)from %s(\.|\s+)' % lib,
             r'\1from pip._vendor.%s\2' % lib),
        )
        for pattern, replacement in rewrites:
            source = re.sub(pattern, replacement, source)
    item.write_text(source, encoding='utf-8')
def apply_patch(ctx, patch_file_path):
    """git-apply the given patch file inside the repository."""
    log('Applying patch %s' % patch_file_path.name)
    command = 'git apply --verbose %s' % patch_file_path
    ctx.run(command)
def vendor(ctx, vendor_dir):
    """Reinstall all vendored libraries and rewrite their imports.

    Installs everything listed in vendor.txt into *vendor_dir*, prunes
    packaging metadata and unneeded files, rewrites imports into the
    pip._vendor namespace, and finally applies the stored patches.
    """
    log('Reinstalling vendored libraries')
    # We use --no-deps because we want to ensure that all of our dependencies
    # are added to vendor.txt, this includes all dependencies recursively up
    # the chain.
    ctx.run(
        'pip install -t {0} -r {0}/vendor.txt --no-compile --no-deps'.format(
            str(vendor_dir),
        )
    )
    # Installed metadata is not needed at runtime.
    remove_all(vendor_dir.glob('*.dist-info'))
    remove_all(vendor_dir.glob('*.egg-info'))
    # Cleanup setuptools unneeded parts
    (vendor_dir / 'easy_install.py').unlink()
    drop_dir(vendor_dir / 'setuptools')
    drop_dir(vendor_dir / 'pkg_resources' / '_vendor')
    drop_dir(vendor_dir / 'pkg_resources' / 'extern')
    # Drop the bin directory (contains easy_install, distro, chardetect etc.)
    # Might not appear on all OSes, so ignoring errors
    drop_dir(vendor_dir / 'bin', ignore_errors=True)
    # Drop interpreter and OS specific msgpack libs.
    # Pip will rely on the python-only fallback instead.
    remove_all(vendor_dir.glob('msgpack/*.so'))
    # Detect the vendored packages/modules
    vendored_libs = detect_vendored_libs(vendor_dir)
    log("Detected vendored libraries: %s" % ", ".join(vendored_libs))
    # Global import rewrites
    log("Rewriting all imports related to vendored libs")
    for item in vendor_dir.iterdir():
        if item.is_dir():
            rewrite_imports(item, vendored_libs)
        elif item.name not in FILE_WHITE_LIST:
            rewrite_file_imports(item, vendored_libs)
    # Special cases: apply stored patches
    log("Apply patches")
    patch_dir = Path(__file__).parent / 'patches'
    for patch in patch_dir.glob('*.patch'):
        apply_patch(ctx, patch)
def download_licenses(ctx, vendor_dir):
    """Download sdists for every vendor.txt entry and extract licenses.

    Sdists land in a temporary subdirectory that is removed afterwards.
    """
    log('Downloading licenses')
    tmp_dir = vendor_dir / '__tmp__'
    download_cmd = (
        'pip download -r {0}/vendor.txt --no-binary '
        ':all: --no-deps -d {1}'.format(str(vendor_dir), str(tmp_dir))
    )
    ctx.run(download_cmd)
    for sdist in tmp_dir.iterdir():
        extract_license(vendor_dir, sdist)
    drop_dir(tmp_dir)
def extract_license(vendor_dir, sdist):
    """Extract the license file from one sdist archive into vendor_dir.

    Supports ``.zip`` and ``.tar.*`` sdists. When the archive contains
    no license file, falls back to a hardcoded download URL.

    Raises NotImplementedError for unrecognized archive types.
    """
    # Check .zip first: a name like 'pkg-2018.zip' has only one suffix,
    # so indexing suffixes[-2] unconditionally would raise IndexError.
    if sdist.suffixes[-1] == '.zip':
        with zipfile.ZipFile(sdist) as zip:
            found = find_and_extract_license(vendor_dir, zip, zip.infolist())
    elif len(sdist.suffixes) >= 2 and sdist.suffixes[-2] == '.tar':
        ext = sdist.suffixes[-1][1:]  # compression: gz, bz2, xz...
        with tarfile.open(sdist, mode='r:{}'.format(ext)) as tar:
            found = find_and_extract_license(vendor_dir, tar, tar.getmembers())
    else:
        raise NotImplementedError('new sdist type!')
    if not found:
        log('License not found in {}, will download'.format(sdist.name))
        license_fallback(vendor_dir, sdist.name)
def find_and_extract_license(vendor_dir, tar, members):
    """Scan archive *members* for license files and extract each one.

    Works with both tarfile members (``.name``) and zipfile members
    (``.filename``). Returns True when at least one license was found.
    """
    found = False
    for member in members:
        name = getattr(member, 'name', None)
        if name is None:  # zipfile members expose .filename instead
            name = member.filename
        if 'LICENSE' not in name and 'COPYING' not in name:
            continue
        if '/test' in name:
            # some testing licenses in html5lib and distlib
            log('Ignoring {}'.format(name))
            continue
        found = True
        extract_license_member(vendor_dir, tar, member, name)
    return found
def license_fallback(vendor_dir, sdist_name):
    """Hardcoded license URLs. Check when updating if those are still needed"""
    libname = libname_from_dir(sdist_name)
    try:
        url = HARDCODED_LICENSE_URLS[libname]
    except KeyError:
        raise ValueError('No hardcoded URL for {} license'.format(libname))
    _, _, name = url.rpartition('/')
    dest = license_destination(vendor_dir, libname, name)
    log('Downloading {}'.format(url))
    response = requests.get(url, allow_redirects=True)
    response.raise_for_status()
    dest.write_bytes(response.content)
def libname_from_dir(dirname):
    """Reconstruct the library name without its version suffix.

    Keeps dash-separated chunks up to (but excluding) the first chunk
    that starts with a digit, which is assumed to begin the version.
    """
    kept = []
    for chunk in dirname.split('-'):
        if chunk[0].isdigit():
            break
        kept.append(chunk)
    return '-'.join(kept)
def license_destination(vendor_dir, libname, filename):
    """Given the (reconstructed) library name, find appropriate destination"""
    # Prefer an existing package directory, exact name before lowercase.
    for candidate in (libname, libname.lower()):
        target = vendor_dir / candidate
        if target.is_dir():
            return target / filename
    # Some libraries ship under a directory named differently.
    if libname in LIBRARY_DIRNAMES:
        return vendor_dir / LIBRARY_DIRNAMES[libname] / filename
    # fallback to libname.LICENSE (used for nondirs)
    return vendor_dir / '{}.{}'.format(libname, filename)
def extract_license_member(vendor_dir, tar, member, name):
    """Write one license member out of an archive into vendor_dir."""
    mpath = Path(name)  # relative path inside the sdist
    dirname = list(mpath.parents)[-2].name  # -1 is .
    libname = libname_from_dir(dirname)
    dest = license_destination(vendor_dir, libname, mpath.name)
    log('Extracting {} into {}'.format(name, dest.relative_to(Path.cwd())))
    extractor = getattr(tar, 'extractfile', None)
    if extractor is not None:  # tarfile
        dest.write_bytes(extractor(member).read())
    else:  # zipfile
        dest.write_bytes(tar.read(member))
@invoke.task
def update_stubs(ctx):
    """Generate `from X import *` mypy stub files for vendored libraries."""
    vendor_dir = _get_vendor_dir(ctx)
    vendored_libs = detect_vendored_libs(vendor_dir)
    print("[vendoring.update_stubs] Add mypy stubs")
    extra_stubs_needed = {
        # Some projects need stubs other than a simple <name>.pyi
        "six": [
            "six.__init__",
            "six.moves.__init__",
            "six.moves.configparser",
        ],
        # Some projects should not have stubs coz they're single file modules
        "appdirs": [],
        "contextlib2": [],
    }
    for lib in vendored_libs:
        selectors = extra_stubs_needed.get(lib)
        if selectors is None:
            # Common case: a single flat re-export stub.
            (vendor_dir / (lib + ".pyi")).write_text("from %s import *" % lib)
            continue
        for selector in selectors:
            # Compute the stub path before stripping the package marker,
            # so 'six.__init__' maps to six/__init__.pyi.
            fname = selector.replace(".", os.sep) + ".pyi"
            if selector.endswith(".__init__"):
                selector = selector[:-9]
            f_path = vendor_dir / fname
            if not f_path.parent.exists():
                f_path.parent.mkdir()
            f_path.write_text("from %s import *" % selector)
@invoke.task(name=TASK_NAME, post=[update_stubs])
def main(ctx):
    """Invoke task entry point: full revendoring pass.

    Cleans the vendor dir, reinstalls from vendor.txt, and downloads
    license files; update_stubs runs afterwards as a post hook.
    """
    vendor_dir = _get_vendor_dir(ctx)
    log('Using vendor dir: %s' % vendor_dir)
    clean_vendor(ctx, vendor_dir)
    vendor(ctx, vendor_dir)
    download_licenses(ctx, vendor_dir)
    log('Revendoring complete')
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv2DTransposeTest(test.TestCase):
  """Tests for nn_ops.conv2d_transpose with NHWC and NCHW data formats.

  All-ones inputs and filters are used so that each output cell equals
  the number of contributing (input, kernel) pairs times the depth.
  """

  def testConv2DTransposeSingleStride(self):
    # Stride 1, SAME padding, NHWC layout.
    with self.cached_session():
      strides = [1, 1, 1, 1]
      # Input, output: [batch, height, width, depth]
      x_shape = [2, 6, 4, 3]
      y_shape = [2, 6, 4, 2]
      # Filter: [kernel_height, kernel_width, output_depth, input_depth]
      f_shape = [3, 3, 2, 3]
      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv2d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = output.eval()
      # We count the number of cells being added at the locations in the output.
      # At the center, #cells=kernel_height * kernel_width
      # At the corners, #cells=ceil(kernel_height/2) * ceil(kernel_width/2)
      # At the borders, #cells=ceil(kernel_height/2)*kernel_width or
      #                 kernel_height * ceil(kernel_width/2)
      for n in xrange(x_shape[0]):
        for k in xrange(f_shape[2]):
          for w in xrange(y_shape[2]):
            for h in xrange(y_shape[1]):
              target = 4 * 3.0
              h_in = h > 0 and h < y_shape[1] - 1
              w_in = w > 0 and w < y_shape[2] - 1
              if h_in and w_in:
                target += 5 * 3.0
              elif h_in or w_in:
                target += 2 * 3.0
              self.assertAllClose(target, value[n, h, w, k])

  def testConv2DTransposeSame(self):
    # Stride 2, SAME padding, NHWC layout.
    with self.cached_session():
      strides = [1, 2, 2, 1]
      # Input, output: [batch, height, width, depth]
      x_shape = [2, 6, 4, 3]
      y_shape = [2, 12, 8, 2]
      # Filter: [kernel_height, kernel_width, output_depth, input_depth]
      f_shape = [3, 3, 2, 3]
      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv2d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = output.eval()
      for n in xrange(x_shape[0]):
        for k in xrange(f_shape[2]):
          for w in xrange(y_shape[2]):
            for h in xrange(y_shape[1]):
              target = 3.0
              # We add a case for locations divisible by the stride.
              h_in = h % strides[1] == 0 and h > 0 and h < y_shape[1] - 1
              w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
              if h_in and w_in:
                target += 9.0
              elif h_in or w_in:
                target += 3.0
              self.assertAllClose(target, value[n, h, w, k])

  def testConv2DTransposeValid(self):
    # Stride 2, VALID padding, NHWC layout; borders are checked via a
    # reference array rather than per-cell expectations.
    with self.cached_session():
      strides = [1, 2, 2, 1]
      # Input, output: [batch, height, width, depth]
      x_shape = [2, 6, 4, 3]
      y_shape = [2, 13, 9, 2]
      # Filter: [kernel_height, kernel_width, output_depth, input_depth]
      f_shape = [3, 3, 2, 3]
      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv2d_transpose(
          x, f, y_shape, strides=strides, padding="VALID")
      value = output.eval()
      cache_values = np.zeros(y_shape, dtype=np.float32)
      # The amount of padding added
      pad = 1
      for n in xrange(x_shape[0]):
        for k in xrange(f_shape[2]):
          for w in xrange(pad, y_shape[2] - pad):
            for h in xrange(pad, y_shape[1] - pad):
              target = 3.0
              # We add a case for locations divisible by the stride.
              h_in = h % strides[1] == 0 and h > pad and h < y_shape[1] - 1 - pad
              w_in = w % strides[2] == 0 and w > pad and w < y_shape[2] - 1 - pad
              if h_in and w_in:
                target += 9.0
              elif h_in or w_in:
                target += 3.0
              cache_values[n, h, w, k] = target
          # copy values in the border
          cache_values[n, :, 0, k] = cache_values[n, :, 1, k]
          cache_values[n, :, -1, k] = cache_values[n, :, -2, k]
          cache_values[n, 0, :, k] = cache_values[n, 1, :, k]
          cache_values[n, -1, :, k] = cache_values[n, -2, :, k]
      self.assertAllClose(cache_values, value)

  def testGradient(self):
    # Numerical-vs-analytic gradient check for conv2d_transpose.
    x_shape = [2, 6, 4, 3]
    f_shape = [3, 3, 2, 3]
    y_shape = [2, 12, 8, 2]
    strides = [1, 2, 2, 1]
    np.random.seed(1)  # Make it reproducible.
    x_val = np.random.random_sample(x_shape).astype(np.float64)
    f_val = np.random.random_sample(f_shape).astype(np.float64)
    with self.cached_session():
      x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
      f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
      output = nn_ops.conv2d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
                                                    output, y_shape)
    print("conv2d_transpose gradient err = %g " % err)
    err_tolerance = 0.0005
    self.assertLess(err, err_tolerance)

  def testConv2DTransposeSingleStrideNCHW(self):
    # `NCHW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.test_session(use_gpu=True):
        strides = [1, 1, 1, 1]
        # Input, output: [batch, depth, height, width]
        x_shape = [2, 3, 6, 4]
        y_shape = [2, 2, 6, 4]
        # Filter: [kernel_height, kernel_width, output_depth, input_depth]
        f_shape = [3, 3, 2, 3]
        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
        output = nn_ops.conv2d_transpose(
            x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
        value = output.eval()
        for n in xrange(x_shape[0]):
          for k in xrange(f_shape[2]):
            for w in xrange(y_shape[3]):
              for h in xrange(y_shape[2]):
                target = 4 * 3.0
                h_in = h > 0 and h < y_shape[2] - 1
                w_in = w > 0 and w < y_shape[3] - 1
                if h_in and w_in:
                  target += 5 * 3.0
                elif h_in or w_in:
                  target += 2 * 3.0
                self.assertAllClose(target, value[n, k, h, w])

  def testConv2DTransposeSameNCHW(self):
    # `NCHW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.test_session(use_gpu=True):
        strides = [1, 1, 2, 2]
        # Input, output: [batch, depth, height, width]
        x_shape = [2, 3, 6, 4]
        y_shape = [2, 2, 12, 8]
        # Filter: [kernel_height, kernel_width, output_depth, input_depth]
        f_shape = [3, 3, 2, 3]
        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
        output = nn_ops.conv2d_transpose(
            x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
        value = output.eval()
        for n in xrange(x_shape[0]):
          for k in xrange(f_shape[2]):
            for w in xrange(y_shape[3]):
              for h in xrange(y_shape[2]):
                target = 3.0
                # We add a case for locations divisible by the stride.
                h_in = h % strides[2] == 0 and h > 0 and h < y_shape[2] - 1
                w_in = w % strides[3] == 0 and w > 0 and w < y_shape[3] - 1
                if h_in and w_in:
                  target += 9.0
                elif h_in or w_in:
                  target += 3.0
                self.assertAllClose(target, value[n, k, h, w])

  def testConv2DTransposeValidNCHW(self):
    # `NCHW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.test_session(use_gpu=True):
        strides = [1, 1, 2, 2]
        # Input, output: [batch, depth, height, width]
        x_shape = [2, 3, 6, 4]
        y_shape = [2, 2, 13, 9]
        # Filter: [kernel_height, kernel_width, output_depth, input_depth]
        f_shape = [3, 3, 2, 3]
        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
        output = nn_ops.conv2d_transpose(
            x, f, y_shape, strides=strides, padding="VALID", data_format="NCHW")
        value = output.eval()
        cache_values = np.zeros(y_shape, dtype=np.float32)
        # The amount of padding added
        pad = 1
        for n in xrange(x_shape[0]):
          for k in xrange(f_shape[2]):
            for w in xrange(pad, y_shape[3] - pad):
              for h in xrange(pad, y_shape[2] - pad):
                target = 3.0
                # We add a case for locations divisible by the stride.
                h_in = h % strides[2] == 0 and h > pad and h < y_shape[2] - 1 - pad
                w_in = w % strides[3] == 0 and w > pad and w < y_shape[3] - 1 - pad
                if h_in and w_in:
                  target += 9.0
                elif h_in or w_in:
                  target += 3.0
                cache_values[n, k, h, w] = target
            # copy values in the border
            cache_values[n, k, :, 0] = cache_values[n, k, :, 1]
            cache_values[n, k, :, -1] = cache_values[n, k, :, -2]
            cache_values[n, k, 0, :] = cache_values[n, k, 1, :]
            cache_values[n, k, -1, :] = cache_values[n, k, -2, :]
        self.assertAllClose(cache_values, value)

  @test_util.enable_c_shapes
  def testConv2DTransposeShapeInference(self):
    # Test case for 8972
    initializer = random_ops.truncated_normal(
        [3, 3, 5, 1], mean=0.0, stddev=0.01, dtype=dtypes.float32)
    x = variables.Variable(random_ops.random_normal([3, 10, 5, 1]))
    f = variable_scope.get_variable("f", initializer=initializer)
    f_shape = array_ops.stack([array_ops.shape(x)[0], 10, 5, 5])
    output = nn_ops.conv2d_transpose(
        x, f, f_shape, strides=[1, 1, 1, 1], padding="SAME")
    self.assertEqual(output.get_shape().as_list(), [3, 10, 5, 5])
# Run the test suite when executed as a script.
if __name__ == "__main__":
  test.main()
| |
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" File for the builder window, the workspace of Vistrails
QBuilderWindow
"""
from PyQt4 import QtCore, QtGui
from core import system
from core.configuration import (get_vistrails_configuration,
get_vistrails_persistent_configuration)
from core.db.locator import DBLocator, FileLocator, XMLFileLocator, untitled_locator
from core.packagemanager import get_package_manager
from core.recent_vistrails import RecentVistrailList
import core.interpreter.cached
import core.system
from core.vistrail.pipeline import Pipeline
from core.vistrail.vistrail import Vistrail
from gui.application import VistrailsApplication
from gui.controlflow_assist import QControlFlowAssistDialog
from gui.graphics_view import QInteractiveGraphicsView
from gui.module_palette import QModulePalette
from gui.open_db_window import QOpenDBWindow
from gui.preferences import QPreferencesDialog
from gui.repository import QRepositoryDialog
from gui.shell import QShellDialog
from gui.debugger import QDebugger
from gui.pipeline_view import QPipelineView
from gui.theme import CurrentTheme
from gui.view_manager import QViewManager
from gui.vistrail_toolbar import QVistrailViewToolBar, QVistrailInteractionToolBar
from gui.vis_diff import QVisualDiff
from gui.utils import build_custom_window, show_info
from gui.collection.workspace import QWorkspaceWindow
from gui.collection.explorer import QExplorerWindow
from gui.collection.vis_log import QVisualLog
import sys
import db.services.vistrail
from gui import merge_gui
from db.services.io import SaveBundle
from core.thumbnails import ThumbnailCache
import gui.debug
from gui.mashups.mashups_manager import MashupsManager
################################################################################
class QBuilderWindow(QtGui.QMainWindow):
"""
    QBuilderWindow is a main widget containing an editing area for
VisTrails and several tool windows. Also remarks that almost all
of QBuilderWindow components are floating dockwidget. This mimics
a setup of an IDE
"""
    def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
        """ QBuilderWindow(parent: QWidget) -> QBuilderWindow
        Construct the main window with menus, toolbar, and floating toolwindow
        """
        QtGui.QMainWindow.__init__(self, parent, f)
        self.title = 'VisTrails Builder'
        self.setWindowTitle(self.title)
        self.setStatusBar(QtGui.QStatusBar(self))
        self.setDockNestingEnabled(True)
        # Central view manager hosts the vistrail views.
        self.viewManager = QViewManager()
        self.setCentralWidget(self.viewManager)
        self.modulePalette = QModulePalette(self)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea,
                           self.modulePalette.toolWindow())
        # self.queryPanel = QExplorerDialog(self)
        # self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,
        #                    self.queryPanel.toolWindow())
        self.viewIndex = 0
        self.dbDefault = False
        # Restore the recent-vistrails list from persistent configuration,
        # falling back to an empty list on first run.
        self.recentVistrailActs = []
        conf = get_vistrails_configuration()
        if conf.check('recentVistrailList'):
            self.recentVistrailLocators = RecentVistrailList.unserialize(
                conf.recentVistrailList)
        else:
            self.recentVistrailLocators = RecentVistrailList()
        conf.subscribe('maxRecentVistrails', self.max_recent_vistrails_changed)
        # Actions must exist before menus/toolbars reference them.
        self.createActions()
        self.createMenu()
        self.update_recent_vistrail_actions()
        self.createToolBar()
        self.connectSignals()
        self.workspace = QWorkspaceWindow(self)
        self.addDockWidget(QtCore.Qt.RightDockWidgetArea,
                           self.workspace)
        self.provenanceBrowser = QExplorerWindow(self)
        self.workspace.hide()
        # Lazily-created tool windows.
        self.shell = None
        self.debugger = None
        # If this is true, we're currently executing a pipeline, so
        # We can't allow other executions.
        self._executing = False
        # This keeps track of the menu items for each package
        self._package_menu_items = {}
        self.detachedHistoryView = getattr(get_vistrails_configuration(), 'detachHistoryView')
    def create_first_vistrail(self):
        """ create_first_vistrail() -> None
        Create untitled vistrail in interactive mode
        """
        # FIXME: when interactive and non-interactive modes are separated,
        # this autosave code can move to the viewManager
        # If an autosaved untitled vistrail exists and the user declines to
        # recover it, discard the temporaries before creating a fresh one.
        if not self.dbDefault and untitled_locator().has_temporaries():
            if not FileLocator().prompt_autosave(self):
                untitled_locator().clean_temporaries()
        if self.viewManager.newVistrail(True):
            self.viewModeChanged(0)
        self.viewManager.set_first_view(self.viewManager.currentView())
    def sizeHint(self):
        """ sizeHint() -> QSize
        Return the recommended size of the builder window
        """
        return QtCore.QSize(1280, 768)
    def closeEvent(self, e):
        """ closeEvent(e: QCloseEvent) -> None
        Close the whole application when the builder is closed
        """
        # quitVistrails() returns False when the user cancels (e.g. via an
        # unsaved-changes prompt); in that case veto the close event.
        if not self.quitVistrails():
            e.ignore()
def keyPressEvent(self, event):
""" keyPressEvent(event: QKeyEvent) -> None
Capture modifiers (Ctrl, Alt, Shift) and send them to one of
the widget under the mouse cursor. It first starts at the
widget directly under the mouse and check if the widget has
property named captureModifiers. If yes, it calls
'modifiersPressed' function
"""
if event.key() in [QtCore.Qt.Key_Control,
QtCore.Qt.Key_Alt,
QtCore.Qt.Key_Shift,
QtCore.Qt.Key_Meta]:
widget = QtGui.QApplication.widgetAt(QtGui.QCursor.pos())
if widget:
while widget:
if widget.property('captureModifiers').isValid():
if hasattr(widget, 'modifiersPressed'):
widget.modifiersPressed(event.modifiers())
break
widget = widget.parent()
QtGui.QMainWindow.keyPressEvent(self, event)
# super(QBuilderWindow, self).keyPressEvent(event)
def keyReleaseEvent(self, event):
""" keyReleaseEvent(event: QKeyEvent) -> None
Capture modifiers (Ctrl, Alt, Shift) and send them to one of
the widget under the mouse cursor. It first starts at the
widget directly under the mouse and check if the widget has
property named captureModifiers. If yes, it calls
'modifiersReleased' function
"""
if event.key() in [QtCore.Qt.Key_Control,
QtCore.Qt.Key_Alt,
QtCore.Qt.Key_Shift,
QtCore.Qt.Key_Meta]:
widget = QtGui.QApplication.widgetAt(QtGui.QCursor.pos())
if widget:
while widget:
if widget.property('captureModifiers').isValid():
if hasattr(widget, 'modifiersReleased'):
widget.modifiersReleased()
break
widget = widget.parent()
QtGui.QMainWindow.keyReleaseEvent(self, event)
def createActions(self):
""" createActions() -> None
Construct all menu/toolbar actions for builder window
"""
self.newVistrailAction = QtGui.QAction(CurrentTheme.NEW_VISTRAIL_ICON,
'&New', self)
self.newVistrailAction.setShortcut('Ctrl+N')
self.newVistrailAction.setStatusTip('Create a new vistrail')
self.openFileAction = QtGui.QAction(CurrentTheme.OPEN_VISTRAIL_ICON,
'&Open', self)
self.openFileAction.setShortcut('Ctrl+O')
self.openFileAction.setStatusTip('Open an existing vistrail from '
'a file')
self.create_recent_vistrail_actions()
self.importFileAction = QtGui.QAction(CurrentTheme.OPEN_VISTRAIL_DB_ICON,
'From DB...', self)
self.importFileAction.setStatusTip('Import an existing vistrail from '
'a database')
self.saveFileAction = QtGui.QAction(CurrentTheme.SAVE_VISTRAIL_ICON,
'&Save', self)
self.saveFileAction.setShortcut('Ctrl+S')
self.saveFileAction.setStatusTip('Save the current vistrail '
'to a file')
self.saveFileAction.setEnabled(False)
self.saveFileAsAction = QtGui.QAction('Save as...', self)
self.saveFileAsAction.setShortcut('Ctrl+Shift+S')
self.saveFileAsAction.setStatusTip('Save the current vistrail '
'to a different file location')
self.saveFileAsAction.setEnabled(False)
self.exportFileAction = QtGui.QAction('To DB...', self)
self.exportFileAction.setStatusTip('Export the current vistrail to '
'a database')
self.exportFileAction.setEnabled(False)
self.closeVistrailAction = QtGui.QAction('Close', self)
self.closeVistrailAction.setShortcut('Ctrl+W')
self.closeVistrailAction.setStatusTip('Close the current vistrail')
self.closeVistrailAction.setEnabled(False)
self.exportStableAction = QtGui.QAction('To Stable Version...',
self)
self.exportStableAction.setStatusTip('Save vistrail as XML according '
'to the older (stable) schema')
self.exportStableAction.setEnabled(True)
self.saveOpmAction = QtGui.QAction('OPM XML...', self)
self.saveOpmAction.setStatusTip('Saves provenance according to the'
'Open Provenance Model in XML')
self.saveOpmAction.setEnabled(True)
self.saveLogAction = QtGui.QAction('Log To XML...', self)
self.saveLogAction.setStatusTip('Save the execution log to '
'a file')
self.saveLogAction.setEnabled(True)
self.exportLogAction = QtGui.QAction('Log To DB...', self)
self.exportLogAction.setStatusTip('Save the execution log to '
'a database')
self.exportLogAction.setEnabled(True)
self.importWorkflowAction = QtGui.QAction('Workflow...', self)
self.importWorkflowAction.setStatusTip('Import a workflow from an '
'xml file')
self.importWorkflowAction.setEnabled(True)
self.saveWorkflowAction = QtGui.QAction('Workflow To XML...', self)
self.saveWorkflowAction.setStatusTip('Save the current workflow to '
'a file')
self.saveWorkflowAction.setEnabled(True)
self.exportWorkflowAction = QtGui.QAction('Workflow To DB...', self)
self.exportWorkflowAction.setStatusTip('Save the current workflow to '
'a database')
self.exportWorkflowAction.setEnabled(True)
self.saveRegistryAction = QtGui.QAction('Registry To XML...', self)
self.saveRegistryAction.setStatusTip('Save the current registry to '
'a file')
self.saveRegistryAction.setEnabled(True)
self.exportRegistryAction = QtGui.QAction('Registry To DB...', self)
self.exportRegistryAction.setStatusTip('Save the current registry to '
'a database')
self.exportRegistryAction.setEnabled(True)
self.savePDFAction = QtGui.QAction('PDF...', self)
self.savePDFAction.setStatusTip('Save the current view'
'to a PDF file')
self.savePDFAction.setEnabled(True)
self.quitVistrailsAction = QtGui.QAction('Quit', self)
self.quitVistrailsAction.setShortcut('Ctrl+Q')
self.quitVistrailsAction.setStatusTip('Exit Vistrails')
self.undoAction = QtGui.QAction(CurrentTheme.UNDO_ICON,
'Undo', self)
self.undoAction.setEnabled(False)
self.undoAction.setStatusTip('Undo the previous action')
self.undoAction.setShortcut('Ctrl+Z')
self.redoAction = QtGui.QAction(CurrentTheme.REDO_ICON,
'Redo', self)
self.redoAction.setEnabled(False)
self.redoAction.setStatusTip('Redo an undone action')
self.redoAction.setShortcut('Ctrl+Y')
self.copyAction = QtGui.QAction('Copy\tCtrl+C', self)
self.copyAction.setEnabled(False)
self.copyAction.setStatusTip('Copy selected modules in '
'the current pipeline view')
self.pasteAction = QtGui.QAction('Paste\tCtrl+V', self)
self.pasteAction.setEnabled(False)
self.pasteAction.setStatusTip('Paste copied modules in the clipboard '
'into the current pipeline view')
self.groupAction = QtGui.QAction('Group', self)
self.groupAction.setShortcut('Ctrl+G')
self.groupAction.setEnabled(False)
self.groupAction.setStatusTip('Group the '
'selected modules in '
'the current pipeline view')
self.ungroupAction = QtGui.QAction('Ungroup', self)
self.ungroupAction.setShortcut('Ctrl+Shift+G')
self.ungroupAction.setEnabled(False)
self.ungroupAction.setStatusTip('Ungroup the '
'selected groups in '
'the current pipeline view')
self.showGroupAction = QtGui.QAction('Show Group Pipeline', self)
self.showGroupAction.setEnabled(True)
self.showGroupAction.setStatusTip('Show the underlying pipelines '
'for the selected groups in '
'the current pipeline view')
self.makeAbstractionAction = QtGui.QAction('Make SubWorkflow', self)
self.makeAbstractionAction.setStatusTip('Create a subworkflow '
'from the selected modules')
self.convertToAbstractionAction = \
QtGui.QAction('Convert to SubWorkflow', self)
self.convertToAbstractionAction.setStatusTip('Convert selected group '
'to a subworkflow')
self.editAbstractionAction = QtGui.QAction("Edit SubWorkflow", self)
self.editAbstractionAction.setStatusTip("Edit a subworkflow")
self.importAbstractionAction = QtGui.QAction('Import SubWorkflow', self)
self.importAbstractionAction.setStatusTip('Import subworkflow from '
'a vistrail to local '
'subworkflows')
self.exportAbstractionAction = QtGui.QAction('Export SubWorkflows', self)
self.exportAbstractionAction.setStatusTip('Export subworkflows from '
'local subworkflows for '
'use in a package')
self.controlFlowAssistAction = QtGui.QAction('Control Flow Assistant', self)
self.controlFlowAssistAction.setStatusTip('Launch the Control Flow '
'Assistant with the selected modules')
self.selectAllAction = QtGui.QAction('Select All\tCtrl+A', self)
self.selectAllAction.setEnabled(False)
self.selectAllAction.setStatusTip('Select all modules in '
'the current pipeline view')
self.repositoryOptions = QtGui.QAction('Web Repository Options', self)
self.repositoryOptions.setEnabled(True)
self.repositoryOptions.setStatusTip('Add this VisTrail to VisTrails Repository')
self.editPreferencesAction = QtGui.QAction('Preferences...', self)
self.editPreferencesAction.setEnabled(True)
self.editPreferencesAction.setStatusTip('Edit system preferences')
self.workspaceAction = QtGui.QAction('Workspaces', self)
self.workspaceAction.setCheckable(True)
self.workspaceAction.setChecked(False)
self.provenanceBrowserAction = QtGui.QAction('Provenance Browser', self)
self.provenanceBrowserAction.setCheckable(True)
self.provenanceBrowserAction.setChecked(False)
self.shellAction = QtGui.QAction(CurrentTheme.CONSOLE_MODE_ICON,
'VisTrails Console', self)
self.shellAction.setCheckable(True)
self.shellAction.setShortcut('Ctrl+H')
self.debugAction = QtGui.QAction('VisTrails Debugger', self)
self.debugAction.setCheckable(True)
self.debugAction.setChecked(False)
self.messagesAction = QtGui.QAction('VisTrails Messages', self)
self.messagesAction.setCheckable(True)
self.messagesAction.setChecked(False)
self.pipViewAction = QtGui.QAction('Picture-in-Picture', self)
self.pipViewAction.setCheckable(True)
self.pipViewAction.setChecked(True)
self.methodsViewAction = QtGui.QAction('Methods Panel', self)
self.methodsViewAction.setCheckable(True)
self.methodsViewAction.setChecked(True)
self.setMethodsViewAction = QtGui.QAction('Set Methods Panel', self)
self.setMethodsViewAction.setCheckable(True)
self.setMethodsViewAction.setChecked(True)
self.propertiesViewAction = QtGui.QAction('Properties Panel', self)
self.propertiesViewAction.setCheckable(True)
self.propertiesViewAction.setChecked(True)
self.propertiesOverlayAction = QtGui.QAction('Properties Overlay', self)
self.propertiesOverlayAction.setCheckable(True)
self.propertiesOverlayAction.setChecked(False)
self.expandBranchAction = QtGui.QAction('Expand Branch', self)
self.expandBranchAction.setEnabled(True)
self.expandBranchAction.setStatusTip('Expand all versions in the tree below the current version')
self.collapseBranchAction = QtGui.QAction('Collapse Branch', self)
self.collapseBranchAction.setEnabled(True)
self.collapseBranchAction.setStatusTip('Collapse all expanded versions in the tree below the current version')
self.collapseAllAction = QtGui.QAction('Collapse All', self)
self.collapseAllAction.setEnabled(True)
self.collapseAllAction.setStatusTip('Collapse all expanded branches of the tree')
self.hideBranchAction = QtGui.QAction('Hide Branch', self)
self.hideBranchAction.setEnabled(True)
self.hideBranchAction.setStatusTip('Hide all versions in the tree including and below the current version')
self.showAllAction = QtGui.QAction('Show All', self)
self.showAllAction.setEnabled(True)
self.showAllAction.setStatusTip('Show all hidden versions')
self.moduleConfigViewAction = QtGui.QAction('Module Configuration Panel', self)
self.moduleConfigViewAction.setCheckable(True)
self.moduleConfigViewAction.setChecked(True)
self.vistrailVarsViewAction = QtGui.QAction('VisTrail Variables Panel', self)
self.vistrailVarsViewAction.setCheckable(True)
self.vistrailVarsViewAction.setChecked(True)
self.helpAction = QtGui.QAction(self.tr('About VisTrails...'), self)
self.checkUpdateAction = QtGui.QAction(self.tr('Check for Updates'), self)
a = QtGui.QAction(self.tr('Execute Current Workflow\tCtrl+Enter'),
self)
self.executeCurrentWorkflowAction = a
self.executeCurrentWorkflowAction.setEnabled(False)
self.executeDiffAction = QtGui.QAction('Execute Version Difference', self)
self.executeDiffAction.setEnabled(False)
self.flushCacheAction = QtGui.QAction(self.tr('Erase Cache Contents'),
self)
self.executeQueryAction = QtGui.QAction('Execute Visual Query', self)
self.executeQueryAction.setEnabled(False)
self.executeExplorationAction = QtGui.QAction(
'Execute Parameter Exploration', self)
self.executeExplorationAction.setEnabled(False)
#mashup actions
self.executeMashupAction = QtGui.QAction(CurrentTheme.EXECUTE_MASHUP_ICON,
'Execute Mashup', self)
self.executeMashupAction.setEnabled(False)
self.createMashupAction = QtGui.QAction(CurrentTheme.MASHUP_ICON,
'Mashup', self)
self.createMashupAction.setEnabled(False)
self.executeShortcuts = [
QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.ControlModifier +
QtCore.Qt.Key_Return), self),
QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.ControlModifier +
QtCore.Qt.Key_Enter), self)
]
self.vistrailActionGroup = QtGui.QActionGroup(self)
self.mergeActionGroup = QtGui.QActionGroup(self)
def createMenu(self):
""" createMenu() -> None
Initialize menu bar of builder window
"""
self.fileMenu = self.menuBar().addMenu('&File')
self.fileMenu.addAction(self.newVistrailAction)
self.fileMenu.addAction(self.openFileAction)
self.openRecentMenu = self.fileMenu.addMenu('Open Recent')
self.update_recent_vistrail_menu()
self.fileMenu.addAction(self.saveFileAction)
self.fileMenu.addAction(self.saveFileAsAction)
self.fileMenu.addAction(self.closeVistrailAction)
self.fileMenu.addSeparator()
self.importMenu = self.fileMenu.addMenu('Import')
self.importMenu.addAction(self.importFileAction)
self.importMenu.addSeparator()
self.importMenu.addAction(self.importWorkflowAction)
self.exportMenu = self.fileMenu.addMenu('Export')
self.exportMenu.addAction(self.exportFileAction)
self.exportMenu.addAction(self.exportStableAction)
self.exportMenu.addSeparator()
self.exportMenu.addAction(self.savePDFAction)
self.exportMenu.addSeparator()
self.exportMenu.addAction(self.saveWorkflowAction)
self.exportMenu.addAction(self.exportWorkflowAction)
self.exportMenu.addSeparator()
self.exportMenu.addAction(self.saveOpmAction)
self.exportMenu.addAction(self.saveLogAction)
self.exportMenu.addAction(self.exportLogAction)
self.exportMenu.addSeparator()
self.exportMenu.addAction(self.saveRegistryAction)
self.exportMenu.addAction(self.exportRegistryAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.quitVistrailsAction)
self.editMenu = self.menuBar().addMenu('&Edit')
self.editMenu.addAction(self.undoAction)
self.editMenu.addAction(self.redoAction)
self.editMenu.addSeparator()
self.editMenu.addAction(self.copyAction)
self.editMenu.addAction(self.pasteAction)
self.editMenu.addAction(self.selectAllAction)
self.editMenu.addSeparator()
self.editMenu.addAction(self.groupAction)
self.editMenu.addAction(self.ungroupAction)
self.editMenu.addAction(self.showGroupAction)
self.editMenu.addAction(self.makeAbstractionAction)
self.editMenu.addAction(self.convertToAbstractionAction)
self.editMenu.addAction(self.editAbstractionAction)
self.editMenu.addAction(self.importAbstractionAction)
self.editMenu.addAction(self.exportAbstractionAction)
self.editMenu.addSeparator()
self.editMenu.addAction(self.controlFlowAssistAction)
self.editMenu.addSeparator()
self.editMenu.addAction(self.createMashupAction)
self.editMenu.addSeparator()
self.editMenu.addAction(self.repositoryOptions)
self.mergeMenu = self.editMenu.addMenu('Merge with')
self.mergeMenu.menuAction().setEnabled(False)
self.mergeMenu.menuAction().setStatusTip('Merge another VisTrail into the current VisTrail')
self.editMenu.addAction(self.repositoryOptions)
self.editMenu.addSeparator()
self.editMenu.addAction(self.editPreferencesAction)
self.viewMenu = self.menuBar().addMenu('&View')
self.viewMenu.addAction(self.workspaceAction)
self.viewMenu.addAction(self.shellAction)
self.viewMenu.addAction(self.debugAction)
self.viewMenu.addAction(self.provenanceBrowserAction)
self.viewMenu.addAction(self.messagesAction)
self.viewMenu.addSeparator()
self.viewMenu.addAction(self.expandBranchAction)
self.viewMenu.addAction(self.collapseBranchAction)
self.viewMenu.addAction(self.collapseAllAction)
#self.viewMenu.addSeparator()
self.viewMenu.addAction(self.hideBranchAction)
self.viewMenu.addAction(self.showAllAction)
self.viewMenu.addSeparator()
self.viewMenu.addAction(self.pipViewAction)
self.viewMenu.addAction(
self.modulePalette.toolWindow().toggleViewAction())
self.viewMenu.addAction(self.methodsViewAction)
self.viewMenu.addAction(self.setMethodsViewAction)
self.viewMenu.addAction(self.moduleConfigViewAction)
self.viewMenu.addAction(self.vistrailVarsViewAction)
self.viewMenu.addAction(self.propertiesViewAction)
self.viewMenu.addAction(self.propertiesOverlayAction)
self.runMenu = self.menuBar().addMenu('&Run')
self.runMenu.addAction(self.executeCurrentWorkflowAction)
self.runMenu.addAction(self.executeDiffAction)
self.runMenu.addAction(self.executeQueryAction)
self.runMenu.addAction(self.executeExplorationAction)
self.runMenu.addAction(self.executeMashupAction)
self.runMenu.addSeparator()
self.runMenu.addAction(self.flushCacheAction)
self.vistrailMenu = self.menuBar().addMenu('Vis&trail')
self.vistrailMenu.menuAction().setEnabled(False)
self.packagesMenu = self.menuBar().addMenu('Packages')
self.packagesMenu.menuAction().setEnabled(False)
self.helpMenu = self.menuBar().addMenu('Help')
self.helpMenu.addAction(self.helpAction)
self.helpMenu.addAction(self.checkUpdateAction)
def createToolBar(self):
""" createToolBar() -> None
Create a default toolbar for this builder window
"""
self.toolBar = QtGui.QToolBar(self)
self.toolBar.setWindowTitle('Vistrail File')
self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.addToolBar(self.toolBar)
self.toolBar.addAction(self.newVistrailAction)
self.toolBar.addAction(self.openFileAction)
self.toolBar.addAction(self.saveFileAction)
self.toolBar.addSeparator()
self.toolBar.addAction(self.undoAction)
self.toolBar.addAction(self.redoAction)
self.toolBar.addSeparator()
self.toolBar.addAction(self.createMashupAction)
self.viewToolBar = QVistrailViewToolBar(self)
self.addToolBar(self.viewToolBar)
self.interactionToolBar = QVistrailInteractionToolBar(self)
self.addToolBar(self.interactionToolBar)
    def connectSignals(self):
        """ connectSignals() -> None
        Map signals between various GUI components

        Wires the view manager's custom signals to builder-window slots,
        hooks every QAction's triggered() signal to its handler, connects
        the checkable panel-toggle actions (which pass their checked
        state), and finally connects package-manager signals.  The
        statement order is preserved as-is: Qt invokes slots in
        connection order.
        """
        # view-manager notifications -> builder-window state updates
        self.connect(self.viewManager,
                     QtCore.SIGNAL('moduleSelectionChange'),
                     self.moduleSelectionChange)
        self.connect(self.viewManager,
                     QtCore.SIGNAL('versionSelectionChange'),
                     self.versionSelectionChange)
        self.connect(self.viewManager,
                     QtCore.SIGNAL('execStateChange()'),
                     self.execStateChange)
        self.connect(self.viewManager,
                     QtCore.SIGNAL('currentVistrailChanged'),
                     self.currentVistrailChanged)
        self.connect(self.viewManager,
                     QtCore.SIGNAL('vistrailChanged()'),
                     self.vistrailChanged)
        self.connect(self.viewManager,
                     QtCore.SIGNAL('vistrailViewAdded'),
                     self.vistrailViewAdded)
        self.connect(self.viewManager,
                     QtCore.SIGNAL('vistrailViewRemoved'),
                     self.vistrailViewRemoved)
        # clipboard contents drive the Paste action's enabled state
        self.connect(QtGui.QApplication.clipboard(),
                     QtCore.SIGNAL('dataChanged()'),
                     self.clipboardChanged)
        # plain (non-checkable) actions: (action, handler) pairs, all
        # connected to triggered() below
        trigger_actions = [
            (self.redoAction, self.viewManager.redo),
            (self.undoAction, self.viewManager.undo),
            (self.copyAction, self.viewManager.copySelection),
            (self.pasteAction, self.viewManager.pasteToCurrentPipeline),
            (self.selectAllAction, self.viewManager.selectAllModules),
            (self.groupAction, self.viewManager.group),
            (self.ungroupAction, self.viewManager.ungroup),
            (self.showGroupAction, self.showGroup),
            (self.makeAbstractionAction,
             self.viewManager.makeAbstraction),
            (self.convertToAbstractionAction,
             self.viewManager.convertToAbstraction),
            (self.editAbstractionAction, self.editAbstraction),
            (self.importAbstractionAction, self.viewManager.importAbstraction),
            (self.exportAbstractionAction, self.viewManager.exportAbstraction),
            (self.controlFlowAssistAction, self.controlFlowAssist),
            (self.newVistrailAction, self.newVistrail),
            (self.openFileAction, self.open_vistrail_default),
            (self.importFileAction, self.import_vistrail_default),
            (self.saveFileAction, self.save_vistrail_default),
            (self.saveFileAsAction, self.save_vistrail_default_as),
            (self.exportFileAction, self.export_vistrail_default),
            (self.closeVistrailAction, self.viewManager.closeVistrail),
            (self.exportStableAction, self.viewManager.export_stable),
            (self.saveOpmAction, self.viewManager.save_opm),
            (self.saveLogAction, self.save_log_default),
            (self.exportLogAction, self.export_log_default),
            (self.importWorkflowAction, self.import_workflow_default),
            (self.saveWorkflowAction, self.save_workflow_default),
            (self.exportWorkflowAction, self.export_workflow_default),
            (self.saveRegistryAction, self.save_registry_default),
            (self.exportRegistryAction, self.export_registry_default),
            (self.savePDFAction, self.save_pdf),
            (self.expandBranchAction, self.expandBranch),
            (self.collapseBranchAction, self.collapseBranch),
            (self.collapseAllAction, self.collapseAll),
            (self.hideBranchAction, self.hideBranch),
            (self.showAllAction, self.showAll),
            (self.helpAction, self.showAboutMessage),
            (self.checkUpdateAction, self.showUpdatesMessage),
            (self.repositoryOptions, self.showRepositoryOptions),
            (self.editPreferencesAction, self.showPreferences),
            (self.executeCurrentWorkflowAction,
             self.execute_current_pipeline),
            (self.executeDiffAction, self.showDiff),
            (self.executeQueryAction, self.queryVistrail),
            (self.executeExplorationAction,
             self.execute_current_exploration),
            (self.flushCacheAction, self.flush_cache),
            (self.quitVistrailsAction, self.quitVistrails),
            (self.createMashupAction, self.createMashup),
            (self.executeMashupAction, self.executeMashup)
            ]
        for (emitter, receiver) in trigger_actions:
            self.connect(emitter, QtCore.SIGNAL('triggered()'), receiver)
        # checkable actions: triggered(bool) forwards the checked state
        self.connect(self.pipViewAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.viewManager.setPIPMode)
        self.connect(self.methodsViewAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.viewManager.setMethodsMode)
        self.connect(self.setMethodsViewAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.viewManager.setSetMethodsMode)
        self.connect(self.propertiesViewAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.viewManager.setPropertiesMode)
        self.connect(self.propertiesOverlayAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.viewManager.setPropertiesOverlayMode)
        self.connect(self.moduleConfigViewAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.viewManager.setModuleConfigMode)
        self.connect(self.vistrailVarsViewAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.viewManager.setVistrailVarsMode)
        # action groups dispatch the clicked QAction to the menu handlers
        self.connect(self.vistrailActionGroup,
                     QtCore.SIGNAL('triggered(QAction *)'),
                     self.vistrailSelectFromMenu)
        self.connect(self.mergeActionGroup,
                     QtCore.SIGNAL('triggered(QAction *)'),
                     self.vistrailMergeFromMenu)
        self.connect(self.workspaceAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.showWorkspace)
        self.connect(self.provenanceBrowserAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.showProvenanceBrowser)
        self.connect(self.shellAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.showShell)
        self.connect(self.debugAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.showDebugger)
        self.connect(self.messagesAction,
                     QtCore.SIGNAL('triggered(bool)'),
                     self.showMessages)
        # keep the messages action's check state in sync with the debug view
        self.connect(gui.debug.DebugView.getInstance(),
                     QtCore.SIGNAL("messagesView(bool)"),
                     self.messagesAction.setChecked)
        # Ctrl+Return / Ctrl+Enter shortcuts execute the current pipeline
        for shortcut in self.executeShortcuts:
            self.connect(shortcut,
                         QtCore.SIGNAL('activated()'),
                         self.execute_current_pipeline)
        self.connect_package_manager_signals()
def connect_package_manager_signals(self):
""" connect_package_manager_signals()->None
Connect specific signals related to the package manager """
pm = get_package_manager()
self.connect(pm,
pm.add_package_menu_signal,
self.add_package_menu_items)
self.connect(pm,
pm.remove_package_menu_signal,
self.remove_package_menu_items)
self.connect(pm,
pm.package_error_message_signal,
self.show_package_error_message)
def add_package_menu_items(self, pkg_id, pkg_name, items):
"""add_package_menu_items(pkg_id: str,pkg_name: str,items: list)->None
Add a pacckage menu entry with submenus defined by 'items' to
Packages menu.
"""
if len(self._package_menu_items) == 0:
self.packagesMenu.menuAction().setEnabled(True)
# we don't support a menu hierarchy yet, only a flat list
# this can be added later
if not self._package_menu_items.has_key(pkg_id):
pkg_menu = self.packagesMenu.addMenu(str(pkg_name))
self._package_menu_items[pkg_id] = pkg_menu
else:
pkg_menu = self._package_menu_items[pkg_id]
pkg_menu.clear()
for item in items:
(name, callback) = item
action = QtGui.QAction(name,self)
self.connect(action, QtCore.SIGNAL('triggered()'),
callback)
pkg_menu.addAction(action)
def remove_package_menu_items(self, pkg_id):
"""remove_package_menu_items(pkg_id: str)-> None
removes all menu entries from the Packages Menu created by pkg_id """
if self._package_menu_items.has_key(pkg_id):
pkg_menu = self._package_menu_items[pkg_id]
del self._package_menu_items[pkg_id]
pkg_menu.clear()
pkg_menu.deleteLater()
if len(self._package_menu_items) == 0:
self.packagesMenu.menuAction().setEnabled(False)
    def show_package_error_message(self, pkg_id, pkg_name, msg):
        """show_package_error_message(pkg_id: str, pkg_name: str, msg: str)
        -> None
        Show a non-blocking message box reporting msg on behalf of the
        named package.  Because of the way initialization is set up, the
        messages are shown only after the builder window is shown.
        """
        msgbox = build_custom_window("Package %s (%s) says:"%(pkg_name,pkg_id),
                                     msg,
                                     modal=True,
                                     parent=self)
        # we cannot call msgbox.exec_() or the initialization will hang;
        # creating a modal window and calling show() does not cause it to hang
        # and forces the messages to be shown on top of the builder window
        # after initialization
        msgbox.show()
def setDBDefault(self, on):
""" setDBDefault(on: bool) -> None
The preferences are set to turn on/off read/write from db instead of
file. Update the state accordingly.
"""
self.dbDefault = on
if self.dbDefault:
self.openFileAction.setIcon(CurrentTheme.OPEN_VISTRAIL_DB_ICON)
self.openFileAction.setStatusTip('Open an existing vistrail from '
'a database')
self.importFileAction.setIcon(CurrentTheme.OPEN_VISTRAIL_ICON)
self.importFileAction.setText('From XML File...')
self.importFileAction.setStatusTip('Import an existing vistrail '
' from a file')
self.saveFileAction.setStatusTip('Save the current vistrail '
'to a database')
self.saveFileAsAction.setStatusTip('Save the current vistrail to a '
'different database location')
self.exportFileAction.setText('To XML File...')
self.exportFileAction.setStatusTip('Save the current vistrail to '
' a file')
self.exportLogAction.setText('Log To XML File...')
self.exportLogAction.setStatusTip('Save the execution log to '
'a file')
self.saveLogAction.setText('Log To DB...')
self.saveLogAction.setStatusTip('Save the execution log to '
'a database')
self.exportWorkflowAction.setText('Workflow To XML File...')
self.exportWorkflowAction.setStatusTip('Save the current workflow '
'to a file')
self.importWorkflowAction.setStatusTip('Import a workflow from a '
'database')
self.saveWorkflowAction.setText('Workflow To DB...')
self.saveWorkflowAction.setStatusTip('Save the current workflow '
'to a database')
self.exportRegistryAction.setText('Registry To XML File...')
self.exportRegistryAction.setStatusTip('Save the current registry '
'to a file')
self.saveRegistryAction.setText('Registry To DB...')
self.saveRegistryAction.setStatusTip('Save the current registry '
'to a database')
else:
self.openFileAction.setIcon(CurrentTheme.OPEN_VISTRAIL_ICON)
self.openFileAction.setStatusTip('Open an existing vistrail from '
'a file')
self.importFileAction.setIcon(CurrentTheme.OPEN_VISTRAIL_DB_ICON)
self.importFileAction.setText('From DB...')
self.importFileAction.setStatusTip('Import an existing vistrail '
' from a database')
self.saveFileAction.setStatusTip('Save the current vistrail '
'to a file')
self.saveFileAsAction.setStatusTip('Save the current vistrail to a '
'different file location')
self.exportFileAction.setStatusTip('Save the current vistrail to '
' a database')
self.saveLogAction.setText('Log To XML...')
self.saveLogAction.setStatusTip('Save the execution log to '
'a file')
self.exportLogAction.setText('Log To DB...')
self.exportLogAction.setStatusTip('Export the execution log to '
'a database')
self.importWorkflowAction.setStatusTip('Import a workflow from an '
'xml file')
self.saveWorkflowAction.setText('Workflow To XML File...')
self.saveWorkflowAction.setStatusTip('Save the current workflow '
'to a file')
self.exportWorkflowAction.setText('Worfklow To DB...')
self.exportWorkflowAction.setStatusTip('Save the current workflow '
'to a database')
self.saveRegistryAction.setText('Registry To XML File...')
self.saveRegistryAction.setStatusTip('Save the current registry '
'to a file')
self.exportRegistryAction.setText('Registry To DB...')
self.exportRegistryAction.setStatusTip('Save the current registry '
'to a database')
def moduleSelectionChange(self, selection):
""" moduleSelectionChange(selection: list[id]) -> None
Update the status of tool bar buttons if there is module selected
"""
self.copyAction.setEnabled(len(selection)>0)
self.groupAction.setEnabled(len(selection)>0)
self.ungroupAction.setEnabled(len(selection)>0)
def versionSelectionChange(self, versionId):
""" versionSelectionChange(versionId: int) -> None
Update the status of tool bar buttons if there is a version selected
"""
self.undoAction.setEnabled(versionId>0)
self.selectAllAction.setEnabled(self.viewManager.canSelectAll())
currentView = self.viewManager.currentWidget()
if currentView:
self.redoAction.setEnabled(currentView.can_redo())
else:
self.redoAction.setEnabled(False)
def execStateChange(self):
""" execStateChange() -> None
Something changed on the canvas that effects the execution state,
update interface accordingly.
"""
currentView = self.viewManager.currentWidget()
if currentView:
# Update toolbars
if self.viewIndex == 2:
self.emit(QtCore.SIGNAL("executeEnabledChanged(bool)"),
currentView.execQueryEnabled)
elif self.viewIndex == 3:
self.emit(QtCore.SIGNAL("executeEnabledChanged(bool)"),
currentView.execExploreEnabled)
else:
self.emit(QtCore.SIGNAL("executeEnabledChanged(bool)"),
currentView.execPipelineEnabled)
# Update menu
self.executeCurrentWorkflowAction.setEnabled(
currentView.execPipelineEnabled)
self.executeDiffAction.setEnabled(currentView.execDiffEnabled)
self.executeQueryAction.setEnabled(currentView.execQueryEnabled)
self.executeExplorationAction.setEnabled(currentView.execExploreEnabled)
self.createMashupAction.setEnabled(currentView.createMashupEnabled)
self.executeMashupAction.setEnabled(currentView.execMashupEnabled)
else:
self.emit(QtCore.SIGNAL("executeEnabledChanged(bool)"),
False)
self.executeCurrentWorkflowAction.setEnabled(False)
self.executeDiffAction.setEnabled(False)
self.executeQueryAction.setEnabled(False)
self.executeExplorationAction.setEnabled(False)
self.createMashupAction.setEnabled(False)
self.executeMashupAction.setEnabled(False)
def viewModeChanged(self, index):
""" viewModeChanged(index: int) -> None
Update the state of the view buttons
"""
if self.detachedHistoryView and index==1:
index = 0
self.emit(QtCore.SIGNAL("changeViewState(int)"), index)
self.viewIndex = index
self.execStateChange()
self.viewManager.viewModeChanged(index)
def clipboardChanged(self, mode=QtGui.QClipboard.Clipboard):
""" clipboardChanged(mode: QClipboard) -> None
Update the status of tool bar buttons when the clipboard
contents has been changed
"""
clipboard = QtGui.QApplication.clipboard()
self.pasteAction.setEnabled(not clipboard.text().isEmpty())
def currentVistrailChanged(self, vistrailView):
""" currentVistrailChanged(vistrailView: QVistrailView) -> None
Redisplay the new title of vistrail
"""
self.execStateChange()
if vistrailView:
self.setWindowTitle(self.title + ' - ' +
vistrailView.windowTitle())
self.saveFileAction.setEnabled(True)
self.closeVistrailAction.setEnabled(True)
self.saveFileAsAction.setEnabled(True)
self.exportFileAction.setEnabled(True)
self.vistrailMenu.menuAction().setEnabled(True)
self.mergeMenu.menuAction().setEnabled(True)
else:
self.setWindowTitle(self.title)
self.saveFileAction.setEnabled(False)
self.closeVistrailAction.setEnabled(False)
self.saveFileAsAction.setEnabled(False)
self.exportFileAction.setEnabled(False)
self.vistrailMenu.menuAction().setEnabled(False)
self.mergeMenu.menuAction().setEnabled(False)
if vistrailView and vistrailView.viewAction:
vistrailView.viewAction.setText(vistrailView.windowTitle())
if not vistrailView.viewAction.isChecked():
vistrailView.viewAction.setChecked(True)
if vistrailView and vistrailView.mergeAction:
vistrailView.mergeAction.setText(vistrailView.windowTitle())
for mergeAction in self.mergeActionGroup.actions():
if mergeAction == vistrailView.mergeAction:
mergeAction.setVisible(False)
else:
mergeAction.setVisible(True)
self.update_shell()
self.update_debugger()
def vistrailChanged(self):
""" vistrailChanged() -> None
An action was performed on the current vistrail
"""
self.saveFileAction.setEnabled(True)
self.saveFileAsAction.setEnabled(True)
self.exportFileAction.setEnabled(True)
self.update_shell()
self.update_debugger()
def newVistrail(self):
""" newVistrail() -> None
Start a new vistrail, unless user cancels during interaction.
FIXME: There should be a separation between the interactive
and non-interactive parts.
"""
if self.viewManager.newVistrail(False):
self.viewModeChanged(0)
def open_vistrail(self, locator_class):
""" open_vistrail(locator_class) -> None
Prompt user for information to get to a vistrail in different ways,
depending on the locator class given.
"""
locator = locator_class.load_from_gui(self, Vistrail.vtType)
if locator:
if locator.has_temporaries():
if not locator_class.prompt_autosave(self):
locator.clean_temporaries()
if hasattr(locator, '_vnode'):
version = locator._vnode
if hasattr(locator,'_vtag'):
# if a tag is set, it should be used instead of the
# version number
if locator._vtag != '':
version = locator._vtag
self.open_vistrail_without_prompt(locator, version)
self.set_current_locator(locator)
def open_vistrail_without_prompt(self, locator, version=None,
execute_workflow=False,
is_abstraction=False, workflow_exec=None):
"""open_vistrail_without_prompt(locator_class, version: int or str,
execute_workflow: bool,
is_abstraction: bool) -> None
Open vistrail depending on the locator class given.
If a version is given, the workflow is shown on the Pipeline View.
If execute_workflow is True the workflow will be executed.
If is_abstraction is True, the vistrail is flagged as abstraction
If workflow_exec is True, the logged execution will be displayed
"""
if not locator.is_valid():
ok = locator.update_from_gui(self)
else:
ok = True
if ok:
self.viewManager.open_vistrail(locator, version, is_abstraction)
self.closeVistrailAction.setEnabled(True)
self.saveFileAsAction.setEnabled(True)
self.exportFileAction.setEnabled(True)
self.vistrailMenu.menuAction().setEnabled(True)
self.mergeMenu.menuAction().setEnabled(True)
self.viewManager.changeCursor(self.interactionToolBar.cursorMode)
if version:
self.viewModeChanged(0)
else:
self.viewModeChanged(1)
if execute_workflow:
self.execute_current_pipeline()
if workflow_exec:
self.open_workflow_exec(
self.viewManager.currentWidget().vistrail, workflow_exec)
def open_workflow_exec(self, vistrail, exec_id):
""" open_workflow_exec(vistrail, exec_id) -> None
Open specified workflow execution for the current pipeline
"""
self.vislog = QVisualLog(vistrail, exec_id, self)
self.vislog.show()
def open_vistrail_default(self):
""" open_vistrail_default() -> None
Opens a vistrail from the file/db
"""
if self.dbDefault:
self.open_vistrail(DBLocator)
else:
self.open_vistrail(FileLocator())
def open_recent_vistrail(self):
""" open_recent_vistrail() -> None
Opens a vistrail from Open Recent menu list
"""
action = self.sender()
if action:
locator = self.recentVistrailLocators.get_locator_by_name(str(action.data().toString()))
self.open_vistrail_without_prompt(locator)
self.set_current_locator(locator)
def create_recent_vistrail_actions(self):
maxRecentVistrails = int(getattr(get_vistrails_configuration(),
'maxRecentVistrails'))
#check if we have enough actions
while len(self.recentVistrailActs) < maxRecentVistrails:
action = QtGui.QAction(self)
action.setVisible(False)
self.connect(action, QtCore.SIGNAL("triggered()"),
self.open_recent_vistrail)
self.recentVistrailActs.append(action)
def update_recent_vistrail_menu(self):
#check if we have enough actions
for i in range(len(self.openRecentMenu.actions()),
len(self.recentVistrailActs)):
self.openRecentMenu.addAction(self.recentVistrailActs[i])
def update_recent_vistrail_actions(self):
maxRecentVistrails = int(getattr(get_vistrails_configuration(),
'maxRecentVistrails'))
self.recentVistrailLocators.ensure_no_more_than_max(maxRecentVistrails)
#check if we have enough actions
self.create_recent_vistrail_actions()
self.update_recent_vistrail_menu()
for i in range(self.recentVistrailLocators.length()):
locator = self.recentVistrailLocators.get_locator(i)
text = "&%d %s" % (i + 1, locator.name)
self.recentVistrailActs[i].setText(text)
self.recentVistrailActs[i].setData(locator.name)
self.recentVistrailActs[i].setVisible(True)
for j in range(self.recentVistrailLocators.length(),len(self.recentVistrailActs)):
self.recentVistrailActs[j].setVisible(False)
conf = get_vistrails_persistent_configuration()
tconf = get_vistrails_configuration()
conf.recentVistrailList = self.recentVistrailLocators.serialize()
tconf.recentVistrailList = conf.recentVistrailList
VistrailsApplication.save_configuration()
def set_current_locator(self, locator):
""" set_current_locator(locator: CoreLocator)
Updates the list of recent files in the gui and in the configuration
"""
if locator:
self.recentVistrailLocators.add_locator(locator)
self.update_recent_vistrail_actions()
def max_recent_vistrails_changed(self, field, value):
"""max_recent_vistrails_changed()-> obj
callback to create an object to be used as a subscriber when the
configuration changed.
"""
self.update_recent_vistrail_actions()
def import_vistrail_default(self):
""" import_vistrail_default() -> None
Imports a vistrail from the file/db
"""
if self.dbDefault:
self.open_vistrail(FileLocator)
else:
self.open_vistrail(DBLocator)
def save_vistrail(self):
""" save_vistrail() -> None
Save the current vistrail to file
"""
current_view = self.viewManager.currentWidget()
locator = current_view.controller.locator
if locator is None:
class_ = FileLocator()
else:
class_ = type(locator)
self.viewManager.save_vistrail(class_)
def save_vistrail_default(self):
""" save_vistrail_default() -> None
Save the current vistrail to the file/db
"""
if self.dbDefault:
self.viewManager.save_vistrail(DBLocator)
else:
self.viewManager.save_vistrail(FileLocator())
def save_vistrail_default_as(self):
""" save_vistrail_file_as() -> None
Save the current vistrail to the file/db
"""
if self.dbDefault:
locator = self.viewManager.save_vistrail(DBLocator,
force_choose_locator=True)
else:
locator = self.viewManager.save_vistrail(FileLocator(),
force_choose_locator=True)
if locator:
self.set_current_locator(locator)
def export_vistrail_default(self):
""" export_vistrail_default() -> None
Export the current vistrail to the file/db
"""
if self.dbDefault:
self.viewManager.save_vistrail(FileLocator(),
force_choose_locator=True)
else:
self.viewManager.save_vistrail(DBLocator,
force_choose_locator=True)
def save_log(self, invert=False, choose=True):
# want xor of invert and dbDefault
if (invert and not self.dbDefault) or (not invert and self.dbDefault):
self.viewManager.save_log(DBLocator,
force_choose_locator=choose)
else:
self.viewManager.save_log(XMLFileLocator,
force_choose_locator=choose)
def save_log_default(self):
self.save_log(False)
def export_log_default(self):
self.save_log(True)
def import_workflow(self, locator_class):
locator = locator_class.load_from_gui(self, Pipeline.vtType)
if locator:
if not locator.is_valid():
ok = locator.update_from_gui(self, Pipeline.vtType)
else:
ok = True
if ok:
self.viewManager.open_workflow(locator)
self.closeVistrailAction.setEnabled(True)
self.saveFileAsAction.setEnabled(True)
self.exportFileAction.setEnabled(True)
self.vistrailMenu.menuAction().setEnabled(True)
self.mergeMenu.menuAction().setEnabled(True)
self.viewModeChanged(1)
def import_workflow_default(self):
self.import_workflow(XMLFileLocator)
def save_workflow(self, invert=False, choose=True):
# want xor of invert and dbDefault
if (invert and not self.dbDefault) or (not invert and self.dbDefault):
self.viewManager.save_workflow(DBLocator,
force_choose_locator=choose)
else:
self.viewManager.save_workflow(XMLFileLocator,
force_choose_locator=choose)
def save_workflow_default(self):
self.save_workflow(False)
def export_workflow_default(self):
self.save_workflow(True)
def save_registry(self, invert=False, choose=True):
# want xor of invert and dbDefault
if (invert and not self.dbDefault) or (not invert and self.dbDefault):
self.viewManager.save_registry(DBLocator,
force_choose_locator=choose)
else:
self.viewManager.save_registry(XMLFileLocator,
force_choose_locator=choose)
def save_registry_default(self):
self.save_registry(False)
def export_registry_default(self):
self.save_registry(True)
    def save_pdf(self):
        """Export the currently focused view to a PDF file.

        Looks for a widget exposing a saveToPDF(filename) method: first
        the active window's central widget, otherwise the central widget
        of the current tab inside the view manager.  Silently returns if
        no exportable view exists or the user cancels the file dialog.
        """
        active_window = VistrailsApplication.activeWindow()
        view = None
        if active_window and active_window.centralWidget() and \
                hasattr(active_window.centralWidget(), 'saveToPDF'):
            view = active_window.centralWidget()
        elif active_window and hasattr(active_window, 'viewManager') and \
                hasattr(active_window.viewManager.currentView().\
                            stackedWidget.currentWidget().centralWidget(),
                        'saveToPDF'):
            view = active_window.viewManager.currentView().stackedWidget.\
                currentWidget().centralWidget()
        if view is not None:
            fileName = QtGui.QFileDialog.getSaveFileName(
                active_window,
                "Save PDF...",
                core.system.vistrails_file_directory(),
                "PDF files (*.pdf)",
                None)
            # PyQt4 returns a QString; isEmpty() is true when the user
            # cancelled the dialog
            if fileName.isEmpty():
                return None
            f = str(fileName)
            view.saveToPDF(f)
def quitVistrails(self):
""" quitVistrails() -> bool
Quit Vistrail, return False if not succeeded
"""
if self.viewManager.closeAllVistrails():
QtCore.QCoreApplication.quit()
# In case the quit() failed (when Qt doesn't have the main
# event loop), we have to return True still
return True
return False
def vistrailViewAdded(self, view):
""" vistrailViewAdded(view: QVistrailView) -> None
Add this vistrail to the Vistrail menu
"""
view.viewAction = QtGui.QAction(view.windowTitle(), self)
view.viewAction.view = view
view.viewAction.setCheckable(True)
self.vistrailActionGroup.addAction(view.viewAction)
self.vistrailMenu.addAction(view.viewAction)
view.versionTab.versionView.scene().fitToView(
view.versionTab.versionView, True)
# create merge action
view.mergeAction = QtGui.QAction(view.windowTitle(), self)
view.mergeAction.view = view
self.mergeActionGroup.addAction(view.mergeAction)
self.mergeMenu.addAction(view.mergeAction)
def vistrailViewRemoved(self, view):
""" vistrailViewRemoved(view: QVistrailView) -> None
Remove this vistrail from the Vistrail menu
"""
self.vistrailActionGroup.removeAction(view.viewAction)
self.vistrailMenu.removeAction(view.viewAction)
view.viewAction.view = None
# delete merge action
self.mergeActionGroup.removeAction(view.mergeAction)
self.mergeMenu.removeAction(view.mergeAction)
view.mergeAction.view = None
    def vistrailSelectFromMenu(self, menuAction):
        """ vistrailSelectFromMenu(menuAction: QAction) -> None
        Handle a click in the Vistrail menu: make the clicked vistrail's
        view the current one.
        """
        self.viewManager.setCurrentWidget(menuAction.view)
    def vistrailMergeFromMenu(self, mergeAction):
        """ vistrailMergeFromMenu(mergeAction: QAction) -> None
        Handle a click in the 'Merge with' menu: merge the selected
        vistrail into the current one and show the result as a new,
        unsaved vistrail.
        """
        thumb_cache = ThumbnailCache.getInstance()
        c1 = self.viewManager.currentView().controller
        c2 = mergeAction.view.controller
        # both vistrails must be saved before merging
        if c1.changed or c2.changed:
            text = ('Both Vistrails need to be saved before they can be merged.')
            QtGui.QMessageBox.information(None, 'Cannot perform merge',
                                          text, '&OK')
            return
        # bundle each vistrail with its log and (if autosaved) thumbnails;
        # c1's vistrail is copied via do_copy() while c2's is passed as-is —
        # presumably merge() writes its result into s1 (verify in
        # db.services.vistrail.merge)
        l1 = c1.locator._name if c1.locator is not None else ''
        t1 = c1.find_thumbnails(tags_only=thumb_cache.conf.tagsOnly) \
            if thumb_cache.conf.autoSave else []
        s1 = SaveBundle(c1.vistrail.vtType, c1.vistrail.do_copy(), c1.log, thumbnails=t1)
        l2 = c2.locator._name if c2.locator is not None else ''
        t2 = c2.find_thumbnails(tags_only=thumb_cache.conf.tagsOnly) \
            if thumb_cache.conf.autoSave else []
        s2 = SaveBundle(c2.vistrail.vtType, c2.vistrail, c2.log, thumbnails=t2)
        db.services.vistrail.merge(s1, s2, "", merge_gui, l1, l2)
        vistrail = s1.vistrail
        # detach from any on-disk location so the result is a fresh,
        # unsaved vistrail
        vistrail.locator = None
        vistrail.set_defaults()
        self.viewManager.set_vistrail_view(vistrail, None, thumbnail_files=s1.thumbnails)
        # mark changed so the user is prompted to save the merge result
        self.viewManager.currentView().controller.changed = True
        self.viewManager.currentView().stateChanged()
def showWorkspace(self, checked=True):
""" showWorkspace() -> None
Display the vistrail workspace """
if checked:
self.workspace.show()
else:
self.workspace.hide()
    def showProvenanceBrowser(self, checked=True):
        """ showProvenanceBrowser(checked: bool) -> None
        Show or hide the provenance browser """
        if checked:
            self.provenanceBrowser.show()
        else:
            self.provenanceBrowser.hide()
    def showShell(self, checked=True):
        """ showShell(checked: bool) -> None
        Display or hide the shell console.  The dialog is created lazily
        on first use and docked at the bottom of the main window; the
        current view's controller and pipeline are pushed into it.
        """
        if checked:
            # snapshot sys.std{out,in,err}; restored on hide via
            # recoverPythonPrompt() in the else branch below
            self.savePythonPrompt()
            if not self.shell:
                self.shell = QShellDialog(self)
                # keep the toolbar toggle in sync when the user closes
                # the shell dialog directly
                self.connect(self.shell,QtCore.SIGNAL("shellHidden()"),
                             self.shellAction.toggle)
                self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,
                                   self.shell)
            self.shell.show()
            currentView = self.viewManager.currentWidget()
            if currentView:
                controller = currentView.controller
                pipeline = controller.current_pipeline
                self.shell.shell.add_controller(controller)
                self.shell.shell.add_pipeline(pipeline)
        else:
            if self.shell:
                self.shell.hide()
            self.recoverPythonPrompt()
def update_shell(self):
try:
if not self.shell:
self.shell = QShellDialog(self)
self.connect(self.shell, QtCore.SIGNAL("shellHidden()"),
self.shellAction.toggle)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,
self.shell)
self.shell.shell.add_controller(self.viewManager.currentWidget().controller)
if self.shell.isVisible():
self.shell.show()
else:
self.shell.hide()
except:
pass
    def showDebugger(self, checked=True):
        """ showDebugger(checked: bool) -> None
        Show or hide the debugger dock.  The dock is created lazily with
        the current view's controller and docked at the bottom.
        """
        ctrlr = self.viewManager.currentWidget().controller
        if checked:
            if not self.debugger:
                self.debugger = QDebugger(self, ctrlr)
                # keep the toolbar toggle in sync when the debugger is
                # closed directly
                self.connect(self.debugger, QtCore.SIGNAL("debuggerHidden()"),
                             self.debugAction.toggle)
                self.debugger.setWindowTitle("Debugger")
                self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,
                                   self.debugger)
            self.debugger.show()
        elif self.debugger and self.debugger.isVisible():
            self.debugger.hide()
    def update_debugger(self):
        """Create the debugger dock if needed, retarget it at the current
        view's controller, and preserve its visibility.  No-op when no
        view is open.
        """
        if self.viewManager.currentWidget() is None:
            return
        ctrlr = self.viewManager.currentWidget().controller
        if not self.debugger:
            self.debugger = QDebugger(self, ctrlr)
            self.connect(self.debugger, QtCore.SIGNAL("debuggerHidden()"),
                         self.debugAction.toggle)
            self.debugger.setWindowTitle("Debugger")
            self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,
                               self.debugger)
        # always retarget: the dock may predate the current view
        self.debugger.set_controller(ctrlr)
        if self.debugger.isVisible():
            self.debugger.show()
        else:
            self.debugger.hide()
def showMessages(self, checked=True):
debugView = gui.debug.DebugView.getInstance()
if checked:
debugView.show()
else:
debugView.hide()
def savePythonPrompt(self):
"""savePythonPrompt() -> None
Keep system standard input and output internally
"""
self.stdout = sys.stdout
self.stdin = sys.stdin
self.stderr = sys.stderr
def recoverPythonPrompt(self):
"""recoverPythonPrompt() -> None
Reassign system standard input and output to previous saved state.
"""
sys.stdout = self.stdout
sys.stdin = self.stdin
sys.stderr = self.stderr
    def showAboutMessage(self):
        """showAboutMessage() -> None
        Displays the frameless "About" splash dialog; clicking anywhere
        on the image or the version text dismisses it.
        """
        class About(QtGui.QLabel):
            """QLabel that emits clicked() on any mouse press."""
            def mousePressEvent(self, e):
                self.emit(QtCore.SIGNAL("clicked()"))
        dlg = QtGui.QDialog(self, QtCore.Qt.FramelessWindowHint)
        layout = QtGui.QVBoxLayout()
        layout.setMargin(0)
        layout.setSpacing(0)
        bgimage = About(dlg)
        bgimage.setPixmap(CurrentTheme.DISCLAIMER_IMAGE)
        layout.addWidget(bgimage)
        dlg.setLayout(layout)
        # white version string overlaid on top of the disclaimer image
        text = "<font color=\"white\"><b>%s</b></font>" % \
            system.short_about_string()
        version = About(text, dlg)
        version.setGeometry(11,20,450,30)
        self.connect(bgimage,
                     QtCore.SIGNAL('clicked()'),
                     dlg,
                     QtCore.SLOT('accept()'))
        self.connect(version,
                     QtCore.SIGNAL('clicked()'),
                     dlg,
                     QtCore.SLOT('accept()'))
        dlg.setSizeGripEnabled(False)
        dlg.exec_()
        #QtGui.QMessageBox.about(self,self.tr("About VisTrails..."),
        #                        self.tr(system.about_string()))
    def showUpdatesMessage(self):
        """ showUpdatesMessage() -> None
        Displays the Check for Updates dialog.
        This queries vistrails.org (via core.system) for new VisTrails
        versions and reports the result.
        """
        dlg = QtGui.QDialog(self)
        dlg.setWindowTitle('Check for VisTrails Updates')
        layout = QtGui.QVBoxLayout()
        layout.setSpacing(6)
        layout.setMargin(11)
        layout.addStrut(400)
        # NOTE(review): presumably performs a network request — avoid
        # calling on the UI thread in hot paths; confirm in core.system
        new_version_exists, version = core.system.new_vistrails_release_exists()
        if new_version_exists:
            msg = 'Version %s of VisTrails is available at <a href="%s">%s</a>' % \
                (version, "http://www.vistrails.org/index.php/Downloads",
                 "http://www.vistrails.org/index.php/Downloads")
            label = QtGui.QLabel(msg)
        else:
            label = QtGui.QLabel("No new version of VisTrails available")
        closeButton = QtGui.QPushButton('Ok', dlg)
        closeButton.setShortcut('Enter')
        layout.addWidget(label)
        layout.addWidget(closeButton)
        dlg.connect(closeButton, QtCore.SIGNAL('clicked(bool)'), dlg.close)
        dlg.setLayout(layout)
        dlg.exec_()
def showRepositoryOptions(self):
""" Displays Repository Options for authentication and pushing VisTrail to Repository """
dialog = QRepositoryDialog(self)
dialog.exec_()
    def showPreferences(self):
        """showPreferences() -> None
        Display the Preferences dialog and apply side effects of any
        accepted changes (cache flush, pipeline revalidation, db/file
        default toggle).
        """
        dialog = QPreferencesDialog(self)
        retval = dialog.exec_()
        # non-zero: dialog was accepted, so settings may have changed
        if retval != 0:
            self.flush_cache()
            currentView = self.viewManager.currentWidget()
            if currentView:
                controller = currentView.controller
                controller.validate(controller.current_pipeline)
        # Update the state of the icons if changing between db and file
        # support
        dbState = getattr(get_vistrails_configuration(), 'dbDefault')
        if self.dbDefault != dbState:
            self.setDBDefault(dbState)
    def showDiff(self):
        """showDiff() -> None
        Show the visual difference interface for the two versions staged
        on the current view (execDiffId1/execDiffId2).  No-op unless
        both version ids are set (>= 0).
        """
        currentView = self.viewManager.currentWidget()
        if (currentView and currentView.execDiffId1 >= 0 and
            currentView.execDiffId2 >= 0):
            visDiff = QVisualDiff(currentView.controller.vistrail,
                                  currentView.execDiffId1,
                                  currentView.execDiffId2,
                                  currentView.controller,
                                  self)
            visDiff.show()
def showGroup(self):
"""showGroup() -> None
Show the pipeline underlying a group module
"""
class DummyController(object):
def __init__(self, pip):
self.current_pipeline = pip
self.search = None
# def copy_modules_and_connections(self, module_ids, connection_ids):
# """copy_modules_and_connections(module_ids: [long],
# connection_ids: [long]) -> str
# Serializes a list of modules and connections
# """
# pipeline = Pipeline()
# # pipeline.set_abstraction_map( \
# # self.current_pipeline.abstraction_map)
# for module_id in module_ids:
# module = self.current_pipeline.modules[module_id]
# # if module.vtType == Abstraction.vtType:
# # abstraction = \
# # pipeline.abstraction_map[module.abstraction_id]
# # pipeline.add_abstraction(abstraction)
# pipeline.add_module(module)
# for connection_id in connection_ids:
# connection = self.current_pipeline.connections[connection_id]
# pipeline.add_connection(connection)
# return core.db.io.serialize(pipeline)
currentView = self.viewManager.currentWidget()
if currentView:
currentScene = currentView.pipelineTab.pipelineView.scene()
if currentScene.controller:
selected_items = currentScene.get_selected_item_ids()
selected_module_ids = selected_items[0]
if len(selected_module_ids) > 0:
for id in selected_module_ids:
group = \
currentScene.controller.current_pipeline.modules[id]
if (group.vtType == 'group' or
group.vtType == 'abstraction'):
pipelineMainWindow = QtGui.QMainWindow(self)
pipelineView = QPipelineView()
controller = DummyController(group.pipeline)
pipelineView.controller = controller
pipelineMainWindow.setCentralWidget(pipelineView)
pipelineView.scene().controller = \
controller
controller.current_pipeline_view = \
pipelineView.scene()
group.pipeline.ensure_connection_specs()
pipelineView.scene().setupScene(group.pipeline)
pipelineView.scene().fitToView(pipelineView, True)
self.groupPipelineView = pipelineView
pipelineView.show()
pipelineMainWindow.show()
    def openAbstraction(self, filename):
        """openAbstraction(filename: str) -> None
        Open the subworkflow (abstraction) stored in the given XML file
        as a new vistrail view, without prompting.
        """
        locator = XMLFileLocator(filename)
        self.open_vistrail_without_prompt(locator, None, False, True)
    def editAbstraction(self):
        """editAbstraction() -> None
        Open each selected abstraction (subworkflow) module of the
        current pipeline for editing.  Subworkflows that come from a
        package are read-only and only produce an informational popup.
        """
        currentView = self.viewManager.currentWidget()
        if currentView:
            currentScene = currentView.pipelineTab.pipelineView.scene()
            if currentScene.controller:
                selected_items = currentScene.get_selected_item_ids()
                # first element of the selection tuple holds module ids
                selected_module_ids = selected_items[0]
                if len(selected_module_ids) > 0:
                    for id in selected_module_ids:
                        abstraction = \
                            currentScene.controller.current_pipeline.modules[id]
                        if abstraction.vtType == 'abstraction':
                            from core.modules.abstraction import identifier as abstraction_pkg
                            # editable only when it lives in the local
                            # abstraction package and carries no
                            # descriptor-info annotation
                            if abstraction.package == abstraction_pkg and abstraction.vistrail.get_annotation('__abstraction_descriptor_info__') is None:
                                desc = abstraction.module_descriptor
                                filename = desc.module.vt_fname
                                self.openAbstraction(filename)
                            else:
                                show_info('Package SubWorkflow is Read-Only',
                                          'This SubWorkflow is from a package and cannot be modified.\n\nYou can create an editable copy in \'My SubWorkflows\' using\n\'Edit->Import SubWorkflow\'')
    def controlFlowAssist(self):
        """ controlFlowAssist() -> None
        Launch Control Flow Assistant for selected modules.  Requires at
        least one selected module; otherwise shows an info popup.
        """
        currentView = self.viewManager.currentWidget()
        if currentView:
            currentScene = currentView.pipelineTab.pipelineView.scene()
            if currentScene.controller:
                # NOTE(review): the True flag presumably includes
                # connection ids in the selection tuple — confirm
                selected_items = currentScene.get_selected_item_ids(True)
                if selected_items is None:
                    selected_items = ([],[])
                selected_module_ids = selected_items[0]
                selected_connection_ids = selected_items[1]
                if len(selected_module_ids) > 0:
                    dialog = QControlFlowAssistDialog(self, selected_module_ids, selected_connection_ids, currentScene)
                    dialog.exec_()
                else:
                    show_info('No Modules Selected', 'You must select at least one module to use the Control Flow Assistant.')
def expandBranch(self):
""" expandBranch() -> None
Expand branch of tree
"""
controller = self.viewManager.currentWidget().controller
controller.expand_or_collapse_all_versions_below(controller.current_version, True)
def collapseBranch(self):
""" collapseBranch() -> None
Collapse branch of tree
"""
controller = self.viewManager.currentWidget().controller
controller.expand_or_collapse_all_versions_below(controller.current_version, False)
def collapseAll(self):
""" collapseAll() -> None
Collapse all branches of tree
"""
controller = self.viewManager.currentWidget().controller
controller.collapse_all_versions()
def hideBranch(self):
""" hideBranch() -> None
Hide node and all children
"""
controller = self.viewManager.currentWidget().controller
controller.hide_versions_below(controller.current_version)
def showAll(self):
""" showAll() -> None
Show all hidden nodes
"""
controller = self.viewManager.currentWidget().controller
controller.show_all_versions()
def execute(self, index):
""" execute(index: int) -> None
Execute something depending on the view
"""
if index == 2:
self.queryVistrail()
elif index == 3:
self.execute_current_exploration()
else:
self.execute_current_pipeline()
def queryVistrail(self):
""" queryVistrail() -> None
Execute a query and switch to history view if in query or explore mode
"""
if self.viewIndex > 1:
self.viewModeChanged(1)
self.viewManager.queryVistrail()
    def flush_cache(self):
        """Flush the cached interpreter's cache."""
        core.interpreter.cached.CachedInterpreter.flush()
    def execute_current_exploration(self):
        """execute_current_exploration() -> None
        Executes the current parameter exploration, if possible.
        Guards against re-entrant execution via self._executing and
        disables execute actions (executeEnabledChanged signal) for the
        duration.
        """
        if self._executing:
            return
        self._executing = True
        try:
            self.emit(QtCore.SIGNAL("executeEnabledChanged(bool)"),
                      False)
            # parameter explorations run in view mode 3
            self.viewModeChanged(3)
            self.viewManager.executeCurrentExploration()
        finally:
            # always re-enable, even if execution raised
            self._executing = False
            self.emit(QtCore.SIGNAL("executeEnabledChanged(bool)"),
                      True)
    def execute_current_pipeline(self):
        """execute_current_pipeline() -> None
        Executes the current pipeline, if possible.  Guards against
        re-entrant execution via self._executing and disables execute
        actions (executeEnabledChanged signal) for the duration.
        """
        if self._executing:
            return
        self._executing = True
        try:
            self.emit(QtCore.SIGNAL("executeEnabledChanged(bool)"),
                      False)
            self.viewManager.executeCurrentPipeline()
        finally:
            # always re-enable, even if execution raised
            self._executing = False
            self.emit(QtCore.SIGNAL("executeEnabledChanged(bool)"),
                      True)
    def createMashup(self):
        """createMashup() -> None
        Create a mashup from the current pipeline version; no-op when no
        version is selected (current_version <= 0)."""
        # get current controller and current version
        controller = self.viewManager.currentView().controller
        version = controller.current_version
        if version > 0:
            mshpManager = MashupsManager.getInstance()
            mshpManager.createMashup(controller, version)
    def executeMashup(self):
        """executeMashup() -> None
        Execute the current mashup.  Placeholder: not implemented yet.
        """
        pass
def interactiveExportCurrentPipeline(self):
""" interactiveExportPipeline()
Hide the builder window and show the spreadsheet window with
only cells belonging to the pipeline specified by locator:version
"""
from packages.spreadsheet.spreadsheet_controller import spreadsheetController
spreadsheetWindow = spreadsheetController.findSpreadsheetWindow()
from core.inspector import PipelineInspector
currentView = self.viewManager.currentWidget()
controller = currentView.controller
inspector = PipelineInspector()
pipeline = controller.current_pipeline
inspector.inspect_spreadsheet_cells(pipeline)
inspector.inspect_ambiguous_modules(pipeline)
vCol = 0
cells = {}
for mId in inspector.spreadsheet_cells:
name = pipeline.modules[mId].name
if inspector.annotated_modules.has_key(mId):
idx = inspector.annotated_modules[mId]
else:
idx = -1
cells[(name, idx)] = (0, vCol)
vCol += 1
self.hide()
spreadsheetWindow.prepareReviewingMode(vCol)
from gui.paramexplore.virtual_cell import _positionPipelines
[newPipeline] = _positionPipelines('Pipeline Review',
1, 1, 1, [pipeline],
(1, vCol, cells), pipeline)
controller.execute_workflow_list([(controller.locator,
controller.current_version,
newPipeline,
controller.current_pipeline_view,
None)])
spreadsheetWindow.startReviewingMode()
################################################################################
# import unittest
# import api
# class TestBuilderWindow(unittest.TestCase):
# def test_close_actions_enabled(self):
| |
# encoding: utf-8
"""
Constant values related to the Open Packaging Convention, in particular,
content types and relationship types.
"""
class CONTENT_TYPE(object):
    """
    Content type URIs (like MIME-types) that specify a part's format
    """
    # shared URI stems; each value below is identical to spelling the
    # full content type out as one literal
    _OFC_DOC = 'application/vnd.openxmlformats-officedocument.'
    _OPC_PKG = 'application/vnd.openxmlformats-package.'

    BMP = 'image/bmp'
    DML_CHART = _OFC_DOC + 'drawingml.chart+xml'
    DML_CHARTSHAPES = _OFC_DOC + 'drawingml.chartshapes+xml'
    DML_DIAGRAM_COLORS = _OFC_DOC + 'drawingml.diagramColors+xml'
    DML_DIAGRAM_DATA = _OFC_DOC + 'drawingml.diagramData+xml'
    DML_DIAGRAM_LAYOUT = _OFC_DOC + 'drawingml.diagramLayout+xml'
    DML_DIAGRAM_STYLE = _OFC_DOC + 'drawingml.diagramStyle+xml'
    GIF = 'image/gif'
    JPEG = 'image/jpeg'
    MS_PHOTO = 'image/vnd.ms-photo'
    OFC_CUSTOM_PROPERTIES = _OFC_DOC + 'custom-properties+xml'
    OFC_CUSTOM_XML_PROPERTIES = _OFC_DOC + 'customXmlProperties+xml'
    OFC_DRAWING = _OFC_DOC + 'drawing+xml'
    OFC_EXTENDED_PROPERTIES = _OFC_DOC + 'extended-properties+xml'
    OFC_OLE_OBJECT = _OFC_DOC + 'oleObject'
    OFC_PACKAGE = _OFC_DOC + 'package'
    OFC_THEME = _OFC_DOC + 'theme+xml'
    OFC_THEME_OVERRIDE = _OFC_DOC + 'themeOverride+xml'
    OFC_VML_DRAWING = _OFC_DOC + 'vmlDrawing'
    OPC_CORE_PROPERTIES = _OPC_PKG + 'core-properties+xml'
    OPC_DIGITAL_SIGNATURE_CERTIFICATE = _OPC_PKG + 'digital-signature-certificate'
    OPC_DIGITAL_SIGNATURE_ORIGIN = _OPC_PKG + 'digital-signature-origin'
    OPC_DIGITAL_SIGNATURE_XMLSIGNATURE = _OPC_PKG + 'digital-signature-xmlsignature+xml'
    OPC_RELATIONSHIPS = _OPC_PKG + 'relationships+xml'
    PML_COMMENTS = _OFC_DOC + 'presentationml.comments+xml'
    PML_COMMENT_AUTHORS = _OFC_DOC + 'presentationml.commentAuthors+xml'
    PML_HANDOUT_MASTER = _OFC_DOC + 'presentationml.handoutMaster+xml'
    PML_NOTES_MASTER = _OFC_DOC + 'presentationml.notesMaster+xml'
    PML_NOTES_SLIDE = _OFC_DOC + 'presentationml.notesSlide+xml'
    PML_PRESENTATION_MAIN = _OFC_DOC + 'presentationml.presentation.main+xml'
    PML_PRES_MACRO_MAIN = 'application/vnd.ms-powerpoint.presentation.macroEnabled.main+xml'
    PML_PRES_PROPS = _OFC_DOC + 'presentationml.presProps+xml'
    PML_PRINTER_SETTINGS = _OFC_DOC + 'presentationml.printerSettings'
    PML_SLIDE = _OFC_DOC + 'presentationml.slide+xml'
    PML_SLIDESHOW_MAIN = _OFC_DOC + 'presentationml.slideshow.main+xml'
    PML_SLIDE_LAYOUT = _OFC_DOC + 'presentationml.slideLayout+xml'
    PML_SLIDE_MASTER = _OFC_DOC + 'presentationml.slideMaster+xml'
    PML_SLIDE_UPDATE_INFO = _OFC_DOC + 'presentationml.slideUpdateInfo+xml'
    PML_TABLE_STYLES = _OFC_DOC + 'presentationml.tableStyles+xml'
    PML_TAGS = _OFC_DOC + 'presentationml.tags+xml'
    PML_TEMPLATE_MAIN = _OFC_DOC + 'presentationml.template.main+xml'
    PML_VIEW_PROPS = _OFC_DOC + 'presentationml.viewProps+xml'
    PNG = 'image/png'
    SML_CALC_CHAIN = _OFC_DOC + 'spreadsheetml.calcChain+xml'
    SML_CHARTSHEET = _OFC_DOC + 'spreadsheetml.chartsheet+xml'
    SML_COMMENTS = _OFC_DOC + 'spreadsheetml.comments+xml'
    SML_CONNECTIONS = _OFC_DOC + 'spreadsheetml.connections+xml'
    SML_CUSTOM_PROPERTY = _OFC_DOC + 'spreadsheetml.customProperty'
    SML_DIALOGSHEET = _OFC_DOC + 'spreadsheetml.dialogsheet+xml'
    SML_EXTERNAL_LINK = _OFC_DOC + 'spreadsheetml.externalLink+xml'
    SML_PIVOT_CACHE_DEFINITION = _OFC_DOC + 'spreadsheetml.pivotCacheDefinition+xml'
    SML_PIVOT_CACHE_RECORDS = _OFC_DOC + 'spreadsheetml.pivotCacheRecords+xml'
    SML_PIVOT_TABLE = _OFC_DOC + 'spreadsheetml.pivotTable+xml'
    SML_PRINTER_SETTINGS = _OFC_DOC + 'spreadsheetml.printerSettings'
    SML_QUERY_TABLE = _OFC_DOC + 'spreadsheetml.queryTable+xml'
    SML_REVISION_HEADERS = _OFC_DOC + 'spreadsheetml.revisionHeaders+xml'
    SML_REVISION_LOG = _OFC_DOC + 'spreadsheetml.revisionLog+xml'
    SML_SHARED_STRINGS = _OFC_DOC + 'spreadsheetml.sharedStrings+xml'
    SML_SHEET = _OFC_DOC + 'spreadsheetml.sheet'
    SML_SHEET_MAIN = _OFC_DOC + 'spreadsheetml.sheet.main+xml'
    SML_SHEET_METADATA = _OFC_DOC + 'spreadsheetml.sheetMetadata+xml'
    SML_STYLES = _OFC_DOC + 'spreadsheetml.styles+xml'
    SML_TABLE = _OFC_DOC + 'spreadsheetml.table+xml'
    SML_TABLE_SINGLE_CELLS = _OFC_DOC + 'spreadsheetml.tableSingleCells+xml'
    SML_TEMPLATE_MAIN = _OFC_DOC + 'spreadsheetml.template.main+xml'
    SML_USER_NAMES = _OFC_DOC + 'spreadsheetml.userNames+xml'
    SML_VOLATILE_DEPENDENCIES = _OFC_DOC + 'spreadsheetml.volatileDependencies+xml'
    SML_WORKSHEET = _OFC_DOC + 'spreadsheetml.worksheet+xml'
    TIFF = 'image/tiff'
    WML_COMMENTS = _OFC_DOC + 'wordprocessingml.comments+xml'
    WML_DOCUMENT_GLOSSARY = _OFC_DOC + 'wordprocessingml.document.glossary+xml'
    WML_DOCUMENT_MAIN = _OFC_DOC + 'wordprocessingml.document.main+xml'
    WML_ENDNOTES = _OFC_DOC + 'wordprocessingml.endnotes+xml'
    WML_FONT_TABLE = _OFC_DOC + 'wordprocessingml.fontTable+xml'
    WML_FOOTER = _OFC_DOC + 'wordprocessingml.footer+xml'
    WML_FOOTNOTES = _OFC_DOC + 'wordprocessingml.footnotes+xml'
    WML_HEADER = _OFC_DOC + 'wordprocessingml.header+xml'
    WML_NUMBERING = _OFC_DOC + 'wordprocessingml.numbering+xml'
    WML_PRINTER_SETTINGS = _OFC_DOC + 'wordprocessingml.printerSettings'
    WML_SETTINGS = _OFC_DOC + 'wordprocessingml.settings+xml'
    WML_STYLES = _OFC_DOC + 'wordprocessingml.styles+xml'
    WML_WEB_SETTINGS = _OFC_DOC + 'wordprocessingml.webSettings+xml'
    XML = 'application/xml'
    X_EMF = 'image/x-emf'
    X_FONTDATA = 'application/x-fontdata'
    X_FONT_TTF = 'application/x-font-ttf'
    X_WMF = 'image/x-wmf'
class NAMESPACE(object):
    """Constant values for OPC XML namespaces"""
    # common URI stem; each value equals the fully spelled-out namespace
    _SCHEMAS = 'http://schemas.openxmlformats.org'

    DML_WORDPROCESSING_DRAWING = _SCHEMAS + '/drawingml/2006/wordprocessingDrawing'
    OFC_RELATIONSHIPS = _SCHEMAS + '/officeDocument/2006/relationships'
    OPC_RELATIONSHIPS = _SCHEMAS + '/package/2006/relationships'
    OPC_CONTENT_TYPES = _SCHEMAS + '/package/2006/content-types'
    WML_MAIN = _SCHEMAS + '/wordprocessingml/2006/main'
class RELATIONSHIP_TARGET_MODE(object):
    """Open XML relationship target modes"""
    # target is a resource outside the package
    EXTERNAL = 'External'
    # target is a part within the package
    INTERNAL = 'Internal'
class RELATIONSHIP_TYPE(object):
    """
    Relationship type URIs that identify how a target part relates to
    its source part.
    """
    # shared URI stems; each value below is identical to spelling the
    # full relationship type URI out as one literal
    _OFC = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'
    _PKG = 'http://schemas.openxmlformats.org/package/2006/relationships'

    AUDIO = _OFC + '/audio'
    A_F_CHUNK = _OFC + '/aFChunk'
    CALC_CHAIN = _OFC + '/calcChain'
    CERTIFICATE = _PKG + '/digital-signature/certificate'
    CHART = _OFC + '/chart'
    CHARTSHEET = _OFC + '/chartsheet'
    CHART_USER_SHAPES = _OFC + '/chartUserShapes'
    COMMENTS = _OFC + '/comments'
    COMMENT_AUTHORS = _OFC + '/commentAuthors'
    CONNECTIONS = _OFC + '/connections'
    CONTROL = _OFC + '/control'
    CORE_PROPERTIES = _PKG + '/metadata/core-properties'
    CUSTOM_PROPERTIES = _OFC + '/custom-properties'
    CUSTOM_PROPERTY = _OFC + '/customProperty'
    CUSTOM_XML = _OFC + '/customXml'
    CUSTOM_XML_PROPS = _OFC + '/customXmlProps'
    DIAGRAM_COLORS = _OFC + '/diagramColors'
    DIAGRAM_DATA = _OFC + '/diagramData'
    DIAGRAM_LAYOUT = _OFC + '/diagramLayout'
    DIAGRAM_QUICK_STYLE = _OFC + '/diagramQuickStyle'
    DIALOGSHEET = _OFC + '/dialogsheet'
    DRAWING = _OFC + '/drawing'
    ENDNOTES = _OFC + '/endnotes'
    EXTENDED_PROPERTIES = _OFC + '/extended-properties'
    EXTERNAL_LINK = _OFC + '/externalLink'
    FONT = _OFC + '/font'
    FONT_TABLE = _OFC + '/fontTable'
    FOOTER = _OFC + '/footer'
    FOOTNOTES = _OFC + '/footnotes'
    GLOSSARY_DOCUMENT = _OFC + '/glossaryDocument'
    HANDOUT_MASTER = _OFC + '/handoutMaster'
    HEADER = _OFC + '/header'
    HYPERLINK = _OFC + '/hyperlink'
    IMAGE = _OFC + '/image'
    NOTES_MASTER = _OFC + '/notesMaster'
    NOTES_SLIDE = _OFC + '/notesSlide'
    NUMBERING = _OFC + '/numbering'
    OFFICE_DOCUMENT = _OFC + '/officeDocument'
    OLE_OBJECT = _OFC + '/oleObject'
    ORIGIN = _PKG + '/digital-signature/origin'
    PACKAGE = _OFC + '/package'
    PIVOT_CACHE_DEFINITION = _OFC + '/pivotCacheDefinition'
    # note the extra 'spreadsheetml' path segment in the original value
    PIVOT_CACHE_RECORDS = _OFC + '/spreadsheetml/pivotCacheRecords'
    PIVOT_TABLE = _OFC + '/pivotTable'
    PRES_PROPS = _OFC + '/presProps'
    PRINTER_SETTINGS = _OFC + '/printerSettings'
    QUERY_TABLE = _OFC + '/queryTable'
    REVISION_HEADERS = _OFC + '/revisionHeaders'
    REVISION_LOG = _OFC + '/revisionLog'
    SETTINGS = _OFC + '/settings'
    SHARED_STRINGS = _OFC + '/sharedStrings'
    SHEET_METADATA = _OFC + '/sheetMetadata'
    SIGNATURE = _PKG + '/digital-signature/signature'
    SLIDE = _OFC + '/slide'
    SLIDE_LAYOUT = _OFC + '/slideLayout'
    SLIDE_MASTER = _OFC + '/slideMaster'
    SLIDE_UPDATE_INFO = _OFC + '/slideUpdateInfo'
    STYLES = _OFC + '/styles'
    TABLE = _OFC + '/table'
    TABLE_SINGLE_CELLS = _OFC + '/tableSingleCells'
    TABLE_STYLES = _OFC + '/tableStyles'
    TAGS = _OFC + '/tags'
    THEME = _OFC + '/theme'
    THEME_OVERRIDE = _OFC + '/themeOverride'
    THUMBNAIL = _PKG + '/metadata/thumbnail'
    USERNAMES = _OFC + '/usernames'
    VIDEO = _OFC + '/video'
    VIEW_PROPS = _OFC + '/viewProps'
    VML_DRAWING = _OFC + '/vmlDrawing'
    VOLATILE_DEPENDENCIES = _OFC + '/volatileDependencies'
    WEB_SETTINGS = _OFC + '/webSettings'
    WORKSHEET_SOURCE = _OFC + '/worksheetSource'
    XML_MAPS = _OFC + '/xmlMaps'
| |
"""
API views
"""
import hashlib
import itertools
import json
import random
import urllib
from datetime import date, timedelta
from django.core.cache import cache
from django.db.transaction import non_atomic_requests
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_lazy, get_language
from django.utils.encoding import force_bytes
import commonware.log
import jingo
import waffle
from caching.base import cached_with
from jingo import get_standard_processors
from olympia import amo, legacy_api
from olympia.addons.models import Addon, CompatOverride
from olympia.amo.decorators import (
allow_cross_site_request, json_view)
from olympia.amo.models import manual_order
from olympia.amo.urlresolvers import get_url_prefix
from olympia.amo.utils import AMOJSONEncoder
from olympia.legacy_api.utils import addon_to_dict, extract_filters
from olympia.search.views import (
AddonSuggestionsAjax, PersonaSuggestionsAjax, name_query)
from olympia.versions.compare import version_int
# error level used by the XML message templates (see handler403/404/500)
ERROR = 'error'
OUT_OF_DATE = ugettext_lazy(
    u"The API version, {0:.1f}, you are using is not valid. "
    u"Please upgrade to the current version {1:.1f} API.")
# Jinja environment for XML output: on top of the default finalize step,
# strip control characters from every rendered value.
xml_env = jingo.get_env().overlay()
old_finalize = xml_env.finalize
xml_env.finalize = lambda x: amo.helpers.strip_controls(old_finalize(x))
# Hard limit of 30. The buffer is to try for locale-specific add-ons.
MAX_LIMIT, BUFFER = 30, 10
# "New" is arbitrarily defined as 10 days old.
NEW_DAYS = 10
log = commonware.log.getLogger('z.api')
def partition(seq, key):
    """Group a sequence into buckets by key(x).

    Returns an iterator of (k, items) pairs where `items` is the list of
    elements with key k, with buckets in sorted key order.
    """
    ordered = sorted(seq, key=key)
    return ((k, list(grp)) for k, grp in itertools.groupby(ordered, key=key))
def render_xml_to_string(request, template, context=None):
    """Render an XML template to a string using the control-character
    stripping environment, after applying the standard jingo context
    processors to the context."""
    if context is None:
        context = {}
    # jingo registers its template helpers lazily; ensure they exist
    if not jingo._helpers_loaded:
        jingo.load_helpers()
    for processor in get_standard_processors():
        context.update(processor(request))
    template = xml_env.get_template(template)
    return template.render(context)
@non_atomic_requests
def render_xml(request, template, context=None, **kwargs):
    """Safely renders xml, stripping out nasty control characters."""
    kwargs.setdefault('content_type', 'text/xml')
    body = render_xml_to_string(request, template,
                                {} if context is None else context)
    return HttpResponse(body, **kwargs)
@non_atomic_requests
def handler403(request):
    """Render the legacy-API XML error document for a 403 response."""
    return render_xml(request, 'legacy_api/message.xml',
                      {'error_level': ERROR, 'msg': 'Not allowed'},
                      status=403)
@non_atomic_requests
def handler404(request):
    """Render the legacy-API XML error document for a 404 response."""
    return render_xml(request, 'legacy_api/message.xml',
                      {'error_level': ERROR, 'msg': 'Not Found'},
                      status=404)
@non_atomic_requests
def handler500(request):
    """Render the legacy-API XML error document for a 500 response."""
    return render_xml(request, 'legacy_api/message.xml',
                      {'error_level': ERROR, 'msg': 'Server Error'},
                      status=500)
def validate_api_version(version):
    """
    We want to be able to deprecate old versions of the API, therefore we check
    that the requested version falls inside the supported range.
    """
    numeric = float(version)
    return legacy_api.MIN_VERSION <= numeric <= legacy_api.MAX_VERSION
def addon_filter(addons, addon_type, limit, app, platform, version,
                 compat_mode='strict', shuffle=True):
    """
    Filter addons by type, application, app version, and platform.
    Add-ons that support the current locale will be sorted to front of list.
    Shuffling will be applied to the add-ons supporting the locale and the
    others separately.
    Doing this in the database takes too long, so we in code and wrap it in
    generous caching.
    """
    # Keep a handle to the application; `app` is reused as a loop variable
    # further down.
    APP = app
    if addon_type.upper() != 'ALL':
        try:
            addon_type = int(addon_type)
            if addon_type:
                addons = [a for a in addons if a.type == addon_type]
        except ValueError:
            # `addon_type` is ALL or a type id. Otherwise we ignore it.
            pass
    # Take out personas since they don't have versions.
    groups = dict(partition(addons,
                            lambda x: x.type == amo.ADDON_PERSONA))
    personas, addons = groups.get(True, []), groups.get(False, [])
    platform = platform.lower()
    if platform != 'all' and platform in amo.PLATFORM_DICT:
        # Keep add-ons whose current version supports the requested
        # platform (or all platforms).
        def f(ps):
            return pid in ps or amo.PLATFORM_ALL in ps
        pid = amo.PLATFORM_DICT[platform]
        addons = [a for a in addons
                  if f(a.current_version.supported_platforms)]
    if version is not None:
        vint = version_int(version)
        # strict: the app version must fall inside [min, max];
        # ignore: only the lower bound is enforced.
        def f_strict(app):
            return app.min.version_int <= vint <= app.max.version_int
        def f_ignore(app):
            return app.min.version_int <= vint
        xs = [(a, a.compatible_apps) for a in addons]
        # Iterate over addons, checking compatibility depending on compat_mode.
        addons = []
        for addon, apps in xs:
            app = apps.get(APP)
            if compat_mode == 'strict':
                if app and f_strict(app):
                    addons.append(addon)
            elif compat_mode == 'ignore':
                if app and f_ignore(app):
                    addons.append(addon)
            elif compat_mode == 'normal':
                # This does a db hit but it's cached. This handles the cases
                # for strict opt-in, binary components, and compat overrides.
                v = addon.compatible_version(APP.id, version, platform,
                                             compat_mode)
                if v:  # There's a compatible version.
                    addons.append(addon)
    # Put personas back in.
    addons.extend(personas)
    # We prefer add-ons that support the current locale.
    lang = get_language()
    def partitioner(x):
        return x.description is not None and (x.description.locale == lang)
    groups = dict(partition(addons, partitioner))
    good, others = groups.get(True, []), groups.get(False, [])
    # Shuffle within each group so the locale-preferred ordering survives.
    if shuffle:
        random.shuffle(good)
        random.shuffle(others)
    # If limit=0, we return all addons with `good` coming before `others`.
    # Otherwise pad `good` if less than the limit and return the limit.
    if limit > 0:
        if len(good) < limit:
            good.extend(others[:limit - len(good)])
        return good[:limit]
    else:
        good.extend(others)
        return good
class APIView(object):
    """
    Base view class for all API views.
    """
    @method_decorator(non_atomic_requests)
    def __call__(self, request, api_version, *args, **kwargs):
        # Entry point: negotiate output format and API version, then hand
        # off to the subclass's process_request().
        self.version = float(api_version)
        # Output format is chosen by the client via ?format=; XML default.
        self.format = request.REQUEST.get('format', 'xml')
        self.content_type = ('text/xml' if self.format == 'xml'
                             else 'application/json')
        self.request = request
        if not validate_api_version(api_version):
            msg = OUT_OF_DATE.format(self.version, legacy_api.CURRENT_VERSION)
            return self.render_msg(msg, ERROR, status=403,
                                   content_type=self.content_type)
        return self.process_request(*args, **kwargs)
    def render_msg(self, msg, error_level=None, *args, **kwargs):
        """
        Renders a simple message.
        """
        if self.format == 'xml':
            return render_xml(
                self.request, 'legacy_api/message.xml',
                {'error_level': error_level, 'msg': msg}, *args, **kwargs)
        else:
            return HttpResponse(json.dumps({'msg': _(msg)}), *args, **kwargs)
    def render(self, template, context):
        # Render the response in the negotiated format; JSON responses are
        # delegated to the subclass's render_json().
        context['api_version'] = self.version
        context['api'] = legacy_api
        if self.format == 'xml':
            return render_xml(self.request, template, context,
                              content_type=self.content_type)
        else:
            return HttpResponse(self.render_json(context),
                                content_type=self.content_type)
    def render_json(self, context):
        # Default JSON body; subclasses override with real serialization.
        return json.dumps({'msg': _('Not implemented yet.')})
class AddonDetailView(APIView):
    """Detail endpoint for a single add-on, looked up by id or slug."""
    @allow_cross_site_request
    def process_request(self, addon_id):
        try:
            # Nominated or public add-ons should be viewable using the legacy
            # API detail endpoint.
            addon = Addon.objects.valid().id_or_slug(addon_id).get()
        except Addon.DoesNotExist:
            # Add-on is either inexistent or not public/nominated.
            return self.render_msg(
                'Add-on not found!', ERROR, status=404,
                content_type=self.content_type
            )
        return self.render_addon(addon)
    def render_addon(self, addon):
        # XML path; JSON output goes through render_json() below.
        return self.render('legacy_api/addon_detail.xml', {'addon': addon})
    def render_json(self, context):
        return json.dumps(addon_to_dict(context['addon']), cls=AMOJSONEncoder)
@non_atomic_requests
def guid_search(request, api_version, guids):
    """Search add-ons by GUID, caching each add-on's rendered XML fragment."""
    lang = request.LANG
    def guid_search_cache_key(guid):
        # Key varies by API version and language, since both affect the
        # rendered XML.
        key = 'guid_search:%s:%s:%s' % (api_version, lang, guid)
        return hashlib.md5(force_bytes(key)).hexdigest()
    guids = [guid.strip() for guid in guids.split(',')] if guids else []
    addons_xml = cache.get_many(
        [guid_search_cache_key(guid) for guid in guids])
    # Keys rendered during this request; only these are written back.
    dirty_keys = set()
    for guid in guids:
        key = guid_search_cache_key(guid)
        if key not in addons_xml:
            dirty_keys.add(key)
            try:
                # Only search through public (and not disabled) add-ons.
                addon = Addon.objects.public().get(guid=guid)
            except Addon.DoesNotExist:
                # Cache the miss (empty string) to avoid re-querying.
                addons_xml[key] = ''
            else:
                addon_xml = render_xml_to_string(
                    request, 'legacy_api/includes/addon.xml', {
                        'addon': addon,
                        'api_version': api_version,
                        'api': legacy_api
                    })
                addons_xml[key] = addon_xml
    cache.set_many(dict((k, v) for k, v in addons_xml.iteritems()
                        if k in dirty_keys))
    compat = (CompatOverride.objects.filter(guid__in=guids)
              .transform(CompatOverride.transformer))
    # Drop the cached misses (empty strings) from the rendered output.
    addons_xml = [v for v in addons_xml.values() if v]
    return render_xml(request, 'legacy_api/search.xml', {
        'addons_xml': addons_xml,
        'total': len(addons_xml),
        'compat': compat,
        'api_version': api_version, 'api': legacy_api
    })
class SearchView(APIView):
    """Legacy search endpoint: queries the search backend and renders the
    results as XML (or JSON)."""
    def process_request(self, query, addon_type='ALL', limit=10,
                        platform='ALL', version=None, compat_mode='strict'):
        """
        Query the search backend and serve up the XML.
        """
        limit = min(MAX_LIMIT, int(limit))
        app_id = self.request.APP.id
        # We currently filter for status=PUBLIC for all versions. If
        # that changes, the contract for API version 1.5 requires
        # that we continue filtering for it there.
        filters = {
            'app': app_id,
            'status': amo.STATUS_PUBLIC,
            'is_experimental': False,
            'is_disabled': False,
            'current_version__exists': True,
        }
        # Opts may get overridden by query string filters.
        opts = {
            'addon_type': addon_type,
            'version': version,
        }
        # Specific case for Personas (bug 990768): if we search providing the
        # Persona addon type (9), don't filter on the platform as Personas
        # don't have compatible platforms to filter on.
        if addon_type != '9':
            opts['platform'] = platform
        if self.version < 1.5:
            # Fix doubly encoded query strings.
            try:
                query = urllib.unquote(query.encode('ascii'))
            except UnicodeEncodeError:
                # This fails if the string is already UTF-8.
                pass
        query, qs_filters, params = extract_filters(query, opts)
        qs = Addon.search().query(or_=name_query(query))
        filters.update(qs_filters)
        if 'type' not in filters:
            # Filter by ALL types, which is really all types except for apps.
            filters['type__in'] = list(amo.ADDON_SEARCH_TYPES)
        qs = qs.filter(**filters)
        qs = qs[:limit]
        total = qs.count()
        results = []
        for addon in qs:
            compat_version = addon.compatible_version(app_id,
                                                      params['version'],
                                                      params['platform'],
                                                      compat_mode)
            # Specific case for Personas (bug 990768): if we search providing
            # the Persona addon type (9), then don't look for a compatible
            # version.
            if compat_version or addon_type == '9':
                addon.compat_version = compat_version
                results.append(addon)
                if len(results) == limit:
                    break
            else:
                # We're excluding this addon because there are no
                # compatible versions. Decrement the total.
                total -= 1
        return self.render('legacy_api/search.xml', {
            'results': results,
            'total': total,
            # For caching
            'version': version,
            'compat_mode': compat_mode,
        })
@json_view
@non_atomic_requests
def search_suggestions(request):
    """Serve AJAX search suggestions, throttled by a waffle sample."""
    if waffle.sample_is_active('autosuggest-throttle'):
        return HttpResponse(status=503)
    category = request.GET.get('cat', 'all')
    suggester_cls = {
        'all': AddonSuggestionsAjax,
        'themes': PersonaSuggestionsAjax,
    }.get(category, AddonSuggestionsAjax)
    items = suggester_cls(request, ratings=True).items
    # Ratings come back as strings/decimals; normalize to float for JSON.
    for item in items:
        item['rating'] = float(item['rating'])
    return {'suggestions': items}
class ListView(APIView):
    """Lists of featured/new/popular add-ons, filtered in Python and served
    through a cache."""
    def process_request(self, list_type='recommended', addon_type='ALL',
                        limit=10, platform='ALL', version=None,
                        compat_mode='strict'):
        """
        Find a list of new or featured add-ons. Filtering is done in Python
        for cache-friendliness and to avoid heavy queries.
        """
        limit = min(MAX_LIMIT, int(limit))
        APP, platform = self.request.APP, platform.lower()
        qs = Addon.objects.listed(APP)
        shuffle = True
        if list_type in ('by_adu', 'featured'):
            # Personas are excluded from these list types.
            qs = qs.exclude(type=amo.ADDON_PERSONA)
        if list_type == 'newest':
            new = date.today() - timedelta(days=NEW_DAYS)
            addons = (qs.filter(created__gte=new)
                      .order_by('-created'))[:limit + BUFFER]
        elif list_type == 'by_adu':
            addons = qs.order_by('-average_daily_users')[:limit + BUFFER]
            shuffle = False  # By_adu is an ordered list.
        elif list_type == 'hotness':
            # Filter to type=1 so we hit visible_idx. Only extensions have a
            # hotness index right now so this is not incorrect.
            addons = (qs.filter(type=amo.ADDON_EXTENSION)
                      .order_by('-hotness'))[:limit + BUFFER]
            shuffle = False
        else:
            ids = Addon.featured_random(APP, self.request.LANG)
            addons = manual_order(qs, ids[:limit + BUFFER], 'addons.id')
            shuffle = False
        args = (addon_type, limit, APP, platform, version, compat_mode,
                shuffle)
        def f():
            return self._process(addons, *args)
        # The rendered response is cached keyed on the queryset and args.
        return cached_with(addons, f, map(force_bytes, args))
    def _process(self, addons, *args):
        # Render after running the in-Python filter (see addon_filter).
        return self.render('legacy_api/list.xml',
                           {'addons': addon_filter(addons, *args)})
    def render_json(self, context):
        return json.dumps([addon_to_dict(a) for a in context['addons']],
                          cls=AMOJSONEncoder)
class LanguageView(APIView):
    """List public language packs for the current application."""
    def process_request(self):
        qs = Addon.objects.public().filter(
            type=amo.ADDON_LPAPP,
            appsupport__app=self.request.APP.id).order_by('pk')
        context = {'addons': qs, 'show_localepicker': True}
        return self.render('legacy_api/list.xml', context)
# pylint: disable-msg=W0613
@non_atomic_requests
def redirect_view(request, url):
    """
    Redirect all requests that come here to an API call with a view parameter.
    """
    quoted_path = urllib.quote(url.encode('utf-8'))
    dest = get_url_prefix().fix(
        '/api/%.1f/%s' % (legacy_api.CURRENT_VERSION, quoted_path))
    return HttpResponsePermanentRedirect(dest)
| |
"""
Client class.
Base class for the clients.
"""
import logging
from abc import ABCMeta, abstractmethod
from json import dumps as serialize
from json import loads as deserialize
from typing import Any, Callable, Dict, Iterator, List, Optional, Union
from apply_defaults import apply_config, apply_self # type: ignore
from .config import config
from .exceptions import ReceivedErrorResponseError
from .log import log_
from .parse import parse
from .requests import Notification, Request
from .response import ErrorResponse, Response
# Separate loggers for outgoing requests and incoming responses so each
# direction can be formatted and filtered independently.
request_log = logging.getLogger(__name__ + ".request")
response_log = logging.getLogger(__name__ + ".response")
class Client(metaclass=ABCMeta):
    """
    Protocol-agnostic base class for clients.
    Subclasses must override `send_message` to transport the message.
    """
    # Log formats show message direction: "-->" outgoing, "<--" incoming.
    DEFAULT_REQUEST_LOG_FORMAT = "--> %(message)s"
    DEFAULT_RESPONSE_LOG_FORMAT = "<-- %(message)s"
    @apply_config(config, converters={"id_generator": "getcallable"})
    def __init__(
        self,
        trim_log_values: bool = False,
        validate_against_schema: bool = True,
        id_generator: Optional[Iterator] = None,
        basic_logging: bool = False,
    ) -> None:
        """
        Args:
            trim_log_values: Abbreviate the log entries of requests and responses.
            validate_against_schema: Validate response against the JSON-RPC schema.
            id_generator: Iterable of values to use as the "id" part of the request.
            basic_logging: Will create log handlers to output request & response
                messages.
        """
        self.trim_log_values = trim_log_values
        self.validate_against_schema = validate_against_schema
        self.id_generator = id_generator
        # NOTE: the parameter shadows the method of the same name here, so
        # the method must be reached through self.
        if basic_logging:
            self.basic_logging()
    def basic_logging(self) -> None:
        """
        Call this on the client object to create log handlers to output request and
        response messages.
        """
        # Request handler (added only once, even across multiple clients)
        if len(request_log.handlers) == 0:
            request_handler = logging.StreamHandler()
            request_handler.setFormatter(
                logging.Formatter(fmt=self.DEFAULT_REQUEST_LOG_FORMAT)
            )
            request_log.addHandler(request_handler)
            request_log.setLevel(logging.INFO)
        # Response handler
        if len(response_log.handlers) == 0:
            response_handler = logging.StreamHandler()
            response_handler.setFormatter(
                logging.Formatter(fmt=self.DEFAULT_RESPONSE_LOG_FORMAT)
            )
            response_log.addHandler(response_handler)
            response_log.setLevel(logging.INFO)
    @apply_self
    def log_request(
        self, request: str, trim_log_values: bool = False, **kwargs: Any
    ) -> None:
        """
        Log a request.
        Args:
            request: The JSON-RPC request string.
            trim_log_values: Log an abbreviated version of the request.
        """
        return log_(request, request_log, "info", trim=trim_log_values, **kwargs)
    @apply_self
    def log_response(
        self, response: Response, trim_log_values: bool = False, **kwargs: Any
    ) -> None:
        """
        Log a response.
        Note this is different to log_request, in that it takes a Response object, not a
        string.
        Args:
            response: The Response object to log. Note this is different to log_request
                which takes a string.
            trim_log_values: Log an abbreviated version of the response.
        """
        return log_(response.text, response_log, "info", trim=trim_log_values, **kwargs)
    @abstractmethod
    def send_message(
        self, request: str, response_expected: bool, **kwargs: Any
    ) -> Response:
        """
        Transport the message to the server and return the response.
        Args:
            request: The JSON-RPC request string.
            response_expected: Whether the request expects a response.
        Returns:
            A Response object.
        """
    def validate_response(self, response: Response) -> None:
        """
        Can be overridden for custom validation of the response.
        Raise an exception to fail validation.
        """
        pass
    @apply_self
    def send(
        self,
        request: Union[str, Dict, List],
        trim_log_values: bool = False,
        validate_against_schema: bool = True,
        **kwargs: Any
    ) -> Response:
        """
        Send a request, passing the whole JSON-RPC request object.
        After sending, logs, validates and parses.
        >>> client.send('{"jsonrpc": "2.0", "method": "ping", "id": 1}')
        <Response[1]>
        Args:
            request: The JSON-RPC request. Can be either a JSON-encoded string or a
                Request/Notification object.
            trim_log_values: Abbreviate the log entries of requests and responses.
            validate_against_schema: Validate response against the JSON-RPC schema.
            kwargs: Clients can use this to configure a single request. For example,
                HTTPClient passes this through to `requests.Session.send()`.
        Returns:
            A Response object (no parsed result is expected
            in the case of a Notification).
        """
        # We need both the serialized and deserialized version of the request
        if isinstance(request, str):
            request_text = request
            request_deserialized = deserialize(request)
        else:
            request_text = serialize(request)
            request_deserialized = request
        # A batch is a list of requests; a response is expected for a batch
        # or for any single request carrying an "id" member.
        batch = isinstance(request_deserialized, list)
        response_expected = batch or "id" in request_deserialized
        self.log_request(request_text, trim_log_values=trim_log_values)
        response = self.send_message(
            request_text, response_expected=response_expected, **kwargs
        )
        self.log_response(response, trim_log_values=trim_log_values)
        self.validate_response(response)
        response.data = parse(
            response.text, batch=batch, validate_against_schema=validate_against_schema
        )
        # If received a single error response, raise
        if isinstance(response.data, ErrorResponse):
            raise ReceivedErrorResponseError(response.data)
        return response
    @apply_self
    def notify(
        self,
        method_name: str,
        *args: Any,
        trim_log_values: Optional[bool] = None,
        validate_against_schema: Optional[bool] = None,
        **kwargs: Any
    ) -> Response:
        """
        Send a JSON-RPC request, without expecting a response.
        Args:
            method_name: The remote procedure's method name.
            args: Positional arguments passed to the remote procedure.
            kwargs: Keyword arguments passed to the remote procedure.
            trim_log_values: Abbreviate the log entries of requests and responses.
            validate_against_schema: Validate response against the JSON-RPC schema.
        """
        return self.send(
            Notification(method_name, *args, **kwargs),
            trim_log_values=trim_log_values,
            validate_against_schema=validate_against_schema,
        )
    @apply_self
    def request(
        self,
        method_name: str,
        *args: Any,
        trim_log_values: bool = False,
        validate_against_schema: bool = True,
        id_generator: Optional[Iterator] = None,
        **kwargs: Any
    ) -> Response:
        """
        Send a request by passing the method and arguments.
        >>> client.request("cat", name="Yoko")
        <Response[1]>
        Args:
            method_name: The remote procedure's method name.
            args: Positional arguments passed to the remote procedure.
            kwargs: Keyword arguments passed to the remote procedure.
            trim_log_values: Abbreviate the log entries of requests and responses.
            validate_against_schema: Validate response against the JSON-RPC schema.
            id_generator: Iterable of values to use as the "id" part of the request.
        """
        return self.send(
            Request(method_name, id_generator=id_generator, *args, **kwargs),
            trim_log_values=trim_log_values,
            validate_against_schema=validate_against_schema,
        )
    def __getattr__(self, name: str) -> Callable:
        """
        This gives us an alternate way to make a request.
        >>> client.cube(3)
        --> {"jsonrpc": "2.0", "method": "cube", "params": [3], "id": 1}
        That's the same as saying `client.request("cube", 3)`.
        """
        def attr_handler(*args: Any, **kwargs: Any) -> Response:
            return self.request(name, *args, **kwargs)
        return attr_handler
| |
import numpy as np
import scipy
import scipy.signal
import scipy.stats

from pyqtgraph.metaarray import MetaArray
def downsample(data, n, axis=0, xvals='subsample'):
    """Downsample by averaging groups of *n* points together along *axis*.

    If multiple axes are specified, runs once per axis. If a MetaArray is
    given, the axis values can be either subsampled ('subsample') or
    averaged ('downsample') to match.

    Fixes over the previous revision:
      * negative `axis` values are normalized first, so the insertion of
        the averaging axis (`s.insert(axis+1, n)`) is placed correctly;
      * uses reshape() instead of assigning to .shape, which raises on
        non-contiguous views.
    """
    ma = None
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        ma = data
        data = data.view(np.ndarray)
    if hasattr(axis, '__len__'):
        # Multiple axes: apply one axis at a time.
        if not hasattr(n, '__len__'):
            n = [n] * len(axis)
        for i in range(len(axis)):
            data = downsample(data, n[i], axis[i])
        return data
    if axis < 0:
        axis += data.ndim
    nPts = int(data.shape[axis] / n)
    s = list(data.shape)
    s[axis] = nPts
    s.insert(axis + 1, n)
    # Drop trailing samples that don't fill a complete group of n.
    sl = [slice(None)] * data.ndim
    sl[axis] = slice(0, nPts * n)
    d1 = data[tuple(sl)].reshape(tuple(s))
    d2 = d1.mean(axis + 1)
    if ma is None:
        return d2
    # Carry the axis metadata along with the downsampled data.
    info = ma.infoCopy()
    if 'values' in info[axis]:
        if xvals == 'subsample':
            info[axis]['values'] = info[axis]['values'][::n][:nPts]
        elif xvals == 'downsample':
            info[axis]['values'] = downsample(info[axis]['values'], n)
    return MetaArray(d2, info=info)
def applyFilter(data, b, a, padding=100, bidir=True):
    """Run a linear filter with coefficients *b*, *a* over *data*.

    The signal may be padded with copies of its first/last `padding`
    samples to reduce boundary transients, and the filter may be run
    forward and backward (`bidir`) for zero phase distortion.
    """
    signal = data.view(np.ndarray)
    if padding > 0:
        signal = np.hstack([signal[:padding], signal, signal[-padding:]])
    if bidir:
        # Forward pass, then a pass over the reversed output, then undo
        # the reversal.
        forward = scipy.signal.lfilter(b, a, signal)
        signal = scipy.signal.lfilter(b, a, forward[::-1])[::-1]
    else:
        signal = scipy.signal.lfilter(b, a, signal)
    if padding > 0:
        signal = signal[padding:-padding]
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(signal, info=data.infoCopy())
    return signal
def besselFilter(data, cutoff, order=1, dt=None, btype='low', bidir=True):
    """Return *data* passed through a Bessel filter.

    Args:
        data: signal array (or MetaArray with a 'Time' axis).
        cutoff: cutoff frequency in Hz.
        order: filter order.
        dt: sample period; if None it is derived from the data's 'Time'
            axis, falling back to 1.0 for plain arrays.
        btype: 'low' or 'high'.
        bidir: run the filter in both directions for zero phase shift.
    """
    if dt is None:
        try:
            tvals = data.xvals('Time')
            dt = (tvals[-1] - tvals[0]) / (len(tvals) - 1)
        except Exception:
            # Not a MetaArray with a Time axis; assume unit sample period.
            dt = 1.0
    b, a = scipy.signal.bessel(order, cutoff * dt, btype=btype)
    return applyFilter(data, b, a, bidir=bidir)
def butterworthFilter(data, wPass, wStop=None, gPass=2.0, gStop=20.0, order=1, dt=None, btype='low', bidir=True):
    """Return *data* passed through a Butterworth filter.

    The actual filter order and natural frequency are chosen by
    scipy.signal.buttord() from the pass/stop band edges (Hz), the maximum
    passband loss `gPass` (dB) and the minimum stopband attenuation
    `gStop` (dB). NOTE: the `order` parameter is accepted for interface
    compatibility but is not used; buttord() determines the order.

    Args:
        data: signal array (or MetaArray with a 'Time' axis).
        wPass: passband edge frequency in Hz.
        wStop: stopband edge frequency in Hz (defaults to 2 * wPass).
        dt: sample period; if None it is derived from the data's 'Time'
            axis, falling back to 1.0 for plain arrays.
        btype: 'low' or 'high'.
        bidir: run the filter in both directions for zero phase shift.
    """
    if dt is None:
        try:
            tvals = data.xvals('Time')
            dt = (tvals[-1] - tvals[0]) / (len(tvals) - 1)
        except Exception:
            dt = 1.0
    if wStop is None:
        wStop = wPass * 2.0
    # Renamed from `ord` to avoid shadowing the builtin.
    filtOrder, Wn = scipy.signal.buttord(wPass * dt * 2., wStop * dt * 2., gPass, gStop)
    b, a = scipy.signal.butter(filtOrder, Wn, btype=btype)
    return applyFilter(data, b, a, bidir=bidir)
def rollingSum(data, n):
    """Return the sum over each window of *n* consecutive samples.

    The output has length len(data) - n + 1.

    Bug fix: the previous revision attempted a running sum with
    `d1[1:] += d1[:-1]`, but numpy buffers the right-hand side of an
    overlapping in-place operation, so that adds only each element's
    immediate predecessor -- it is not a cumulative sum, and every window
    sum for n > 2 was wrong. Use numpy's cumsum instead.
    """
    csum = np.cumsum(data)
    out = np.empty(len(csum) - n + 1, dtype=data.dtype)
    out[0] = csum[n - 1]              # sum of the first window
    out[1:] = csum[n:] - csum[:-n]    # difference of cumulative sums
    return out
def mode(data, bins=None):
    """Return the centre of the most heavily populated histogram bin."""
    if bins is None:
        # Default: roughly one bin per ten samples, but at least two.
        bins = max(2, int(len(data) / 10.))
    counts, edges = np.histogram(data, bins=bins)
    peak = np.argmax(counts)
    return 0.5 * (edges[peak] + edges[peak + 1])
def modeFilter(data, window=500, step=None, bins=None):
    """Filter based on histogram-based mode function"""
    # Slide a window across the data, take the histogram mode of each
    # window, then linearly interpolate between successive mode values to
    # build an output trace of the same length as the input.
    d1 = data.view(np.ndarray)
    vals = []
    l2 = int(window/2.)
    if step is None:
        step = l2
    i = 0
    while True:
        if i > len(data)-step:
            break
        vals.append(mode(d1[i:i+window], bins))
        i += step
    # The first half-window is held at the first mode value, each segment
    # ramps linearly to the next mode, and the tail is held at the last.
    chunks = [np.linspace(vals[0], vals[0], l2)]
    for i in range(len(vals)-1):
        chunks.append(np.linspace(vals[i], vals[i+1], step))
    remain = len(data) - step*(len(vals)-1) - l2
    chunks.append(np.linspace(vals[-1], vals[-1], remain))
    d2 = np.hstack(chunks)
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d2, info=data.infoCopy())
    return d2
def denoise(data, radius=2, threshold=4):
    """Very simple noise removal function. Compares a point to surrounding points,
    replaces with nearby values if the difference is too large."""
    r2 = radius * 2
    d1 = data.view(np.ndarray)
    d2 = d1[radius:] - d1[:-radius] #a derivative
    #d3 = data[r2:] - data[:-r2]
    #d4 = d2 - d3
    stdev = d2.std()
    #print "denoise: stdev of derivative:", stdev
    mask1 = d2 > stdev*threshold #where derivative is large and positive
    mask2 = d2 < -stdev*threshold #where derivative is large and negative
    # A spike shows up as a large step in one direction followed `radius`
    # samples later by a large step in the opposite direction.
    maskpos = mask1[:-radius] * mask2[radius:] #both need to be true
    maskneg = mask1[radius:] * mask2[:-radius]
    mask = maskpos + maskneg
    d5 = np.where(mask, d1[:-r2], d1[radius:-radius]) #where both are true replace the value with the value from 2 points before
    d6 = np.empty(d1.shape, dtype=d1.dtype) #add points back to the ends
    d6[radius:-radius] = d5
    d6[:radius] = d1[:radius]
    d6[-radius:] = d1[-radius:]
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d6, info=data.infoCopy())
    return d6
def adaptiveDetrend(data, x=None, threshold=3.0):
    """Return the signal with its linear baseline removed.

    Points whose detrended magnitude exceeds `threshold` standard
    deviations are excluded from the baseline fit, so spikes and
    transients don't skew the result.

    Args:
        data: signal array (or MetaArray).
        x: x-axis values; defaults to the data's first-axis values
           (MetaArray input only).
        threshold: outlier rejection threshold, in standard deviations.
    """
    if x is None:
        x = data.xvals(0)
    d = data.view(np.ndarray)
    d2 = scipy.signal.detrend(d)
    stdev = d2.std()
    mask = abs(d2) < stdev * threshold
    if not mask.any():
        # Degenerate input (e.g. constant data): fit on every point rather
        # than failing on an empty selection.
        mask[:] = True
    # Bug fix: the previous revision called `stats.linregress`, but `stats`
    # was never imported; use scipy.stats explicitly.
    lr = scipy.stats.linregress(x[mask], d[mask])
    base = lr[1] + lr[0] * x
    d4 = d - base
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d4, info=data.infoCopy())
    return d4
def histogramDetrend(data, window=500, bins=50, threshold=3.0, offsetOnly=False):
    """Linear detrend. Works by finding the most common value at the beginning and end of a trace, excluding outliers.
    If offsetOnly is True, then only the offset from the beginning of the trace is subtracted.
    """
    # Only the first and last `window` samples are examined.
    d1 = data.view(np.ndarray)
    d2 = [d1[:window], d1[-window:]]
    v = [0, 0]
    for i in [0, 1]:
        d3 = d2[i]
        stdev = d3.std()
        # Reject points more than `threshold` stdevs from the median
        # before taking the histogram mode of the segment.
        mask = abs(d3-np.median(d3)) < stdev*threshold
        d4 = d3[mask]
        y, x = np.histogram(d4, bins=bins)
        ind = np.argmax(y)
        v[i] = 0.5 * (x[ind] + x[ind+1])
    if offsetOnly:
        d3 = data.view(np.ndarray) - v[0]
    else:
        # Subtract a straight line between the two endpoint modes.
        base = np.linspace(v[0], v[1], len(data))
        d3 = data.view(np.ndarray) - base
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d3, info=data.infoCopy())
    return d3
def concatenateColumns(data):
    """Build a single record array whose columns are taken from *data*.

    Args:
        data: list whose elements are either record arrays (all of their
            columns are copied) or ``(name, dtype, values)`` tuples. A
            dtype of None is inferred from the values via suggestDType().

    Returns:
        np.ndarray: record array with one field per input column, sized to
        the longest column.

    Raises:
        Exception: if the same column name appears more than once.
    """
    ## first determine dtype
    dtype = []
    names = set()
    maxLen = 0

    def checkName(name):
        # Duplicate names would silently overwrite each other when
        # filling; reject them up front. (The previous revision only
        # checked the last column of each record array.)
        if name in names:
            raise Exception('Name "%s" repeated' % name)
        names.add(name)

    for element in data:
        if isinstance(element, np.ndarray):
            ## use existing columns
            for name in element.dtype.names:
                checkName(name)
                dtype.append((name, element.dtype[name]))
            maxLen = max(maxLen, len(element))
        else:
            # Renamed from `type` to avoid shadowing the builtin.
            name, colType, d = element
            if colType is None:
                colType = suggestDType(d)
            checkName(name)
            dtype.append((name, colType))
            if isinstance(d, (list, np.ndarray)):
                maxLen = max(maxLen, len(d))

    ## create empty array
    out = np.empty(maxLen, dtype)

    ## fill columns
    for element in data:
        if isinstance(element, np.ndarray):
            for name in element.dtype.names:
                try:
                    out[name] = element[name]
                except Exception:
                    # Print context (shapes/dtypes) before re-raising to
                    # help debug assignment mismatches.
                    print("Column:", name)
                    print("Input shape:", element.shape, element.dtype)
                    print("Output shape:", out.shape, out.dtype)
                    raise
        else:
            name, colType, d = element
            out[name] = d
    return out
def suggestDType(x):
    """Return a numpy-compatible dtype guess for *x*.

    For lists and tuples the first element is inspected; anything without
    an explicit dtype that is not an int or float is stored as a generic
    object (rather than guessing a fixed string width).

    Raises:
        Exception: for an empty list/tuple, where no guess is possible.
    """
    if isinstance(x, (list, tuple)):
        if len(x) == 0:
            raise Exception('can not determine dtype for empty list')
        x = x[0]
    if hasattr(x, 'dtype'):
        return x.dtype
    elif isinstance(x, float):
        return float
    elif isinstance(x, int):
        return int
    else:
        return object
def removePeriodic(data, f0=60.0, dt=None, harmonics=10, samples=4):
    """Suppress periodic interference (e.g. mains hum) at *f0* and harmonics.

    The FFT bins around each harmonic are flattened to the mean magnitude
    of the neighbouring bins while preserving each bin's phase, then the
    signal is rebuilt with an inverse FFT.

    Args:
        data: 1-D signal (ndarray, or MetaArray with a 'Time' axis).
        f0: fundamental frequency to remove, in Hz.
        dt: sample period; required for plain arrays, derived from the
            Time axis for MetaArrays.
        harmonics: number of harmonics above f0 to flatten.
        samples: width (in FFT bins) of the notch at each harmonic.

    Raises:
        Exception: if dt is None and data carries no time information.
    """
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        data1 = data.asarray()
        if dt is None:
            times = data.xvals('Time')
            dt = times[1] - times[0]
    else:
        data1 = data
        if dt is None:
            raise Exception('Must specify dt for this data')
    ft = np.fft.fft(data1)
    ## determine frequency resolution of the fft data
    df = 1.0 / (len(data1) * dt)
    ## flatten spikes at f0 and harmonics
    # Fix: xrange is Python 2 only; use range.
    for i in range(1, harmonics + 2):
        f = f0 * i  # target frequency
        ## determine index range to check for this frequency
        ind1 = int(np.floor(f / df))
        ind2 = int(np.ceil(f / df)) + (samples - 1)
        if ind1 > len(ft) / 2.:
            break
        # Replacement magnitude: mean of the bins just outside the notch.
        mag = (abs(ft[ind1 - 1]) + abs(ft[ind2 + 1])) * 0.5
        for j in range(ind1, ind2 + 1):
            ## Must preserve the phase of each point, otherwise any
            ## transients in the trace might lead to large artifacts.
            phase = np.angle(ft[j])
            re = mag * np.cos(phase)
            im = mag * np.sin(phase)
            ft[j] = re + im * 1j
            # Mirror the conjugate bin so the inverse FFT stays real.
            ft[len(ft) - j] = re - im * 1j
    data2 = np.fft.ifft(ft).real
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        # Fix: was metaarray.MetaArray, but this module imports MetaArray
        # directly and `metaarray` is undefined here.
        return MetaArray(data2, info=data.infoCopy())
    else:
        return data2
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import datetime
import json
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import rpc
from nova.rpc import common as rpc_common
from nova.scheduler import driver
from nova.scheduler import manager
from nova import test
from nova.tests.scheduler import fakes
from nova import utils
FLAGS = flags.FLAGS
class SchedulerManagerTestCase(test.TestCase):
"""Test case for scheduler manager"""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
driver_cls_name = 'nova.scheduler.driver.Scheduler'
def setUp(self):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_cls_name)
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
def test_1_correct_init(self):
# Correct scheduler driver
manager = self.manager
self.assertTrue(isinstance(manager.driver, self.driver_cls))
def test_get_host_list(self):
expected = 'fake_hosts'
self.mox.StubOutWithMock(self.manager.driver, 'get_host_list')
self.manager.driver.get_host_list().AndReturn(expected)
self.mox.ReplayAll()
result = self.manager.get_host_list(self.context)
self.assertEqual(result, expected)
def test_get_zone_list(self):
expected = 'fake_zones'
self.mox.StubOutWithMock(self.manager.driver, 'get_zone_list')
self.manager.driver.get_zone_list().AndReturn(expected)
self.mox.ReplayAll()
result = self.manager.get_zone_list(self.context)
self.assertEqual(result, expected)
def test_get_service_capabilities(self):
expected = 'fake_service_capabs'
self.mox.StubOutWithMock(self.manager.driver,
'get_service_capabilities')
self.manager.driver.get_service_capabilities().AndReturn(
expected)
self.mox.ReplayAll()
result = self.manager.get_service_capabilities(self.context)
self.assertEqual(result, expected)
def test_update_service_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
self.mox.StubOutWithMock(self.manager.driver,
'update_service_capabilities')
# Test no capabilities passes empty dictionary
self.manager.driver.update_service_capabilities(service_name,
host, {})
self.mox.ReplayAll()
result = self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host)
self.mox.VerifyAll()
self.mox.ResetAll()
# Test capabilities passes correctly
capabilities = {'fake_capability': 'fake_value'}
self.manager.driver.update_service_capabilities(
service_name, host, capabilities)
self.mox.ReplayAll()
result = self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host,
capabilities=capabilities)
    def test_existing_method(self):
        """A manager call resolves to the driver's schedule_<name> method
        when one exists.
        """
        def stub_method(self, *args, **kwargs):
            pass
        # Give the driver a 'schedule_stub_method' so the manager's
        # attribute fallback can find it before we mock it out.
        setattr(self.manager.driver, 'schedule_stub_method', stub_method)
        self.mox.StubOutWithMock(self.manager.driver,
                'schedule_stub_method')
        # Note: the topic argument is consumed by the manager and not
        # forwarded to the driver method.
        self.manager.driver.schedule_stub_method(self.context,
                *self.fake_args, **self.fake_kwargs)
        self.mox.ReplayAll()
        self.manager.stub_method(self.context, self.topic,
                *self.fake_args, **self.fake_kwargs)
    def test_missing_method_fallback(self):
        """A method with no schedule_<name> counterpart falls back to the
        driver's generic schedule() call.
        """
        self.mox.StubOutWithMock(self.manager.driver, 'schedule')
        # The unresolved name ('noexist') is forwarded as an argument to
        # the generic schedule() call.
        self.manager.driver.schedule(self.context, self.topic,
                'noexist', *self.fake_args, **self.fake_kwargs)
        self.mox.ReplayAll()
        self.manager.noexist(self.context, self.topic,
                *self.fake_args, **self.fake_kwargs)
    def test_select(self):
        """select() is forwarded directly to the driver (no topic arg)."""
        expected = 'fake_select'
        self.mox.StubOutWithMock(self.manager.driver, 'select')
        self.manager.driver.select(self.context,
                *self.fake_args, **self.fake_kwargs).AndReturn(expected)
        self.mox.ReplayAll()
        result = self.manager.select(self.context, *self.fake_args,
                **self.fake_kwargs)
        self.assertEqual(result, expected)
    def test_show_host_resources(self):
        """show_host_resources() reports the compute node's resource
        totals plus instance usage (vcpus/memory/disk) summed per project.
        """
        host = 'fake_host'
        computes = [{'host': host,
                     'compute_node': [{'vcpus': 4,
                                       'vcpus_used': 2,
                                       'memory_mb': 1024,
                                       'memory_mb_used': 512,
                                       'local_gb': 1024,
                                       'local_gb_used': 512}]}]
        # Two instances belong to project1 and one to project2; the
        # expected 'usage' below is the per-project sum of these values.
        instances = [{'project_id': 'project1',
                      'vcpus': 1,
                      'memory_mb': 128,
                      'root_gb': 128,
                      'ephemeral_gb': 0},
                     {'project_id': 'project1',
                      'vcpus': 2,
                      'memory_mb': 256,
                      'root_gb': 384,
                      'ephemeral_gb': 0},
                     {'project_id': 'project2',
                      'vcpus': 2,
                      'memory_mb': 256,
                      'root_gb': 256,
                      'ephemeral_gb': 0}]
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        db.service_get_all_compute_by_host(self.context, host).AndReturn(
                computes)
        db.instance_get_all_by_host(self.context, host).AndReturn(instances)
        self.mox.ReplayAll()
        result = self.manager.show_host_resources(self.context, host)
        # 'resource' mirrors the compute_node record; 'usage' aggregates
        # the instances per project (e.g. project1: 128+256 memory_mb).
        expected = {'usage': {'project1': {'memory_mb': 384,
                                           'vcpus': 3,
                                           'root_gb': 512,
                                           'ephemeral_gb': 0},
                              'project2': {'memory_mb': 256,
                                           'vcpus': 2,
                                           'root_gb': 256,
                                           'ephemeral_gb': 0}},
                    'resource': {'vcpus': 4,
                                 'vcpus_used': 2,
                                 'local_gb': 1024,
                                 'local_gb_used': 512,
                                 'memory_mb': 1024,
                                 'memory_mb_used': 512}}
        self.assertDictMatch(result, expected)
    def test_run_instance_exception_puts_instance_in_error_state(self):
        """Test that a NoValidHost exception for run_instance puts
        the instance in ERROR state and eats the exception.
        """
        fake_instance_uuid = 'fake-instance-id'
        # Make sure the method exists that we're going to test call
        def stub_method(*args, **kwargs):
            pass
        setattr(self.manager.driver, 'schedule_run_instance', stub_method)
        self.mox.StubOutWithMock(self.manager.driver,
                'schedule_run_instance')
        self.mox.StubOutWithMock(db, 'instance_update')
        request_spec = {'instance_properties':
                {'uuid': fake_instance_uuid}}
        self.fake_kwargs['request_spec'] = request_spec
        # The driver raises NoValidHost ...
        self.manager.driver.schedule_run_instance(self.context,
                *self.fake_args, **self.fake_kwargs).AndRaise(
                        exception.NoValidHost(reason=""))
        # ... and the manager is expected to mark the instance ERROR.
        db.instance_update(self.context, fake_instance_uuid,
                {'vm_state': vm_states.ERROR})
        self.mox.ReplayAll()
        # Must not re-raise; the exception is swallowed by the manager.
        self.manager.run_instance(self.context, self.topic,
                *self.fake_args, **self.fake_kwargs)
class SchedulerTestCase(test.TestCase):
    """Test case for base scheduler driver class"""
    # So we can subclass this test and re-use tests if we need.
    driver_cls = driver.Scheduler
    def setUp(self):
        """Create a fresh driver and request context for each test."""
        super(SchedulerTestCase, self).setUp()
        # Replace the real compute API so no compute service is required.
        self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
        self.driver = self.driver_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
    def test_get_host_list(self):
        """get_host_list() proxies to the driver's host_manager."""
        expected = 'fake_hosts'
        self.mox.StubOutWithMock(self.driver.host_manager, 'get_host_list')
        self.driver.host_manager.get_host_list().AndReturn(expected)
        self.mox.ReplayAll()
        result = self.driver.get_host_list()
        self.assertEqual(result, expected)
    def test_get_zone_list(self):
        """get_zone_list() proxies to the driver's zone_manager."""
        expected = 'fake_zones'
        self.mox.StubOutWithMock(self.driver.zone_manager, 'get_zone_list')
        self.driver.zone_manager.get_zone_list().AndReturn(expected)
        self.mox.ReplayAll()
        result = self.driver.get_zone_list()
        self.assertEqual(result, expected)
def test_get_service_capabilities(self):
expected = 'fake_service_capabs'
self.mox.StubOutWithMock(self.driver.host_manager,
'get_service_capabilities')
self.driver.host_manager.get_service_capabilities().AndReturn(
expected)
self.mox.ReplayAll()
result = self.driver.get_service_capabilities()
self.assertEqual(result, expected)
def test_update_service_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
self.mox.StubOutWithMock(self.driver.host_manager,
'update_service_capabilities')
capabilities = {'fake_capability': 'fake_value'}
self.driver.host_manager.update_service_capabilities(
service_name, host, capabilities)
self.mox.ReplayAll()
result = self.driver.update_service_capabilities(service_name,
host, capabilities)
    def test_hosts_up(self):
        """hosts_up() returns only hosts whose service is alive."""
        service1 = {'host': 'host1'}
        service2 = {'host': 'host2'}
        services = [service1, service2]
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        db.service_get_all_by_topic(self.context,
                self.topic).AndReturn(services)
        # host1's service is down, host2's is up.
        utils.service_is_up(service1).AndReturn(False)
        utils.service_is_up(service2).AndReturn(True)
        self.mox.ReplayAll()
        result = self.driver.hosts_up(self.context, self.topic)
        self.assertEqual(result, ['host2'])
    def test_create_instance_db_entry(self):
        """create_instance_db_entry() creates a new instance record, or
        fetches the existing one when the request_spec's instance
        properties already carry a uuid (entry pre-created by compute).
        """
        base_options = {'fake_option': 'meow'}
        image = 'fake_image'
        instance_type = 'fake_instance_type'
        security_group = 'fake_security_group'
        block_device_mapping = 'fake_block_device_mapping'
        request_spec = {'instance_properties': base_options,
                        'image': image,
                        'instance_type': instance_type,
                        'security_group': security_group,
                        'block_device_mapping': block_device_mapping}
        self.mox.StubOutWithMock(self.driver.compute_api,
                'create_db_entry_for_new_instance')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        # New entry
        fake_instance = {'uuid': 'fake-uuid'}
        self.driver.compute_api.create_db_entry_for_new_instance(
                self.context, instance_type, image, base_options,
                security_group,
                block_device_mapping).AndReturn(fake_instance)
        self.mox.ReplayAll()
        instance = self.driver.create_instance_db_entry(self.context,
                request_spec)
        self.mox.VerifyAll()
        self.assertEqual(instance, fake_instance)
        # Entry created by compute already
        self.mox.ResetAll()
        fake_uuid = 'fake-uuid'
        base_options['uuid'] = fake_uuid
        fake_instance = {'uuid': fake_uuid}
        # With a uuid present, only a lookup should happen -- no create.
        db.instance_get_by_uuid(self.context, fake_uuid).AndReturn(
                fake_instance)
        self.mox.ReplayAll()
        instance = self.driver.create_instance_db_entry(self.context,
                request_spec)
        self.assertEqual(instance, fake_instance)
    def _live_migration_instance(self):
        """Return a canned running instance dict with two attached
        volumes, shared by the live-migration tests below.
        """
        volume1 = {'id': 31338}
        volume2 = {'id': 31339}
        return {'id': 31337, 'name': 'fake-instance',
                'host': 'fake_host1',
                'volumes': [volume1, volume2],
                'power_state': power_state.RUNNING,
                'memory_mb': 1024,
                'root_gb': 1024,
                'ephemeral_gb': 0}
    def test_live_migration_basic(self):
        """Test basic schedule_live_migration functionality"""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
        self.mox.StubOutWithMock(db, 'instance_update')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # All three checks are stubbed out to pass silently.
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        self.driver._live_migration_common_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        # The instance and each attached volume get a 'migrating' status.
        db.instance_update(self.context, instance['id'],
                {'vm_state': vm_states.MIGRATING})
        db.volume_update(self.context, instance['volumes'][0]['id'],
                {'status': 'migrating'})
        db.volume_update(self.context, instance['volumes'][1]['id'],
                {'status': 'migrating'})
        # The migration is cast to the *source* compute host.
        driver.cast_to_compute_host(self.context, instance['host'],
                'live_migration', update_db=False,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
        self.mox.ReplayAll()
        self.driver.schedule_live_migration(self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    def test_live_migration_all_checks_pass(self):
        """Test live migration when all checks pass."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'instance_update')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # Source checks (volume and source compute are up)
        db.service_get_all_by_topic(self.context, 'volume').AndReturn(
                ['fake_service'])
        utils.service_is_up('fake_service').AndReturn(True)
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(['fake_service2'])
        utils.service_is_up('fake_service2').AndReturn(True)
        # Destination checks (compute is up, enough memory, disk)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # assert_compute_node_has_enough_memory()
        # dest has 2048 total, 256+512 used: 1280 free >= 1024 needed.
        self.driver._get_compute_info(self.context, dest,
                'memory_mb').AndReturn(2048)
        db.instance_get_all_by_host(self.context, dest).AndReturn(
                [dict(memory_mb=256), dict(memory_mb=512)])
        # assert_compute_node_has_enough_disk()
        # 1025 GB available vs a 1024 GB disk reported by the source.
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1025)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue1')
        rpc.call(self.context, 'src_queue1',
                {'method': 'get_instance_disk_info',
                 'args': {'instance_name': instance['name']}}).AndReturn(
                        json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))
        # Common checks (shared storage ok, same hypervisor, etc)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        # False here: storage NOT shared, which is required for block
        # migration (block_migration=True above).
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(False)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # newer hypervisor version for src
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'xen',
                                            'hypervisor_version': 1,
                                            'cpu_info': 'fake_cpu_info'}]}])
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                {'method': 'compare_cpu',
                 'args': {'cpu_info': 'fake_cpu_info'}}).AndReturn(True)
        db.instance_update(self.context, instance['id'],
                {'vm_state': vm_states.MIGRATING})
        db.volume_update(self.context, instance['volumes'][0]['id'],
                {'status': 'migrating'})
        db.volume_update(self.context, instance['volumes'][1]['id'],
                {'status': 'migrating'})
        driver.cast_to_compute_host(self.context, instance['host'],
                'live_migration', update_db=False,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
        self.mox.ReplayAll()
        result = self.driver.schedule_live_migration(self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
        self.assertEqual(result, None)
def test_live_migration_instance_not_running(self):
"""The instance given by instance_id is not running."""
self.mox.StubOutWithMock(db, 'instance_get')
dest = 'fake_host2'
block_migration = False
instance = self._live_migration_instance()
instance['power_state'] = power_state.NOSTATE
db.instance_get(self.context, instance['id']).AndReturn(instance)
self.mox.ReplayAll()
c = False
try:
self.driver.schedule_live_migration(self.context,
instance_id=instance['id'], dest=dest,
block_migration=block_migration)
self._test_scheduler_live_migration(options)
except exception.Invalid, e:
c = (str(e).find('is not running') > 0)
self.assertTrue(c)
    def test_live_migration_volume_node_not_alive(self):
        """Raise exception when volume node is not alive."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # Volume down
        db.service_get_all_by_topic(self.context, 'volume').AndReturn(
                ['fake_service'])
        utils.service_is_up('fake_service').AndReturn(False)
        self.mox.ReplayAll()
        self.assertRaises(exception.VolumeServiceUnavailable,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
    def test_live_migration_compute_src_not_alive(self):
        """Raise exception when src compute node is not alive."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # Volume up
        db.service_get_all_by_topic(self.context, 'volume').AndReturn(
                ['fake_service'])
        utils.service_is_up('fake_service').AndReturn(True)
        # Compute down
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(['fake_service2'])
        utils.service_is_up('fake_service2').AndReturn(False)
        self.mox.ReplayAll()
        self.assertRaises(exception.ComputeServiceUnavailable,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
    def test_live_migration_compute_dest_not_alive(self):
        """Raise exception when dest compute node is not alive."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # Source check is stubbed to pass so the dest check is exercised.
        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        # Compute is down
        utils.service_is_up('fake_service3').AndReturn(False)
        self.mox.ReplayAll()
        self.assertRaises(exception.ComputeServiceUnavailable,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
def test_live_migration_dest_check_service_same_host(self):
"""Confirms exception raises in case dest and src is same host."""
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
self.mox.StubOutWithMock(utils, 'service_is_up')
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
# make dest same as src
dest = instance['host']
db.instance_get(self.context, instance['id']).AndReturn(instance)
self.driver._live_migration_src_check(self.context, instance)
db.service_get_all_compute_by_host(self.context,
dest).AndReturn(['fake_service3'])
utils.service_is_up('fake_service3').AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.UnableToMigrateToSelf,
self.driver.schedule_live_migration, self.context,
instance_id=instance['id'], dest=dest,
block_migration=block_migration,
disk_over_commit=False)
    def test_live_migration_dest_check_service_lack_memory(self):
        """Confirms exception raises when dest doesn't have enough memory."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # dest has 2048 total but 1024+512 used: only 512 free, which is
        # less than the instance's 1024 memory_mb -> MigrationError.
        self.driver._get_compute_info(self.context, dest,
                'memory_mb').AndReturn(2048)
        db.instance_get_all_by_host(self.context, dest).AndReturn(
                [dict(memory_mb=1024), dict(memory_mb=512)])
        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    def test_block_migration_dest_check_service_lack_disk(self):
        """Confirms exception raises when dest doesn't have enough disk."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(self.driver,
                'assert_compute_node_has_enough_memory')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # Enough memory
        self.driver.assert_compute_node_has_enough_memory(self.context,
                instance, dest)
        # Not enough disk
        # 1023 GB available vs a 1024 GB disk reported by the source.
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1023)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        rpc.call(self.context, 'src_queue',
                {'method': 'get_instance_disk_info',
                 'args': {'instance_name': instance['name']}}).AndReturn(
                        json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))
        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    def test_live_migration_different_shared_storage_raises(self):
        """Src and dest must have same shared storage for live migration"""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        # False: the test file written on dest is not visible on src,
        # i.e. the hosts do not share storage.
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(False)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        self.mox.ReplayAll()
        # FIXME(comstud): See LP891756.
        self.assertRaises(exception.FileNotFound,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    def test_live_migration_same_shared_storage_okay(self):
        """live migration works with same src and dest shared storage"""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        # NOTE(review): despite the test name, this returns False
        # (storage NOT shared), making the test identical to the
        # 'different shared storage' case above -- presumably pending
        # the FIXME below; confirm against LP891756.
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(False)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        self.mox.ReplayAll()
        # FIXME(comstud): See LP891756.
        self.assertRaises(exception.FileNotFound,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    def test_live_migration_different_hypervisor_type_raises(self):
        """Mismatched hypervisor types between src and dest must raise."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        # True: shared storage check passes this time.
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(True)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # different hypervisor type
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'not-xen',
                                            'hypervisor_version': 1}]}])
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidHypervisorType,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    def test_live_migration_dest_hypervisor_version_older_raises(self):
        """A dest hypervisor older than the src hypervisor must raise."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(True)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # newer hypervisor version for src
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'xen',
                                            'hypervisor_version': 2}]}])
        self.mox.ReplayAll()
        self.assertRaises(exception.DestinationHypervisorTooOld,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    def test_live_migration_dest_host_incompatable_cpu_raises(self):
        """A failing compare_cpu call on the dest propagates RemoteError."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(True)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'xen',
                                            'hypervisor_version': 1,
                                            'cpu_info': 'fake_cpu_info'}]}])
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        # The CPU comparison on the dest blows up with a RemoteError.
        rpc.call(self.context, 'dest_queue',
                {'method': 'compare_cpu',
                 'args': {'cpu_info': 'fake_cpu_info'}}).AndRaise(
                        rpc.RemoteError())
        self.mox.ReplayAll()
        self.assertRaises(rpc_common.RemoteError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
class SchedulerDriverModuleTestCase(test.TestCase):
    """Test case for scheduler driver module methods"""
    def setUp(self):
        """Create a request context shared by all tests."""
        super(SchedulerDriverModuleTestCase, self).setUp()
        self.context = context.RequestContext('fake_user', 'fake_project')
    def test_cast_to_volume_host_update_db_with_volume_id(self):
        """With update_db=True and a volume_id kwarg, the volume record
        gets its host and scheduled_at set before the cast.
        """
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'volume_id': 31337,
                       'extra_arg': 'meow'}
        queue = 'fake_queue'
        self.mox.StubOutWithMock(utils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')
        utils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337,
                {'host': host, 'scheduled_at': 'fake-now'})
        db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})
        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context, host, method,
                update_db=True, **fake_kwargs)
    def test_cast_to_volume_host_update_db_without_volume_id(self):
        """update_db=True without a volume_id kwarg: no volume_update is
        expected, only the queue lookup and cast.
        """
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        queue = 'fake_queue'
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')
        db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})
        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context, host, method,
                update_db=True, **fake_kwargs)
    def test_cast_to_volume_host_no_update_db(self):
        """update_db=False skips the db update entirely."""
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        queue = 'fake_queue'
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')
        db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})
        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context, host, method,
                update_db=False, **fake_kwargs)
    def test_cast_to_compute_host_update_db_with_instance_id(self):
        """With update_db=True and an instance_id kwarg, the instance
        record gets its host and scheduled_at set before the cast.
        """
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'instance_id': 31337,
                       'extra_arg': 'meow'}
        queue = 'fake_queue'
        self.mox.StubOutWithMock(utils, 'utcnow')
        self.mox.StubOutWithMock(db, 'instance_update')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')
        utils.utcnow().AndReturn('fake-now')
        db.instance_update(self.context, 31337,
                {'host': host, 'scheduled_at': 'fake-now'})
        db.queue_get_for(self.context, 'compute', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})
        self.mox.ReplayAll()
        driver.cast_to_compute_host(self.context, host, method,
                update_db=True, **fake_kwargs)
    def test_cast_to_compute_host_update_db_without_instance_id(self):
        """update_db=True without an instance_id kwarg: no instance_update
        is expected, only the queue lookup and cast.
        """
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        queue = 'fake_queue'
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')
        db.queue_get_for(self.context, 'compute', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})
        self.mox.ReplayAll()
        driver.cast_to_compute_host(self.context, host, method,
                update_db=True, **fake_kwargs)
    def test_cast_to_compute_host_no_update_db(self):
        """update_db=False skips the db update entirely."""
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        queue = 'fake_queue'
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')
        db.queue_get_for(self.context, 'compute', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})
        self.mox.ReplayAll()
        driver.cast_to_compute_host(self.context, host, method,
                update_db=False, **fake_kwargs)
def test_cast_to_network_host(self):
host = 'fake_host1'
method = 'fake_method'
fake_kwargs = {'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(db, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
db.queue_get_for(self.context, 'network', host).AndReturn(queue)
rpc.cast(self.context, queue,
{'method': method,
'args': fake_kwargs})
self.mox.ReplayAll()
driver.cast_to_network_host(self.context, host, method,
update_db=True, **fake_kwargs)
    def test_cast_to_host_compute_topic(self):
        # cast_to_host with topic 'compute' must delegate to
        # cast_to_compute_host with the same arguments.
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
        driver.cast_to_compute_host(self.context, host, method,
                                    update_db=False, **fake_kwargs)
        self.mox.ReplayAll()
        driver.cast_to_host(self.context, 'compute', host, method,
                            update_db=False, **fake_kwargs)
    def test_cast_to_host_volume_topic(self):
        # cast_to_host with topic 'volume' must delegate to
        # cast_to_volume_host with the same arguments.
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        self.mox.StubOutWithMock(driver, 'cast_to_volume_host')
        driver.cast_to_volume_host(self.context, host, method,
                                   update_db=False, **fake_kwargs)
        self.mox.ReplayAll()
        driver.cast_to_host(self.context, 'volume', host, method,
                            update_db=False, **fake_kwargs)
    def test_cast_to_host_network_topic(self):
        # cast_to_host with topic 'network' must delegate to
        # cast_to_network_host with the same arguments.
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        self.mox.StubOutWithMock(driver, 'cast_to_network_host')
        driver.cast_to_network_host(self.context, host, method,
                                    update_db=False, **fake_kwargs)
        self.mox.ReplayAll()
        driver.cast_to_host(self.context, 'network', host, method,
                            update_db=False, **fake_kwargs)
    def test_cast_to_host_unknown_topic(self):
        # For a topic with no dedicated helper, cast_to_host falls back to
        # a generic queue lookup plus rpc.cast.
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        topic = 'unknown'
        queue = 'fake_queue'
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')
        db.queue_get_for(self.context, topic, host).AndReturn(queue)
        rpc.cast(self.context, queue,
                 {'method': method,
                  'args': fake_kwargs})
        self.mox.ReplayAll()
        driver.cast_to_host(self.context, topic, host, method,
                            update_db=False, **fake_kwargs)
    def test_encode_instance(self):
        instance = {'id': 31337,
                    'test_arg': 'meow'}
        # Second argument True: the encoded form keeps only 'id' and tags
        # _is_precooked=False (a reference, to be re-fetched by receiver).
        result = driver.encode_instance(instance, True)
        expected = {'id': instance['id'], '_is_precooked': False}
        self.assertDictMatch(result, expected)
        # Orig dict not changed
        self.assertNotEqual(result, instance)
        # Second argument False: full copy of the dict plus
        # _is_precooked=True.
        result = driver.encode_instance(instance, False)
        expected = {}
        expected.update(instance)
        expected['_is_precooked'] = True
        self.assertDictMatch(result, expected)
        # Orig dict not changed
        # NOTE(review): assertNotEqual(result, instance) only proves the two
        # dicts differ, not that `instance` itself was left unmutated.
        self.assertNotEqual(result, instance)
| |
# -*- coding: utf8 -*-
#
# Copyright (c) 2016 Linux Documentation Project
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import io
import os
import uuid
import errno
import codecs
import random
from tempfile import NamedTemporaryFile as ntf
from argparse import Namespace
from tldptesttools import TestInventoryBase, TestToolsFilesystem
from tldp.typeguesser import knowndoctypes
from tldp.inventory import stypes, status_types
from tldp.sources import SourceDocument
from tldp.outputs import OutputDirectory
# -- Test Data
import example
# -- SUT
import tldp.config
import tldp.driver
# -- shorthand
opj = os.path.join  # join path components
opd = os.path.dirname  # directory part of a path
opa = os.path.abspath  # absolute path
# Directory of fixture documents shipped next to this test module.
sampledocs = opj(opd(__file__), 'sample-documents')
# Presumably column widths for report output; not used in this chunk -- verify.
widths = Namespace(status=20, stem=50)
class TestDriverDetail(TestInventoryBase):
    """Tests for tldp.driver.detail() listing output."""

    def test_stale_detail_verbosity(self):
        c = self.config
        self.add_stale('Stale-HOWTO', example.ex_docbook4xml)
        # Fix: was "c.verbose = True," -- the stray trailing comma made this
        # the tuple (True,), which only worked because any non-empty tuple
        # is truthy.  Assign the plain boolean.
        c.verbose = True
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        docs = inv.all.values()
        stdout = io.StringIO()
        tldp.driver.detail(c, docs, file=stdout)
        stdout.seek(0)
        self.assertTrue('changed source' in stdout.read())

    def test_broken_detail_verbosity(self):
        c = self.config
        self.add_broken('Broken-HOWTO', example.ex_docbook4xml)
        c.verbose = True  # fix: dropped stray trailing comma (was a 1-tuple)
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        docs = inv.all.values()
        stdout = io.StringIO()
        tldp.driver.detail(c, docs, file=stdout)
        stdout.seek(0)
        self.assertTrue('missing output' in stdout.read())

    def test_orphan_verbosity(self):
        c = self.config
        self.add_orphan('Orphan-HOWTO', example.ex_docbook4xml)
        c.verbose = True  # fix: dropped stray trailing comma (was a 1-tuple)
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        docs = inv.all.values()
        stdout = io.StringIO()
        tldp.driver.detail(c, docs, file=stdout)
        stdout.seek(0)
        self.assertTrue('missing source' in stdout.read())

    def test_run_detail(self):
        # One document in every status; --detail must exit cleanly.
        self.add_published('Published-HOWTO', example.ex_linuxdoc)
        self.add_new('New-HOWTO', example.ex_linuxdoc)
        self.add_stale('Stale-HOWTO', example.ex_linuxdoc)
        self.add_orphan('Orphan-HOWTO', example.ex_linuxdoc)
        self.add_broken('Broken-HOWTO', example.ex_linuxdoc)
        argv = self.argv
        argv.append('--detail')
        exitcode = tldp.driver.run(argv)
        self.assertEqual(exitcode, os.EX_OK)
class TestDriverShowDoctypes(TestToolsFilesystem):
    """Tests for the --doctypes listing subcommand."""

    def test_show_doctypes(self):
        # Write the doctype listing to a real temp file, then read it back
        # and check every known doctype's format name appears.
        tmp = ntf(dir=self.tempdir, prefix='doctypes-', delete=False)
        tmp.close()
        with codecs.open(tmp.name, 'w', encoding='utf-8') as f:
            result = tldp.driver.show_doctypes(Namespace(), file=f)
            self.assertEqual(result, os.EX_OK)
        with codecs.open(tmp.name, encoding='utf-8') as readback:
            listing = readback.read()
        for doctype in knowndoctypes:
            self.assertTrue(doctype.formatname in listing)

    def test_show_doctypes_extraargs(self):
        # Unexpected positional args yield an error string, not an exit code.
        result = tldp.driver.show_doctypes(Namespace(), 'bogus')
        self.assertTrue('Extra arguments' in result)

    def test_run_doctypes(self):
        self.assertEqual(tldp.driver.run(['--doctypes']), os.EX_OK)
class TestDriverShowStatustypes(TestToolsFilesystem):
    """Tests for the --statustypes listing subcommand."""

    def test_show_statustypes(self):
        buf = io.StringIO()
        self.assertEqual(tldp.driver.show_statustypes(Namespace(), file=buf),
                         os.EX_OK)
        buf.seek(0)
        listing = buf.read()
        # Every status type's human-readable name must be in the listing.
        for status in status_types:
            self.assertTrue(stypes[status] in listing)

    def test_show_statustypes_extraargs(self):
        # Unexpected positional args yield an error string, not an exit code.
        result = tldp.driver.show_statustypes(Namespace(), 'bogus')
        self.assertTrue('Extra arguments' in result)

    def test_run_statustypes(self):
        self.assertEqual(tldp.driver.run(['--statustypes']), os.EX_OK)
class TestDriverSummary(TestInventoryBase):
    """Tests for the --summary report."""

    def test_run_summary(self):
        # One document per status class; --summary must exit cleanly.
        for adder, stem in ((self.add_published, 'Published-HOWTO'),
                            (self.add_new, 'New-HOWTO'),
                            (self.add_stale, 'Stale-HOWTO'),
                            (self.add_orphan, 'Orphan-HOWTO'),
                            (self.add_broken, 'Broken-HOWTO')):
            adder(stem, example.ex_linuxdoc)
        argv = self.argv
        argv.append('--summary')
        self.assertEqual(tldp.driver.run(argv), os.EX_OK)

    def test_summary_extraargs(self):
        result = tldp.driver.summary(Namespace(), 'bogus')
        self.assertTrue('Extra arguments' in result)

    def test_summary_pubdir(self):
        self.config.pubdir = None
        self.assertTrue('Option --pubdir' in tldp.driver.summary(self.config))

    def test_summary_sourcedir(self):
        self.config.sourcedir = None
        self.assertTrue(
            'Option --sourcedir' in tldp.driver.summary(self.config))

    def publishDocumentsWithLongNames(self, count):
        # UUID stems are long enough to trigger the "... and N more"
        # truncation in non-verbose summary output.
        names = [str(uuid.uuid4()) for _ in range(count)]
        for stem in names:
            self.add_published(stem, random.choice(example.sources))
        return names

    def test_summary_longnames(self):
        c = self.config
        names = self.publishDocumentsWithLongNames(5)
        buf = io.StringIO()
        self.assertEqual(tldp.driver.summary(c, file=buf), os.EX_OK)
        buf.seek(0)
        self.assertTrue('and 4 more' in buf.read())
        # Verbose mode must list every stem in full.
        c.verbose = True
        buf = io.StringIO()
        self.assertEqual(tldp.driver.summary(c, file=buf), os.EX_OK)
        buf.seek(0)
        report = buf.read()
        for stem in names:
            self.assertTrue(stem in report)

    def publishDocumentsWithShortNames(self, count):
        # Short hex stems: more of them fit per line before truncation.
        names = []
        for _ in range(count):
            stem = hex(random.randint(0, 2 ** 32))
            names.append(stem)
            self.add_published(stem, random.choice(example.sources))
        return names

    def test_summary_short(self):
        c = self.config
        names = self.publishDocumentsWithShortNames(20)
        buf = io.StringIO()
        self.assertEqual(tldp.driver.summary(c, file=buf), os.EX_OK)
        buf.seek(0)
        self.assertTrue('and 16 more' in buf.read())
        c.verbose = True
        buf = io.StringIO()
        self.assertEqual(tldp.driver.summary(c, file=buf), os.EX_OK)
        buf.seek(0)
        report = buf.read()
        for stem in names:
            self.assertTrue(stem in report)
class TestcreateBuildDirectory(TestToolsFilesystem):

    def test_createBuildDirectory(self):
        # The intermediate 'child' directory does not exist, so creating
        # the grandchild must fail with ENOENT.
        target = os.path.join(self.tempdir, 'child', 'grandchild')
        ready, error = tldp.driver.createBuildDirectory(target)
        self.assertFalse(ready)
        self.assertEqual(error, errno.ENOENT)
class Testbuilddir_setup(TestToolsFilesystem):

    def test_builddir_setup_default(self):
        # No builddir configured: setup must derive one and report ready.
        config = Namespace()
        _, config.pubdir = self.adddir('pubdir')
        config.builddir = None
        ready, _ = tldp.driver.builddir_setup(config)
        self.assertTrue(ready)

    def test_builddir_setup_specified(self):
        # Explicit builddir: setup must accept it and report ready.
        config = Namespace()
        _, config.pubdir = self.adddir('pubdir')
        _, config.builddir = self.adddir('builddir')
        ready, _ = tldp.driver.builddir_setup(config)
        self.assertTrue(ready)
class TestremoveUnknownDoctypes(TestToolsFilesystem):

    def test_removeUnknownDoctypes(self):
        # One recognizable document and one of unknown doctype; only the
        # recognizable one survives the filter.
        unknown = SourceDocument(opj(sampledocs, 'Unknown-Doctype.xqf'))
        known = SourceDocument(opj(sampledocs, 'linuxdoc-simple.sgml'))
        result = tldp.driver.removeUnknownDoctypes([unknown, known])
        self.assertEqual(1, len(result))
class Test_prepare_docs_script_mode(TestToolsFilesystem):

    def test_prepare_docs_script_mode_basic(self):
        # Script mode assigns a working OutputDirectory to each document.
        config = Namespace(pubdir=self.tempdir)
        document = SourceDocument(opj(sampledocs, 'linuxdoc-simple.sgml'))
        self.assertIsNone(document.working)
        tldp.driver.prepare_docs_script_mode(config, [document])
        self.assertIsInstance(document.working, OutputDirectory)

    def test_prepare_docs_script_mode_existing_output(self):
        # If the document already has an output directory, script mode
        # reuses it as the working directory (same object).
        config = Namespace(pubdir=self.tempdir)
        document = SourceDocument(opj(sampledocs, 'linuxdoc-simple.sgml'))
        document.output = OutputDirectory.fromsource(config.pubdir, document)
        self.assertIsNone(document.working)
        tldp.driver.prepare_docs_script_mode(config, [document])
        self.assertIs(document.working, document.output)
class Test_prepare_docs_build_mode(TestInventoryBase):

    def test_prepare_docs_build_mode(self):
        # Build mode assigns a working OutputDirectory to each document.
        document = SourceDocument(opj(sampledocs, 'linuxdoc-simple.sgml'))
        self.assertIsNone(document.working)
        tldp.driver.prepare_docs_build_mode(self.config, [document])
        self.assertIsInstance(document.working, OutputDirectory)

    def test_prepare_docs_build_mode_nobuilddir(self):
        # Without a build directory, preparation must report not-ready.
        c = self.config
        os.rmdir(c.builddir)
        document = SourceDocument(opj(sampledocs, 'linuxdoc-simple.sgml'))
        ready, _ = tldp.driver.prepare_docs_build_mode(c, [document])
        self.assertFalse(ready)
class Test_post_publish_cleanup(TestInventoryBase):

    def test_post_publish_cleanup_enotempty(self):
        c = self.config
        document = SourceDocument(opj(sampledocs, 'linuxdoc-simple.sgml'))
        tldp.driver.prepare_docs_build_mode(c, [document])
        # Leave a file behind so the working directory is not empty.
        leftover = opj(document.dtworkingdir, 'annoyance-file.txt')
        open(leftover, 'w').close()
        tldp.driver.post_publish_cleanup([document.dtworkingdir])
        # Cleanup must not remove a non-empty directory (ENOTEMPTY case).
        self.assertTrue(os.path.isdir(document.dtworkingdir))
class TestDriverRun(TestInventoryBase):
    # End-to-end tests of tldp.driver.run() against a populated inventory.

    def test_run(self):
        c = self.config
        ex = example.ex_linuxdoc
        self.add_published('Published-HOWTO', ex)
        self.add_new('New-HOWTO', ex)
        self.add_stale('Stale-HOWTO', ex)
        self.add_orphan('Orphan-HOWTO', ex)
        self.add_broken('Broken-HOWTO', ex)
        fullpath = opj(self.tempdir, 'sources', 'New-HOWTO.sgml')
        argv = self.argv
        # Select targets three ways: by status class, by stem, by file path.
        argv.extend(['--publish', 'stale', 'Orphan-HOWTO', fullpath])
        exitcode = tldp.driver.run(argv)
        self.assertEqual(exitcode, os.EX_OK)
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        # Afterwards four documents are published; the broken one remains.
        self.assertEqual(4, len(inv.published.keys()))
        self.assertEqual(1, len(inv.broken.keys()))

    def test_run_no_work(self):
        self.add_published('Published-HOWTO', example.ex_linuxdoc)
        exitcode = tldp.driver.run(self.argv)
        # -- improvement: check for 'No work to do.' from logger
        self.assertEqual(exitcode, os.EX_OK)

    def test_run_loglevel_resetting(self):
        '''just exercise the loglevel settings'''
        argv = ['--doctypes', '--loglevel', 'debug']
        tldp.driver.run(argv)

    def test_run_extra_args(self):
        self.add_new('New-HOWTO', example.ex_linuxdoc)
        fullpath = opj(self.tempdir, 'sources', 'New-HOWTO.sgml')
        argv = self.argv
        argv.extend(['--build', 'stale', 'Orphan-HOWTO', fullpath, 'extra'])
        # On bad invocation run() returns an error string, not an exit code.
        val = tldp.driver.run(argv)
        self.assertTrue('Unknown arguments' in val)

    def test_run_no_action(self):
        c = self.config
        ex = example.ex_linuxdoc
        self.add_new('New-HOWTO', ex)
        tldp.driver.run(self.argv)
        # With no explicit action, output lands under builddir/<doctype>/.
        docbuilddir = opj(c.builddir, ex.doctype.__name__)
        inv = tldp.inventory.Inventory(docbuilddir, c.sourcedir)
        self.assertEqual(1, len(inv.published.keys()))

    def test_run_oops_no_sourcedir(self):
        c = self.config
        argv = ['--pubdir', c.pubdir]
        ex = example.ex_linuxdoc
        self.add_new('New-HOWTO', ex)
        exitcode = tldp.driver.run(argv)
        self.assertTrue('required for inventory' in exitcode)

    def test_run_oops_no_pubdir(self):
        c = self.config
        argv = ['--sourcedir', c.sourcedir[0]]
        self.add_new('New-HOWTO', example.ex_linuxdoc)
        exitcode = tldp.driver.run(argv)
        self.assertTrue('required for inventory' in exitcode)

    def test_run_build_no_pubdir(self):
        c = self.config
        argv = ['--sourcedir', c.sourcedir[0]]
        fname = opj(sampledocs, 'linuxdoc-simple.sgml')
        argv.append(fname)
        exitcode = tldp.driver.run(argv)
        self.assertTrue('to --build' in exitcode)
class TestDriverProcessSkips(TestInventoryBase):
    """Tests for tldp.driver.processSkips() include/exclude filtering."""

    def test_skipDocuments_status(self):
        # Skip by status class name ('stale').
        c = self.config
        ex = example.ex_linuxdoc
        self.add_new('New-HOWTO', ex)
        self.add_stale('Stale-HOWTO', ex)
        c.skip = ['stale']
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        docs = inv.all.values()
        inc, exc = tldp.driver.processSkips(c, docs)
        # Fix: was assertTrue(1, len(exc)), which always passes because the
        # second positional argument of assertTrue is the failure message.
        self.assertEqual(1, len(exc))
        excluded = exc.pop()
        self.assertEqual(excluded.stem, 'Stale-HOWTO')
        self.assertEqual(len(inc) + 1, len(inv.all.keys()))

    def test_skipDocuments_stem(self):
        # Skip by document stem.
        c = self.config
        ex = example.ex_linuxdoc
        self.add_published('Published-HOWTO', ex)
        self.add_new('New-HOWTO', ex)
        c.skip = ['Published-HOWTO']
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        docs = inv.all.values()
        inc, exc = tldp.driver.processSkips(c, docs)
        self.assertEqual(1, len(exc))  # fix: was a no-op assertTrue
        excluded = exc.pop()
        self.assertEqual(excluded.stem, 'Published-HOWTO')
        self.assertEqual(len(inc) + 1, len(inv.all.keys()))

    def test_skipDocuments_doctype(self):
        # Skip by doctype name.
        c = self.config
        self.add_published('Linuxdoc-HOWTO', example.ex_linuxdoc)
        self.add_new('Docbook4XML-HOWTO', example.ex_docbook4xml)
        c.skip = ['Docbook4XML']
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        docs = inv.all.values()
        inc, exc = tldp.driver.processSkips(c, docs)
        self.assertEqual(1, len(exc))  # fix: was a no-op assertTrue
        excluded = exc.pop()
        self.assertEqual(excluded.stem, 'Docbook4XML-HOWTO')
        self.assertEqual(len(inc) + 1, len(inv.all.keys()))
class TestDriverScript(TestInventoryBase):
    # Tests for tldp.driver.script() (shell-script generation mode).

    def test_script(self):
        c = self.config
        c.script = True
        stdout = io.StringIO()
        self.add_published('Published-HOWTO', example.ex_linuxdoc)
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        tldp.driver.script(c, inv.all.values(), file=stdout)
        stdout.seek(0)
        data = stdout.read()
        # The generated script must mention the document's stem.
        self.assertTrue('Published-HOWTO' in data)

    def test_script_no_pubdir(self):
        c = self.config
        c.script = True
        stdout = io.StringIO()
        self.add_published('New-HOWTO', example.ex_linuxdoc)
        # Script generation must still work without a configured pubdir.
        c.pubdir = None
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        tldp.driver.script(c, inv.all.values(), file=stdout)
        stdout.seek(0)
        data = stdout.read()
        self.assertTrue('New-HOWTO' in data)

    def test_run_script(self):
        # One document per status class; --script must exit cleanly.
        self.add_published('Published-HOWTO', example.ex_linuxdoc)
        self.add_new('New-HOWTO', example.ex_linuxdoc)
        self.add_stale('Stale-HOWTO', example.ex_linuxdoc)
        self.add_orphan('Orphan-HOWTO', example.ex_linuxdoc)
        self.add_broken('Broken-HOWTO', example.ex_linuxdoc)
        argv = self.argv
        argv.append('--script')
        exitcode = tldp.driver.run(argv)
        self.assertEqual(exitcode, os.EX_OK)

    def test_script_bad_invocation(self):
        c = self.config
        c.script = False
        self.add_published('Published-HOWTO', example.ex_linuxdoc)
        inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
        # Calling script() when neither --build nor --script was selected
        # is a programming error and must raise.
        with self.assertRaises(Exception) as ecm:
            tldp.driver.script(c, inv.all.values())
        e = ecm.exception
        self.assertTrue("neither --build nor --script" in e.args[0])
#
# -- end of file
| |
# (C) Copyright 2015,2016 Hewlett Packard Enterprise Development LP
from __future__ import absolute_import
import os
import re
import docker
from monasca_agent.collector import checks
# Full-length (64 hex chars) Docker container id.
CONTAINER_ID_RE = re.compile('[0-9a-f]{64}')
DEFAULT_BASE_URL = "unix://var/run/docker.sock"  # local Docker daemon socket
DEFAULT_VERSION = "auto"  # let docker-py negotiate the API version
DEFAULT_TIMEOUT = 3  # seconds, for Docker API calls
DEFAULT_ADD_KUBERNETES_DIMENSIONS = False
# Kernel clock ticks per second; used to convert /proc/stat jiffies.
JIFFY_HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
# cgroup hierarchies this check reads metrics from.
CGROUPS = ['cpuacct', 'memory', 'blkio']
class Docker(checks.AgentCheck):
    """Collect metrics and events from Docker API and cgroups"""
    # NOTE(review): this module uses Python 2 idioms (dict.itervalues, long,
    # list-returning map/filter); it is not runnable on Python 3 as written.

    def __init__(self, name, init_config, agent_config, instances=None):
        checks.AgentCheck.__init__(self, name, init_config, agent_config, instances)
        # Only a single configured instance is supported.
        if instances is not None and len(instances) > 1:
            raise Exception('Docker check only supports one configured instance.')
        self.connection_timeout = int(init_config.get('connection_timeout', DEFAULT_TIMEOUT))
        self.docker_version = init_config.get('version', DEFAULT_VERSION)
        self.docker_root = init_config.get('docker_root', '/')
        # Locate cgroups directories
        self._mount_points = {}
        self._cgroup_filename_pattern = None
        for cgroup in CGROUPS:
            self._mount_points[cgroup] = self._find_cgroup(cgroup)
        # CPU readings kept across check() runs so utilization percent can
        # be computed from deltas between two samples.
        self._prev_cpu = {}
        self._curr_cpu = {}
        self._cpu_count = None
        self._prev_system_cpu = None

    def check(self, instance):
        """Collect and report metrics for all currently running containers."""
        docker_url = instance.get('url', DEFAULT_BASE_URL)
        try:
            docker_client = docker.Client(base_url=docker_url, version=self.docker_version,
                                          timeout=self.connection_timeout)
            running_containers = {container['Id']: container for container in self._get_containers(docker_client)}
        except Exception as e:
            self.log.error("Could not get containers from Docker API skipping Docker check - {}".format(e))
            return
        add_kubernetes_dimensions = instance.get('add_kubernetes_dimensions', DEFAULT_ADD_KUBERNETES_DIMENSIONS)
        dimensions = self._set_dimensions(None, instance)
        self.gauge("container.running_count", len(running_containers), dimensions=dimensions)
        self._set_container_pids(running_containers)
        # Report container metrics from cgroups
        self._report_container_metrics(running_containers, add_kubernetes_dimensions, dimensions)

    def _report_rate_gauge_metric(self, metric_name, value, dimensions):
        # Emit the same counter twice: as a per-second rate and a raw gauge.
        self.rate(metric_name + "_sec", value, dimensions=dimensions)
        self.gauge(metric_name, value, dimensions=dimensions)

    def _report_container_metrics(self, container_dict, add_kubernetes_dimensions, dimensions):
        """Report cgroup and network metrics for every container in the dict."""
        self._curr_system_cpu, self._cpu_count = self._get_system_cpu_ns()
        system_memory = self._get_total_memory()
        for container in container_dict.itervalues():
            try:
                container_dimensions = dimensions.copy()
                container_id = container['Id']
                container['name'] = self._get_container_name(container['Names'], container_id)
                container_dimensions['image'] = container['Image']
                container_labels = container['Labels']
                if add_kubernetes_dimensions:
                    # Promote well-known Kubernetes labels to dimensions.
                    if 'io.kubernetes.pod.name' in container_labels:
                        container_dimensions['kubernetes_pod_name'] = container_labels['io.kubernetes.pod.name']
                    if 'io.kubernetes.pod.namespace' in container_labels:
                        container_dimensions['kubernetes_namespace'] = container_labels['io.kubernetes.pod.namespace']
                self._report_cgroup_cpuacct(container_id, container_dimensions)
                self._report_cgroup_memory(container_id, container_dimensions, system_memory)
                self._report_cgroup_blkio(container_id, container_dimensions)
                if "_proc_root" in container:
                    self._report_net_metrics(container, container_dimensions)
                self._report_cgroup_cpu_pct(container_id, container_dimensions)
            except IOError as err:
                # It is possible that the container got stopped between the
                # API call and now
                self.log.info("IO error while collecting cgroup metrics, "
                              "skipping container...", exc_info=err)
            except Exception as err:
                self.log.error("Error when collecting data about container {}".format(err))
        self._prev_system_cpu = self._curr_system_cpu

    def _get_container_name(self, container_names, container_id):
        """Return the container's primary name, or its id if none is found."""
        container_name = None
        if container_names:
            for name in container_names:
                # if there is more than one / the name is actually an alias
                if name.count('/') <= 1:
                    container_name = str(name).lstrip('/')
                    break
        return container_name if container_name else container_id

    def _report_cgroup_cpuacct(self, container_id, container_dimensions):
        # cpuacct.stat holds 'user' and 'system' tick counters.
        stat_file = self._get_cgroup_file('cpuacct', container_id, 'cpuacct.stat')
        stats = self._parse_cgroup_pairs(stat_file)
        self._report_rate_gauge_metric('container.cpu.user_time', stats['user'], container_dimensions)
        self._report_rate_gauge_metric('container.cpu.system_time', stats['system'], container_dimensions)

    def _report_cgroup_memory(self, container_id, container_dimensions, system_memory_limit):
        stat_file = self._get_cgroup_file('memory', container_id, 'memory.stat')
        stats = self._parse_cgroup_pairs(stat_file)
        cache_memory = stats['cache']
        rss_memory = stats['rss']
        self.gauge('container.mem.cache', cache_memory, dimensions=container_dimensions)
        self.gauge('container.mem.rss', rss_memory, dimensions=container_dimensions)
        # 'swap' is absent when the kernel has no swap accounting enabled.
        swap_memory = 0
        if 'swap' in stats:
            swap_memory = stats['swap']
            self.gauge('container.mem.swap', swap_memory, dimensions=container_dimensions)
        # Get container max memory
        memory_limit_file = self._get_cgroup_file('memory', container_id, 'memory.limit_in_bytes')
        memory_limit = self._parse_cgroup_value(memory_limit_file, convert=float)
        # An unlimited cgroup reports a huge number; clamp to host memory.
        if memory_limit > system_memory_limit:
            memory_limit = float(system_memory_limit)
        used_perc = round((((cache_memory + rss_memory + swap_memory) / memory_limit) * 100), 2)
        self.gauge('container.mem.used_perc', used_perc, dimensions=container_dimensions)

    def _report_cgroup_blkio(self, container_id, container_dimensions):
        # Aggregate read/write byte counters across all block devices.
        stat_file = self._get_cgroup_file('blkio', container_id,
                                          'blkio.throttle.io_service_bytes')
        stats = self._parse_cgroup_blkio_metrics(stat_file)
        self._report_rate_gauge_metric('container.io.read_bytes', stats['io_read'], container_dimensions)
        self._report_rate_gauge_metric('container.io.write_bytes', stats['io_write'], container_dimensions)

    def _report_cgroup_cpu_pct(self, container_id, container_dimensions):
        """Report CPU utilization percent from two consecutive usage samples."""
        usage_file = self._get_cgroup_file('cpuacct', container_id, 'cpuacct.usage')
        prev_cpu = self._prev_cpu.get(container_id, None)
        curr_cpu = self._parse_cgroup_value(usage_file)
        self._prev_cpu[container_id] = curr_cpu
        if prev_cpu is None:
            # probably first run, we need 2 data points
            return
        system_cpu_delta = float(self._curr_system_cpu - self._prev_system_cpu)
        container_cpu_delta = float(curr_cpu - prev_cpu)
        if system_cpu_delta > 0 and container_cpu_delta > 0:
            cpu_pct = (container_cpu_delta / system_cpu_delta) * self._cpu_count * 100
            self.gauge('container.cpu.utilization_perc', cpu_pct, dimensions=container_dimensions)

    def _report_net_metrics(self, container, container_dimensions):
        """Find container network metrics by looking at /proc/$PID/net/dev of the container process."""
        proc_net_file = os.path.join(container['_proc_root'], 'net/dev')
        try:
            with open(proc_net_file, 'r') as f:
                lines = f.readlines()
                """Two first lines are headers:
                Inter-|   Receive                                                |  Transmit
                 face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
                """
                for line in lines[2:]:
                    cols = line.split(':', 1)
                    interface_name = str(cols[0]).strip()
                    if interface_name != 'lo':
                        container_network_dimensions = container_dimensions.copy()
                        container_network_dimensions['interface'] = interface_name
                        network_values = cols[1].split()
                        self._report_rate_gauge_metric("container.net.in_bytes", long(network_values[0]),
                                                       container_network_dimensions)
                        self._report_rate_gauge_metric("container.net.out_bytes", long(network_values[8]),
                                                       container_network_dimensions)
                        # Only the first non-loopback interface is reported.
                        break
        except Exception as e:
            self.log.error("Failed to report network metrics from file {0}. Exception: {1}".format(proc_net_file, e))

    # Docker API
    def _get_containers(self, docker_client):
        """Gets the list of running containers in Docker."""
        return docker_client.containers()

    def _find_cgroup_filename_pattern(self, container_id):
        # We try with different cgroups so that it works even if only one is properly working
        for mountpoint in self._mount_points.itervalues():
            stat_file_path_lxc = os.path.join(mountpoint, "lxc")
            stat_file_path_docker = os.path.join(mountpoint, "docker")
            stat_file_path_coreos = os.path.join(mountpoint, "system.slice")
            stat_file_path_kubernetes = os.path.join(mountpoint, container_id)
            stat_file_path_kubernetes_docker = os.path.join(mountpoint, "system", "docker", container_id)
            stat_file_path_docker_daemon = os.path.join(mountpoint, "docker-daemon", "docker", container_id)
            if os.path.exists(stat_file_path_lxc):
                return '%(mountpoint)s/lxc/%(id)s/%(file)s'
            elif os.path.exists(stat_file_path_docker):
                return '%(mountpoint)s/docker/%(id)s/%(file)s'
            elif os.path.exists(stat_file_path_coreos):
                return '%(mountpoint)s/system.slice/docker-%(id)s.scope/%(file)s'
            elif os.path.exists(stat_file_path_kubernetes):
                return '%(mountpoint)s/%(id)s/%(file)s'
            elif os.path.exists(stat_file_path_kubernetes_docker):
                return '%(mountpoint)s/system/docker/%(id)s/%(file)s'
            elif os.path.exists(stat_file_path_docker_daemon):
                return '%(mountpoint)s/docker-daemon/docker/%(id)s/%(file)s'
        raise Exception("Cannot find Docker cgroup directory. Be sure your system is supported.")

    def _get_cgroup_file(self, cgroup, container_id, filename):
        # This can't be initialized at startup because cgroups may not be mounted yet
        if not self._cgroup_filename_pattern:
            self._cgroup_filename_pattern = self._find_cgroup_filename_pattern(container_id)
        return self._cgroup_filename_pattern % (dict(
            mountpoint=self._mount_points[cgroup],
            id=container_id,
            file=filename,
        ))

    def _get_total_memory(self):
        """Return the host's total memory in bytes, from /proc/meminfo."""
        # NOTE(review): os.path.join discards docker_root here because
        # '/proc/meminfo' is absolute -- the result is always '/proc/meminfo'.
        # _set_container_pids joins the relative 'proc' instead; confirm which
        # behavior is intended when docker_root != '/'.
        with open(os.path.join(self.docker_root, '/proc/meminfo')) as f:
            for line in f.readlines():
                tokens = line.split()
                if tokens[0] == 'MemTotal:':
                    # MemTotal is reported in kB.
                    return int(tokens[1]) * 1024
        raise Exception('Invalid formatting in /proc/meminfo: unable to '
                        'determine MemTotal')

    def _get_system_cpu_ns(self):
        # see also: getSystemCPUUsage of docker's stats_collector_unix.go
        total_jiffies = None
        cpu_count = 0
        # NOTE(review): absolute '/proc/stat' makes os.path.join ignore
        # docker_root (same as _get_total_memory above) -- confirm intent.
        with open(os.path.join(self.docker_root, '/proc/stat'), 'r') as f:
            for line in f.readlines():
                tokens = line.split()
                if tokens[0] == 'cpu':
                    if len(tokens) < 8:
                        raise Exception("Invalid formatting in /proc/stat")
                    # Sum user..softirq fields of the aggregate 'cpu' line.
                    total_jiffies = sum(map(lambda t: int(t), tokens[1:8]))
                elif tokens[0].startswith('cpu'):
                    # startswith but does not equal implies /cpu\d+/ or so
                    # we don't need full per-cpu usage to calculate %,
                    # so just count cores
                    cpu_count += 1
        if not total_jiffies:
            raise Exception("Unable to find CPU usage in /proc/stat")
        # NOTE(review): on Python 2, total_jiffies / JIFFY_HZ is integer
        # division, truncating to whole seconds before the ns conversion --
        # verify whether float division was intended.
        cpu_time_ns = (total_jiffies / JIFFY_HZ) * 1e9
        return cpu_time_ns, cpu_count

    def _find_cgroup(self, hierarchy):
        """Finds the mount point for a specified cgroup hierarchy. Works with
        old style and new style mounts.
        """
        with open(os.path.join(self.docker_root, "/proc/mounts"), 'r') as f:
            mounts = map(lambda x: x.split(), f.read().splitlines())
        cgroup_mounts = filter(lambda x: x[2] == "cgroup", mounts)
        if len(cgroup_mounts) == 0:
            raise Exception("Can't find mounted cgroups. If you run the Agent inside a container,"
                            " please refer to the documentation.")
        # Old cgroup style
        if len(cgroup_mounts) == 1:
            return os.path.join(self.docker_root, cgroup_mounts[0][1])
        candidate = None
        for _, mountpoint, _, opts, _, _ in cgroup_mounts:
            if hierarchy in opts:
                # Prefer mounts under /host/ (agent-in-container setups).
                if mountpoint.startswith("/host/"):
                    return os.path.join(self.docker_root, mountpoint)
                candidate = mountpoint
        if candidate is not None:
            return os.path.join(self.docker_root, candidate)
        raise Exception("Can't find mounted %s cgroups." % hierarchy)

    def _parse_cgroup_value(self, stat_file, convert=int):
        """Parse a cgroup info file containing a single value."""
        with open(stat_file, 'r') as f:
            return convert(f.read().strip())

    def _parse_cgroup_pairs(self, stat_file, convert=int):
        """Parse a cgroup file for key/values."""
        with open(stat_file, 'r') as f:
            split_lines = map(lambda x: x.split(' ', 1), f.readlines())
            return {k: convert(v) for k, v in split_lines}

    def _parse_cgroup_blkio_metrics(self, stat_file):
        """Parse the blkio metrics."""
        with open(stat_file, 'r') as f:
            stats = f.read().splitlines()
            metrics = {
                'io_read': 0,
                'io_write': 0,
            }
            for line in stats:
                if 'Read' in line:
                    metrics['io_read'] += int(line.split()[2])
                if 'Write' in line:
                    metrics['io_write'] += int(line.split()[2])
            return metrics

    # checking if cgroup is a container cgroup
    def _is_container_cgroup(self, line, selinux_policy):
        if line[1] not in ('cpu,cpuacct', 'cpuacct,cpu', 'cpuacct') or line[2] == '/docker-daemon':
            return False
        if 'docker' in line[2]:
            return True
        if 'docker' in selinux_policy:
            return True
        if line[2].startswith('/') and re.match(CONTAINER_ID_RE, line[2][1:]):  # kubernetes
            return True
        return False

    def _set_container_pids(self, containers):
        """Find all proc paths for running containers."""
        proc_path = os.path.join(self.docker_root, 'proc')
        pid_dirs = [_dir for _dir in os.listdir(proc_path) if _dir.isdigit()]
        for pid_dir in pid_dirs:
            try:
                path = os.path.join(proc_path, pid_dir, 'cgroup')
                with open(path, 'r') as f:
                    content = [line.strip().split(':') for line in f.readlines()]
                selinux_policy = ''
                path = os.path.join(proc_path, pid_dir, 'attr', 'current')
                if os.path.exists(path):
                    with open(path, 'r') as f:
                        selinux_policy = f.readlines()[0]
            except IOError as e:
                self.log.debug("Cannot read %s, "
                               "process likely raced to finish : %s" %
                               (path, str(e)))
                continue
            except Exception as e:
                self.log.warning("Cannot read %s : %s" % (path, str(e)))
                continue
            try:
                # Pick the cpuacct cgroup path and extract the container id.
                cpuacct = None
                for line in content:
                    if self._is_container_cgroup(line, selinux_policy):
                        cpuacct = line[2]
                        break
                matches = re.findall(CONTAINER_ID_RE, cpuacct) if cpuacct else None
                if matches:
                    container_id = matches[-1]
                    if container_id not in containers:
                        self.log.debug("Container %s not in container_dict, it's likely excluded", container_id)
                        continue
                    containers[container_id]['_pid'] = pid_dir
                    containers[container_id]['_proc_root'] = os.path.join(proc_path, pid_dir)
            except Exception as e:
                self.log.warning("Cannot parse %s content: %s" % (path, str(e)))
                continue
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with
"--update_goldens" flag set to "True" to update goldens when making changes to
the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import unittest
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, verbose_diffs, default False:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
_API_GOLDEN_FOLDER = 'tensorflow/tools/api/golden'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'


def _KeyToFilePath(key):
  """Returns the golden-file path encoding the given API object key.

  Each capital letter in the key is replaced by a dash followed by its
  lowercase form, e.g. 'tf.FileWriter' -> '.../tf.-file-writer.pbtxt'.
  """
  hyphenated = re.sub('([A-Z])',
                      lambda match: '-%s' % match.group(0).lower(), key)
  return os.path.join(_API_GOLDEN_FOLDER, '%s.pbtxt' % hyphenated)
def _FileNameToKey(filename):
  """Returns the API object key encoded by a golden file's name.

  Inverts the dash-lowercase encoding used by the golden files: every
  '-x' pair in the base name (extension stripped) becomes 'X'.
  """
  stem = os.path.splitext(os.path.basename(filename))[0]
  return re.sub('(-[a-z])', lambda match: match.group(0)[1].upper(), stem)
class ApiCompatibilityTest(test.TestCase):
  """Compares the current public TF API against the checked-in goldens."""

  def __init__(self, *args, **kwargs):
    super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
    # Pre-load the two static messages shown to users when diffs are found:
    # a warning for --update_goldens runs and the README for failing runs.
    golden_update_warning_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _UPDATE_WARNING_FILE)
    self._update_golden_warning = file_io.read_file_to_string(
        golden_update_warning_filename)
    test_readme_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _TEST_README_FILE)
    self._test_readme_message = file_io.read_file_to_string(
        test_readme_filename)

  def _AssertProtoDictEquals(self,
                             expected_dict,
                             actual_dict,
                             verbose=False,
                             update_goldens=False):
    """Diff given dicts of protobufs and report differences a readable way.

    Args:
      expected_dict: a dict of TFAPIObject protos constructed from golden
        files.
      actual_dict: a dict of TFAPIObject protos constructed by reading from
        the TF package linked to the test.
      verbose: Whether to log the full diffs, or simply report which files
        were different.
      update_goldens: Whether to update goldens when there are diffs found.
    """
    diffs = []
    verbose_diffs = []
    expected_keys = set(expected_dict.keys())
    actual_keys = set(actual_dict.keys())
    only_in_expected = expected_keys - actual_keys
    only_in_actual = actual_keys - expected_keys
    all_keys = expected_keys | actual_keys
    # This will be populated below.
    updated_keys = []
    for key in all_keys:
      diff_message = ''
      verbose_diff_message = ''
      # First check if the key is not found in one or the other.
      if key in only_in_expected:
        diff_message = 'Object %s expected but not found (removed).' % key
        verbose_diff_message = diff_message
      elif key in only_in_actual:
        diff_message = 'New object %s found (added).' % key
        verbose_diff_message = diff_message
      else:
        # Now we can run an actual proto diff.
        try:
          self.assertProtoEquals(expected_dict[key], actual_dict[key])
        except AssertionError as e:
          updated_keys.append(key)
          diff_message = 'Change detected in python object: %s.' % key
          verbose_diff_message = str(e)
      # All difference cases covered above. If any difference found, add to the
      # list.
      if diff_message:
        diffs.append(diff_message)
        verbose_diffs.append(verbose_diff_message)
    # If diffs are found, handle them based on flags.
    if diffs:
      diff_count = len(diffs)
      logging.error(self._test_readme_message)
      logging.error('%d differences found between API and golden.', diff_count)
      messages = verbose_diffs if verbose else diffs
      for i in range(diff_count):
        logging.error('Issue %d\t: %s', i + 1, messages[i])
      if update_goldens:
        # Write files if requested.
        logging.warning(self._update_golden_warning)
        # If the keys are only in expected, some objects are deleted.
        # Remove files.
        for key in only_in_expected:
          filepath = _KeyToFilePath(key)
          file_io.delete_file(filepath)
        # If the files are only in actual (current library), these are new
        # modules. Write them to files. Also record all updates in files.
        for key in only_in_actual | set(updated_keys):
          filepath = _KeyToFilePath(key)
          file_io.write_string_to_file(
              filepath, text_format.MessageToString(actual_dict[key]))
      else:
        # Fail if we cannot fix the test by updating goldens.
        self.fail('%d differences found between API and golden.' % diff_count)
    else:
      logging.info('No differences found between API and golden.')

  @unittest.skipUnless(
      sys.version_info.major == 2 and os.uname()[0] == 'Linux',
      'API compabitility test goldens are generated using python2 on Linux.')
  def testAPIBackwardsCompatibility(self):
    """Walks the live tf namespace and diffs it against the goldens."""
    # Extract all API stuff.
    visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
    public_api_visitor = public_api.PublicAPIVisitor(visitor)
    # tf.contrib is explicitly excluded from the stability contract.
    public_api_visitor.do_not_descend_map['tf'].append('contrib')
    traverse.traverse(tf, public_api_visitor)
    proto_dict = visitor.GetProtos()
    # Read all golden files.
    expression = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*'))
    golden_file_list = file_io.get_matching_files(expression)
    def _ReadFileToProto(filename):
      """Read a filename, create a protobuf from its contents."""
      ret_val = api_objects_pb2.TFAPIObject()
      text_format.Merge(file_io.read_file_to_string(filename), ret_val)
      return ret_val
    golden_proto_dict = {
        _FileNameToKey(filename): _ReadFileToProto(filename)
        for filename in golden_file_list
    }
    # Diff them. Do not fail if called with update.
    # If the test is run to update goldens, only report diffs but do not fail.
    self._AssertProtoDictEquals(
        golden_proto_dict,
        proto_dict,
        verbose=FLAGS.verbose_diffs,
        update_goldens=FLAGS.update_goldens)
def _StrToBool(value):
  """argparse `type` callable that parses booleans correctly.

  The builtin `bool` treats ANY non-empty string (including 'False') as
  True, so with `type=bool` the invocation `--update_goldens False` would
  silently enable golden updates. Accepts the usual truthy spellings,
  case-insensitively; everything else is False.
  """
  return value.lower() in ('true', 't', 'yes', 'y', '1')


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--update_goldens', type=_StrToBool, default=False,
      help=_UPDATE_GOLDENS_HELP)
  parser.add_argument(
      '--verbose_diffs', type=_StrToBool, default=False,
      help=_VERBOSE_DIFFS_HELP)
  FLAGS, unparsed = parser.parse_known_args()
  # Strip our flags from argv so the unittest library does not get confused.
  sys.argv = [sys.argv[0]] + unparsed
  test.main()
| |
# -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from __future__ import unicode_literals
from mock import MagicMock, patch
import pytest
import urllib
import tornado.httpclient
from koi.exceptions import HTTPError
from koi.test_helpers import gen_test, make_future
from repository.controllers.base import RepoBaseHandler
class PartialMockedHandler(RepoBaseHandler):
    """RepoBaseHandler wired up with mock application/request objects.

    Pre-configures a GET request targeting repository 'repo1' so each test
    only needs to override the pieces it cares about.
    """
    def __init__(self):
        super(PartialMockedHandler, self).__init__(application=MagicMock(),
                                                   request=MagicMock())
        # Capture finish() calls instead of writing a real HTTP response.
        self.finish = MagicMock()
        self.request.method = 'GET'
        self.path_kwargs = {'repository_id': 'repo1'}
@patch('repository.controllers.base.API')
@gen_test
def test_get_organisation_id(API):
    """Resolves a repository id to its owning organisation's id."""
    accounts_client = API().accounts.repositories['repo1']
    repo_data = {'id': 'repo1', 'organisation': {'id': 'org1'}}
    accounts_client.get.return_value = make_future(
        {'status': 200, 'data': repo_data})

    handler = PartialMockedHandler()
    result = yield handler.get_organisation_id('repo1')

    accounts_client.prepare_request.assert_called_with(
        request_timeout=180, headers={'Accept': 'application/json'})
    assert accounts_client.get.call_count == 1
    assert result == 'org1'
@patch('repository.controllers.base.API')
@gen_test
def test_get_organisation_id_http_error(API):
    """An accounts-service failure surfaces as a 500 HTTPError."""
    accounts_client = API().accounts.repositories['repo1']
    accounts_client.get.side_effect = tornado.httpclient.HTTPError(
        403, 'errormsg')

    handler = PartialMockedHandler()
    with pytest.raises(HTTPError) as exc:
        yield handler.get_organisation_id('repo1')

    accounts_client.prepare_request.assert_called_with(
        request_timeout=180, headers={'Accept': 'application/json'})
    assert accounts_client.get.call_count == 1
    assert exc.value.status_code == 500
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.API')
@gen_test
def test_verify_repository_token_true(API, options):
    """A token granted access by the auth service verifies as True."""
    options.service_id = 'service1'
    options.secret_id = 'secret1'
    auth_client = API().auth.verify
    auth_client.post.return_value = make_future(
        {'status': 200, 'has_access': True})

    handler = PartialMockedHandler()
    result = yield handler.verify_repository_token('token1', 'r', 'repo1')

    expected_body = urllib.urlencode({
        'token': 'token1',
        'requested_access': 'r',
        'resource_id': 'repo1'
    })
    auth_client.prepare_request.assert_called_with(
        request_timeout=180,
        headers={'Content-Type': 'application/x-www-form-urlencoded',
                 'Accept': 'application/json'})
    auth_client.post.assert_called_once_with(body=expected_body)
    assert result is True
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.API')
@gen_test
def test_verify_repository_token_false(API, options):
    """A token denied access by the auth service verifies as False."""
    options.service_id = 'service1'
    options.secret_id = 'secret1'
    auth_client = API().auth.verify
    auth_client.post.return_value = make_future(
        {'status': 200, 'has_access': False})

    handler = PartialMockedHandler()
    result = yield handler.verify_repository_token('token1', 'r', 'repo1')

    expected_body = urllib.urlencode({
        'token': 'token1',
        'requested_access': 'r',
        'resource_id': 'repo1'
    })
    auth_client.prepare_request.assert_called_with(
        request_timeout=180,
        headers={'Content-Type': 'application/x-www-form-urlencoded',
                 'Accept': 'application/json'})
    auth_client.post.assert_called_once_with(body=expected_body)
    assert result is False
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.API')
@gen_test
def test_verify_repository_token_no_repo_id(API, options):
    """Without a repository id no resource_id is sent to the auth service."""
    options.service_id = 'service1'
    options.secret_id = 'secret1'
    auth_client = API().auth.verify
    auth_client.post.return_value = make_future(
        {'status': 200, 'has_access': True})

    handler = PartialMockedHandler()
    result = yield handler.verify_repository_token('token1', 'r', None)

    expected_body = urllib.urlencode({
        'token': 'token1',
        'requested_access': 'r'
    })
    auth_client.prepare_request.assert_called_with(
        request_timeout=180,
        headers={'Content-Type': 'application/x-www-form-urlencoded',
                 'Accept': 'application/json'})
    auth_client.post.assert_called_once_with(body=expected_body)
    assert result is True
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.API')
@gen_test
def test_verify_repository_token_http_error(API, options):
    """An auth-service failure surfaces as a 500 HTTPError."""
    options.service_id = 'service1'
    options.secret_id = 'secret1'
    auth_client = API().auth.verify
    auth_client.post.side_effect = tornado.httpclient.HTTPError(
        403, 'errormsg')

    handler = PartialMockedHandler()
    with pytest.raises(HTTPError) as exc:
        yield handler.verify_repository_token('token1', 'r', 'repo1')

    expected_body = urllib.urlencode({
        'token': 'token1',
        'requested_access': 'r',
        'resource_id': 'repo1'
    })
    auth_client.prepare_request.assert_called_with(
        request_timeout=180,
        headers={'Content-Type': 'application/x-www-form-urlencoded',
                 'Accept': 'application/json'})
    auth_client.post.assert_called_once_with(body=expected_body)
    assert exc.value.status_code == 500
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.RepoBaseHandler.get_organisation_id')
@patch('repository.controllers.base.RepoBaseHandler.verify_repository_token')
@patch('repository.controllers.base.jwt')
@gen_test
def test_prepare_no_repository_id(jwt, verify_repository_token, get_organisation_id, options):
    """Without a repository id the token is verified against no resource."""
    options.standalone = False
    verify_repository_token.return_value = make_future(True)
    jwt.decode.return_value = {
        "client": {
            "service_type": "external",
            "organisation_id": "developerco",
            "id": "client3"
        },
        "sub": "client1"
    }

    handler = PartialMockedHandler()
    handler.request.headers = {'Authorization': 'Bearer token1234'}
    handler.path_kwargs = {}
    yield handler.prepare()

    verify_repository_token.assert_called_once_with('token1234', 'r', None)
    assert not get_organisation_id.called
    assert handler.token == {
        "client": {
            "service_type": "external",
            "organisation_id": "developerco",
            "id": "client3"
        },
        "sub": "client1"
    }
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.RepoBaseHandler.get_organisation_id')
@patch('repository.controllers.base.RepoBaseHandler.verify_repository_token')
@patch('repository.controllers.base.jwt')
@gen_test
def test_prepare_oauth_required_valid(jwt, verify_repository_token, get_organisation_id, options):
    """A valid bearer token passes prepare() and is stored on the handler."""
    options.standalone = False
    verify_repository_token.return_value = make_future(True)
    get_organisation_id.return_value = make_future('org1')
    jwt.decode.return_value = {
        "client": {
            "service_type": "external",
            "organisation_id": "developerco",
            "id": "client3"
        },
        "sub": "client1"
    }

    handler = PartialMockedHandler()
    handler.request.headers = {'Authorization': 'Bearer token1234'}
    yield handler.prepare()

    verify_repository_token.assert_called_once_with('token1234', 'r', 'repo1')
    get_organisation_id.assert_called_once_with('repo1')
    assert handler.token == {
        "client": {
            "service_type": "external",
            "organisation_id": "developerco",
            "id": "client3"
        },
        "sub": "client1"
    }
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.RepoBaseHandler.get_organisation_id')
@patch('repository.controllers.base.RepoBaseHandler.verify_repository_token')
@patch('repository.controllers.base.jwt')
@gen_test
def test_prepare_oauth_required_missing_bearer(jwt, verify_repository_token, get_organisation_id, options):
    """The token is still extracted when the 'Bearer ' prefix is absent."""
    options.standalone = False
    verify_repository_token.return_value = make_future(True)
    get_organisation_id.return_value = make_future('org1')
    jwt.decode.return_value = {
        "client": {
            "service_type": "external",
            "organisation_id": "developerco",
            "id": "client3"
        },
        "sub": "client1"
    }

    handler = PartialMockedHandler()
    handler.request.headers = {'Authorization': 'token1234'}
    yield handler.prepare()

    verify_repository_token.assert_called_once_with('token1234', 'r', 'repo1')
    get_organisation_id.assert_called_once_with('repo1')
    assert handler.token == {
        "client": {
            "service_type": "external",
            "organisation_id": "developerco",
            "id": "client3"
        },
        "sub": "client1"
    }
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.RepoBaseHandler.get_organisation_id')
@patch('repository.controllers.base.RepoBaseHandler.verify_repository_token')
@gen_test
def test_prepare_oauth_required_invalid(verify_repository_token, get_organisation_id, options):
    """A token that fails verification is rejected with a 403."""
    options.standalone = False
    verify_repository_token.return_value = make_future(False)

    handler = PartialMockedHandler()
    handler.request.headers = {'Authorization': 'Bearer token1234'}
    with pytest.raises(HTTPError) as exc:
        yield handler.prepare()

    verify_repository_token.assert_called_once_with('token1234', 'r', 'repo1')
    assert not get_organisation_id.called
    assert exc.value.status_code == 403
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.RepoBaseHandler.get_organisation_id')
@patch('repository.controllers.base.RepoBaseHandler.verify_repository_token')
@gen_test
def test_prepare_oauth_required_missing_header(verify_repository_token, get_organisation_id, options):
    """A request without an Authorization header is rejected with a 401."""
    options.standalone = False

    handler = PartialMockedHandler()
    handler.request.headers = {}
    with pytest.raises(HTTPError) as exc:
        yield handler.prepare()

    assert exc.value.status_code == 401
    assert not verify_repository_token.called
    assert not get_organisation_id.called
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.RepoBaseHandler.get_organisation_id')
@patch('repository.controllers.base.RepoBaseHandler.verify_repository_token')
@gen_test
def test_prepare_oauth_required_unauthenticated_endpoint(verify_repository_token, get_organisation_id, options):
    """Endpoints marked UNAUTHENTICATED_ACCESS skip token verification."""
    options.standalone = False
    get_organisation_id.return_value = make_future('org1')

    handler = PartialMockedHandler()
    handler.METHOD_ACCESS = {'GET': handler.UNAUTHENTICATED_ACCESS}
    yield handler.prepare()

    assert not verify_repository_token.called
    get_organisation_id.assert_called_once_with('repo1')
    assert handler.token is None
@patch('repository.controllers.base.options')
@patch('repository.controllers.base.RepoBaseHandler.get_organisation_id')
@patch('repository.controllers.base.RepoBaseHandler.verify_repository_token')
@gen_test
def test_prepare_standalone_mode(verify_repository_token, get_organisation_id, options):
    """In standalone mode prepare() performs no auth work at all."""
    options.standalone = True

    handler = PartialMockedHandler()
    yield handler.prepare()

    assert handler.token is None
    assert handler.organisation_id is None
    assert not verify_repository_token.called
    assert not get_organisation_id.called
@pytest.mark.parametrize('content_type', RepoBaseHandler.ALLOWED_CONTENT_TYPES)
def test_get_content_type(content_type):
    """Every allowed Content-Type header value is returned unchanged."""
    base_handler = PartialMockedHandler()
    base_handler.request.headers = {'Content-Type': content_type}

    assert base_handler.get_content_type() == content_type
def test_default_content_type():
    """A missing Content-Type header falls back to the default."""
    base_handler = PartialMockedHandler()
    base_handler.request.headers = {}

    assert base_handler.get_content_type() == RepoBaseHandler.DEFAULT_CONTENT_TYPE
def test_invalid_content_type():
    """An unsupported Content-Type is rejected with 415."""
    base_handler = PartialMockedHandler()
    base_handler.request.headers = {'Content-Type': 'application/json'}

    with pytest.raises(HTTPError) as exc:
        base_handler.get_content_type()
    assert exc.value.status_code == 415
| |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from tests.unit.docs import BaseDocsTest
from botocore.docs.utils import py_type_name
from botocore.docs.utils import py_default
from botocore.docs.utils import get_official_service_name
from botocore.docs.utils import AutoPopulatedParam
from botocore.docs.utils import HideParamFromOperations
from botocore.docs.utils import AppendParamDocumentation
from botocore.docs.utils import escape_controls
class TestPythonTypeName(unittest.TestCase):
    """Verifies the mapping from botocore shape types to Python type names."""

    def test_structure(self):
        self.assertEqual(py_type_name('structure'), 'dict')

    def test_list(self):
        self.assertEqual(py_type_name('list'), 'list')

    def test_map(self):
        self.assertEqual(py_type_name('map'), 'dict')

    def test_string(self):
        self.assertEqual(py_type_name('string'), 'string')

    def test_character(self):
        self.assertEqual(py_type_name('character'), 'string')

    def test_blob(self):
        self.assertEqual(py_type_name('blob'), 'bytes')

    def test_timestamp(self):
        self.assertEqual(py_type_name('timestamp'), 'datetime')

    def test_integer(self):
        self.assertEqual(py_type_name('integer'), 'integer')

    def test_long(self):
        self.assertEqual(py_type_name('long'), 'integer')

    def test_float(self):
        self.assertEqual(py_type_name('float'), 'float')

    def test_double(self):
        self.assertEqual(py_type_name('double'), 'float')
class TestPythonDefault(unittest.TestCase):
    """Verifies the example default values rendered for each shape type."""

    def test_structure(self):
        self.assertEqual(py_default('structure'), '{...}')

    def test_list(self):
        self.assertEqual(py_default('list'), '[...]')

    def test_map(self):
        self.assertEqual(py_default('map'), '{...}')

    def test_string(self):
        self.assertEqual(py_default('string'), "'string'")

    def test_blob(self):
        self.assertEqual(py_default('blob'), "b'bytes'")

    def test_timestamp(self):
        self.assertEqual(py_default('timestamp'), 'datetime(2015, 1, 1)')

    def test_integer(self):
        self.assertEqual(py_default('integer'), '123')

    def test_long(self):
        self.assertEqual(py_default('long'), '123')

    def test_double(self):
        self.assertEqual(py_default('double'), '123.0')
class TestGetOfficialServiceName(BaseDocsTest):
    """Covers abbreviation handling in get_official_service_name."""

    def setUp(self):
        super(TestGetOfficialServiceName, self).setUp()
        self.service_model.metadata = {'serviceFullName': 'Official Name'}

    def test_no_short_name(self):
        self.assertEqual(get_official_service_name(self.service_model),
                         'Official Name')

    def test_aws_short_name(self):
        self.service_model.metadata['serviceAbbreviation'] = 'AWS Foo'
        self.assertEqual(get_official_service_name(self.service_model),
                         'Official Name (Foo)')

    def test_amazon_short_name(self):
        self.service_model.metadata['serviceAbbreviation'] = 'Amazon Foo'
        self.assertEqual(get_official_service_name(self.service_model),
                         'Official Name (Foo)')

    def test_short_name_in_official_name(self):
        self.service_model.metadata['serviceFullName'] = 'The Foo Service'
        self.service_model.metadata['serviceAbbreviation'] = 'Amazon Foo'
        self.assertEqual(get_official_service_name(self.service_model),
                         'The Foo Service')
class TestAutopopulatedParam(BaseDocsTest):
    """Covers AutoPopulatedParam's rewriting of param docs and examples."""

    def setUp(self):
        super(TestAutopopulatedParam, self).setUp()
        self.name = 'MyMember'
        self.param = AutoPopulatedParam(self.name)

    def test_request_param_not_required(self):
        member_section = self.doc_structure.add_new_section(self.name)
        member_section.add_new_section('param-documentation')
        self.param.document_auto_populated_param(
            'docs.request-params', self.doc_structure)
        self.assert_contains_line(
            'this parameter is automatically populated')

    def test_request_param_required(self):
        member_section = self.doc_structure.add_new_section(self.name)
        required_section = member_section.add_new_section('is-required')
        member_section.add_new_section('param-documentation')
        required_section.write('**[REQUIRED]**')
        self.param.document_auto_populated_param(
            'docs.request-params', self.doc_structure)
        self.assert_not_contains_line('**[REQUIRED]**')
        self.assert_contains_line(
            'this parameter is automatically populated')

    def test_non_default_param_description(self):
        custom_description = 'This is a custom description'
        self.param = AutoPopulatedParam(self.name, custom_description)
        member_section = self.doc_structure.add_new_section(self.name)
        member_section.add_new_section('param-documentation')
        self.param.document_auto_populated_param(
            'docs.request-params', self.doc_structure)
        self.assert_contains_line(custom_description)

    def test_request_example(self):
        structure = self.doc_structure.add_new_section('structure-value')
        member_section = structure.add_new_section(self.name)
        example = "MyMember: 'string'"
        member_section.write(example)
        self.assert_contains_line(example)
        self.param.document_auto_populated_param(
            'docs.request-example', self.doc_structure)
        self.assert_not_contains_line(example)

    def test_param_not_in_section_request_param(self):
        self.doc_structure.add_new_section('Foo')
        self.param.document_auto_populated_param(
            'docs.request-params', self.doc_structure)
        self.assertEqual(
            self.doc_structure.flush_structure().decode('utf-8'), '')

    def test_param_not_in_section_request_example(self):
        structure = self.doc_structure.add_new_section('structure-value')
        other_section = structure.add_new_section('Foo')
        example = "Foo: 'string'"
        other_section.write(example)
        self.assert_contains_line(example)
        self.param.document_auto_populated_param(
            'docs.request-example', self.doc_structure)
        self.assert_contains_line(example)
class TestHideParamFromOperations(BaseDocsTest):
    """Covers removal of a parameter from selected operations' docs."""

    def setUp(self):
        super(TestHideParamFromOperations, self).setUp()
        self.name = 'MyMember'
        self.param = HideParamFromOperations(
            's3', self.name, ['SampleOperation'])

    def test_hides_params_from_doc_string(self):
        member_section = self.doc_structure.add_new_section(self.name)
        signature = ':param %s: ' % self.name
        member_section.write(signature)
        self.assert_contains_line(signature)
        self.param.hide_param(
            'docs.request-params.s3.SampleOperation.complete-section',
            self.doc_structure)
        self.assert_not_contains_line(signature)

    def test_hides_param_from_example(self):
        structure = self.doc_structure.add_new_section('structure-value')
        member_section = structure.add_new_section(self.name)
        example = "%s: 'string'" % self.name
        member_section.write(example)
        self.assert_contains_line(example)
        self.param.hide_param(
            'docs.request-example.s3.SampleOperation.complete-section',
            self.doc_structure)
        self.assert_not_contains_line(example)
class TestAppendParamDocumentation(BaseDocsTest):
    """Covers appending extra text to a parameter's documentation."""

    def setUp(self):
        super(TestAppendParamDocumentation, self).setUp()
        self.name = 'MyMember'
        self.param = AppendParamDocumentation(self.name, 'hello!')

    def test_appends_documentation(self):
        member_section = self.doc_structure.add_new_section(self.name)
        docs_section = member_section.add_new_section('param-documentation')
        docs_section.writeln('foo')
        self.param.append_documentation(
            'docs.request-params', self.doc_structure)
        self.assert_contains_line('foo\n')
        self.assert_contains_line('hello!')
class TestEscapeControls(unittest.TestCase):
    """escape_controls should backslash-escape control characters."""

    def test_escapes_controls(self):
        raw = '\na\rb\tc\fd\be'
        self.assertEqual(escape_controls(raw), '\\na\\rb\\tc\\fd\\be')
| |
#!/usr/bin/env python
"""This file defines the base classes for Flows.
A Flow is a state machine which executes actions on the
client. Messages are transmitted between the flow object and the
client with their responses introduced into a state handler within the
flow.
The flow can send messages to a client, or launch other child flows. While these
messages are processed, the flow can be suspended indefinitely into the data
store. When replies arrive from the client, or a child flow, the flow is woken
up and the responses are sent to one of the flow state methods.
In order for the flow to be suspended and restored, its state is
stored in a protobuf. Rather than storing the entire flow, the
preserved state is well defined and can be found in the flow's "state"
attribute. Note that this means that any parameters assigned to the
flow object itself are not preserved across state executions - only
parameters specifically stored in the state are preserved.
"""
import enum
import logging
import traceback
from typing import Optional, Sequence
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib import type_info
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import random
from grr_response_core.stats import metrics
from grr_response_server import data_store
from grr_response_server.databases import db
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
# Counts StartFlow calls that referenced an unknown/unregistered flow class.
GRR_FLOW_INVALID_FLOW_COUNT = metrics.Counter("grr_flow_invalid_flow_count")
class Error(Exception):
  """Base class for this package's exceptions."""


class CanNotStartFlowWithExistingIdError(Error):
  """Raised by StartFlow when trying to start a flow with an existing id."""

  def __init__(self, client_id, flow_id):
    # Bug fix: the ids were previously interpolated as (client_id, flow_id),
    # producing "Flow <client> ... client <flow>" with the values swapped.
    message = ("Flow %s already exists on the client %s." %
               (flow_id, client_id))
    super().__init__(message)
    self.client_id = client_id
    self.flow_id = flow_id


class FlowResourcesExceededError(Error):
  """An error indicating that the flow used too many resources."""
# An "attribute dict": dictionary entries are readable and writable as
# attributes. Based on the recipe from
# http://stackoverflow.com/questions/4984647/accessing-dict-keys-like-an-attribute-in-python
# Caveat: every attribute assigned to an instance also becomes a dict entry
# and therefore gets serialized. Do not inherit from this class, there
# might be interesting side effects.
class AttributedDict(dict):
  """Dict whose entries double as instance attributes."""

  def __init__(self, *args, **kwargs):
    dict.__init__(self, *args, **kwargs)
    # Aliasing __dict__ to the dict itself makes attribute access and item
    # access share the same storage.
    self.__dict__ = self
def FilterArgsFromSemanticProtobuf(protobuf, kwargs):
  """Moves matching entries from `kwargs` onto fields of `protobuf`.

  Every key in `kwargs` that names one of the protobuf's fields is removed
  from the dict; non-None values are assigned to the corresponding field.
  Entries remaining in `kwargs` afterwards are unknown to the protobuf.
  """
  for field in protobuf.type_infos:
    field_value = kwargs.pop(field.name, None)
    if field_value is None:
      continue
    setattr(protobuf, field.name, field_value)
def GetOutputPluginStates(output_plugins, source=None):
  """Initializes state for a list of output plugins.

  Args:
    output_plugins: An iterable of OutputPluginDescriptor instances.
    source: Optional source URN handed to each plugin's initializer.

  Returns:
    A list of rdf_flow_runner.OutputPluginState, one per descriptor.

  Raises:
    ValueError: If any plugin fails to initialize.
  """
  output_plugins_states = []
  for plugin_descriptor in output_plugins:
    plugin_class = plugin_descriptor.GetPluginClass()
    try:
      _, plugin_state = plugin_class.CreatePluginAndDefaultState(
          source_urn=source, args=plugin_descriptor.plugin_args)
    except Exception as e:  # pylint: disable=broad-except
      # Chain the original exception so the root cause is preserved in the
      # traceback instead of being flattened into the message string only.
      raise ValueError("Plugin %s failed to initialize (%s)" %
                       (plugin_class, e)) from e
    # TODO(amoser): Those do not need to be inside the state, they
    # could be part of the plugin descriptor.
    plugin_state["logs"] = []
    plugin_state["errors"] = []
    output_plugins_states.append(
        rdf_flow_runner.OutputPluginState(
            plugin_state=plugin_state, plugin_descriptor=plugin_descriptor))
  return output_plugins_states
def RandomFlowId() -> str:
  """Returns a random flow id encoded as a 16-digit uppercase hex string."""
  return f"{random.Id64():016X}"
class _ParentType(enum.Enum):
  """Enum describing what data type led to a flow's creation."""
  ROOT = 0
  FLOW = 1
  HUNT = 2
  SCHEDULED_FLOW = 3


class FlowParent(object):
  """Describes the entity that caused a flow to be created.

  Instances should be built via the From* class methods rather than the
  constructor.
  """

  def __init__(self,
               parent_type: _ParentType,
               parent_id: Optional[str] = None,
               parent_flow_obj=None):
    """Instantiates a FlowParent. Use the class methods instead."""
    self.type = parent_type
    self.id = parent_id
    self.flow_obj = parent_flow_obj

  @property
  def is_flow(self) -> bool:
    """True, if the flow is started as child-flow."""
    return self.type is _ParentType.FLOW

  @property
  def is_hunt(self) -> bool:
    """True, if the flow is started as part of a hunt."""
    return self.type is _ParentType.HUNT

  @property
  def is_root(self) -> bool:
    """True, if the flow is started as top-level flow."""
    return self.type is _ParentType.ROOT

  @property
  def is_scheduled_flow(self) -> bool:
    """True, if the flow is started from a ScheduledFlow."""
    return self.type is _ParentType.SCHEDULED_FLOW

  @classmethod
  def FromFlow(cls, flow_obj) -> "FlowParent":
    """Builds a parent referencing another flow (flow_base.FlowBase)."""
    return cls(_ParentType.FLOW, flow_obj.rdf_flow.flow_id, flow_obj)

  @classmethod
  def FromHuntID(cls, hunt_id: str) -> "FlowParent":
    """Builds a parent referencing a hunt by its ID."""
    return cls(_ParentType.HUNT, hunt_id)

  @classmethod
  def FromRoot(cls) -> "FlowParent":
    """Builds a parent marker for a top-level flow (no real parent)."""
    return cls(_ParentType.ROOT)

  @classmethod
  def FromScheduledFlowID(cls, scheduled_flow_id: str) -> "FlowParent":
    """Builds a parent referencing a ScheduledFlow by its ID."""
    return cls(_ParentType.SCHEDULED_FLOW, scheduled_flow_id)
def StartFlow(client_id=None,
              cpu_limit=None,
              creator=None,
              flow_args=None,
              flow_cls=None,
              network_bytes_limit=None,
              original_flow=None,
              output_plugins=None,
              start_at=None,
              parent=None,
              runtime_limit=None,
              **kwargs):
  """The main factory function for creating and executing a new flow.

  Args:
    client_id: ID of the client this flow should run on.
    cpu_limit: CPU limit in seconds for this flow.
    creator: Username that requested this flow.
    flow_args: An arg protocol buffer which is an instance of the required
      flow's args_type class attribute.
    flow_cls: Class of the flow that should be started.
    network_bytes_limit: Limit on the network traffic this flow can generated.
    original_flow: A FlowReference object in case this flow was copied from
      another flow.
    output_plugins: An OutputPluginDescriptor object indicating what output
      plugins should be used for this flow.
    start_at: If specified, flow will be started not immediately, but at a given
      time.
    parent: A FlowParent referencing the parent, or None for top-level flows.
    runtime_limit: Runtime limit as Duration for all ClientActions.
    **kwargs: If args or runner_args are not specified, we construct these
      protobufs from these keywords.

  Returns:
    the flow id of the new flow.

  Raises:
    ValueError: Unknown or invalid parameters were provided.
  """
  # NOTE(review): flow_cls is effectively required despite its None default;
  # flow_cls.__name__ below raises AttributeError when it is omitted.
  # Is the required flow a known flow?
  try:
    registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
  except ValueError:
    GRR_FLOW_INVALID_FLOW_COUNT.Increment()
    raise ValueError("Unable to locate flow %s" % flow_cls.__name__)

  if not client_id:
    raise ValueError("Client_id is needed to start a flow.")

  # Now parse the flow args into the new object from the keywords.
  if flow_args is None:
    flow_args = flow_cls.args_type()
  # FilterArgsFromSemanticProtobuf consumes matching entries from kwargs.
  FilterArgsFromSemanticProtobuf(flow_args, kwargs)
  # At this point we should exhaust all the keyword args. If any are left
  # over, we do not know what to do with them so raise.
  if kwargs:
    raise type_info.UnknownArg("Unknown parameters to StartFlow: %s" % kwargs)

  # Check that the flow args are valid.
  flow_args.Validate()

  rdf_flow = rdf_flow_objects.Flow(
      client_id=client_id,
      flow_class_name=flow_cls.__name__,
      args=flow_args,
      create_time=rdfvalue.RDFDatetime.Now(),
      creator=creator,
      output_plugins=output_plugins,
      original_flow=original_flow,
      flow_state="RUNNING")

  if parent is None:
    parent = FlowParent.FromRoot()

  if parent.is_hunt or parent.is_scheduled_flow:
    # When starting a flow from a hunt or ScheduledFlow, re-use the parent's id
    # to make it easy to find flows. For hunts, every client has a top-level
    # flow with the hunt's id.
    rdf_flow.flow_id = parent.id
  else:  # For new top-level and child flows, assign a random ID.
    rdf_flow.flow_id = RandomFlowId()

  # For better performance, only do conflicting IDs check for top-level flows.
  # ReadFlowObject raising UnknownFlowError is the expected (non-conflict)
  # path here; a successful read means the id is already taken.
  if not parent.is_flow:
    try:
      data_store.REL_DB.ReadFlowObject(client_id, rdf_flow.flow_id)
      raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
    except db.UnknownFlowError:
      pass

  if parent.is_flow:  # A flow is a nested flow.
    parent_rdf_flow = parent.flow_obj.rdf_flow
    rdf_flow.long_flow_id = "%s/%s" % (parent_rdf_flow.long_flow_id,
                                       rdf_flow.flow_id)
    rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
    rdf_flow.parent_hunt_id = parent_rdf_flow.parent_hunt_id
    rdf_flow.parent_request_id = parent.flow_obj.GetCurrentOutboundId()
    # Child flows inherit the parent's creator (when set) rather than the
    # creator argument passed in.
    if parent_rdf_flow.creator:
      rdf_flow.creator = parent_rdf_flow.creator
  elif parent.is_hunt:  # Root-level hunt-induced flow.
    rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
    rdf_flow.parent_hunt_id = parent.id
  elif parent.is_root or parent.is_scheduled_flow:
    # A flow is a root-level non-hunt flow.
    rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
  else:
    raise ValueError(f"Unknown flow parent type {parent}")

  if output_plugins:
    rdf_flow.output_plugins_states = GetOutputPluginStates(
        output_plugins, rdf_flow.long_flow_id)

  if network_bytes_limit is not None:
    rdf_flow.network_bytes_limit = network_bytes_limit
  if cpu_limit is not None:
    rdf_flow.cpu_limit = cpu_limit
  if runtime_limit is not None:
    # NOTE(review): assigned unconverted to runtime_limit_us — presumably the
    # Duration serializes as microseconds; confirm against the Flow proto.
    rdf_flow.runtime_limit_us = runtime_limit

  logging.info(u"Starting %s(%s) on %s (%s)", rdf_flow.long_flow_id,
               rdf_flow.flow_class_name, client_id, start_at or "now")

  rdf_flow.current_state = "Start"

  flow_obj = flow_cls(rdf_flow)

  # Prevent a race condition, where a flow is scheduled twice, because one
  # worker inserts the row and another worker silently updates the existing row.
  allow_update = False

  if start_at is None:

    # Store an initial version of the flow straight away. This is needed so the
    # database doesn't raise consistency errors due to missing parent keys when
    # writing logs / errors / results which might happen in Start().
    try:
      data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow, allow_update=False)
    except db.FlowExistsError:
      raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)

    allow_update = True

    try:
      # Just run the first state inline. NOTE: Running synchronously means
      # that this runs on the thread that starts the flow. The advantage is
      # that that Start method can raise any errors immediately.
      flow_obj.Start()

      # The flow does not need to actually remain running.
      if not flow_obj.outstanding_requests:
        flow_obj.RunStateMethod("End")

        # Additional check for the correct state in case the End method raised
        # and terminated the flow.
        if flow_obj.IsRunning():
          flow_obj.MarkDone()
    except Exception as e:  # pylint: disable=broad-except
      # We catch all exceptions that happen in Start() and mark the flow as
      # failed.
      msg = compatibility.NativeStr(e)
      if compatibility.PY2:
        msg = msg.decode("utf-8", "replace")

      flow_obj.Error(error_message=msg, backtrace=traceback.format_exc())
  else:
    # Deferred start: schedule the Start state to run at start_at instead of
    # executing it inline.
    flow_obj.CallState("Start", start_time=start_at)

  flow_obj.PersistState()

  try:
    data_store.REL_DB.WriteFlowObject(
        flow_obj.rdf_flow, allow_update=allow_update)
  except db.FlowExistsError:
    raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)

  if parent.is_flow:
    # We can optimize here and not write requests/responses to the database
    # since we have to do this for the parent flow at some point anyways.
    parent.flow_obj.MergeQueuedMessages(flow_obj)
  else:
    flow_obj.FlushQueuedMessages()

  return rdf_flow.flow_id
def ScheduleFlow(client_id: str, creator: str, flow_name, flow_args,
                 runner_args) -> rdf_flow_objects.ScheduledFlow:
  """Schedules a Flow on the client, to be started upon approval grant."""
  sf = rdf_flow_objects.ScheduledFlow(
      scheduled_flow_id=RandomFlowId(),
      client_id=client_id,
      creator=creator,
      flow_name=flow_name,
      flow_args=flow_args,
      runner_args=runner_args,
      create_time=rdfvalue.RDFDatetime.Now())
  data_store.REL_DB.WriteScheduledFlow(sf)
  return sf
def UnscheduleFlow(client_id: str, creator: str,
                   scheduled_flow_id: str) -> None:
  """Unschedules and deletes a previously scheduled flow."""
  data_store.REL_DB.DeleteScheduledFlow(
      client_id=client_id,
      creator=creator,
      scheduled_flow_id=scheduled_flow_id)
def ListScheduledFlows(
    client_id: str, creator: str) -> Sequence[rdf_flow_objects.ScheduledFlow]:
  """Lists all scheduled flows of a user on a client."""
  return data_store.REL_DB.ListScheduledFlows(
      client_id=client_id,
      creator=creator)
def StartScheduledFlows(client_id: str, creator: str) -> None:
  """Starts all scheduled flows of a user on a client.

  Each ScheduledFlow is handed to _StartScheduledFlow(). When starting one of
  them fails, the ScheduledFlow is kept (with its `error` field written to the
  database by _StartScheduledFlow) and the exception is swallowed here so the
  remaining ScheduledFlows are still attempted.

  Args:
    client_id: The ID of the client of the ScheduledFlows.
    creator: The username of the user who created the ScheduledFlows.

  Raises:
    UnknownClientError: if no client with client_id exists.
    UnknownGRRUserError: if creator does not exist as user.
  """
  # Fail early if the client or the user is unknown. No flows have been
  # started yet, so data races are not an issue here.
  data_store.REL_DB.ReadClientMetadata(client_id)
  data_store.REL_DB.ReadGRRUser(creator)

  for scheduled_flow in ListScheduledFlows(client_id, creator):
    try:
      flow_id = _StartScheduledFlow(scheduled_flow)
      logging.info("Started Flow %s/%s from ScheduledFlow %s", client_id,
                   flow_id, scheduled_flow.scheduled_flow_id)
    except Exception:  # pylint: disable=broad-except
      logging.exception("Cannot start ScheduledFlow %s %s/%s from %s",
                        scheduled_flow.flow_name, scheduled_flow.client_id,
                        scheduled_flow.scheduled_flow_id,
                        scheduled_flow.creator)
def _StartScheduledFlow(scheduled_flow: rdf_flow_objects.ScheduledFlow) -> str:
  """Starts a Flow from a ScheduledFlow and deletes the ScheduledFlow."""
  runner_args = scheduled_flow.runner_args

  try:
    flow_id = StartFlow(
        client_id=scheduled_flow.client_id,
        creator=scheduled_flow.creator,
        flow_args=scheduled_flow.flow_args,
        flow_cls=registry.FlowRegistry.FlowClassByName(
            scheduled_flow.flow_name),
        output_plugins=runner_args.output_plugins,
        start_at=rdfvalue.RDFDatetime.Now(),
        parent=FlowParent.FromScheduledFlowID(
            scheduled_flow.scheduled_flow_id),
        cpu_limit=runner_args.cpu_limit,
        network_bytes_limit=runner_args.network_bytes_limit,
        # TODO(user): runtime_limit is missing in FlowRunnerArgs.
    )
  except Exception as e:
    # Record the failure on the ScheduledFlow so the user can see why it
    # did not start, then re-raise for the caller to handle/log.
    scheduled_flow.error = str(e)
    data_store.REL_DB.WriteScheduledFlow(scheduled_flow)
    raise

  # Only a successful start removes the ScheduledFlow.
  data_store.REL_DB.DeleteScheduledFlow(
      client_id=scheduled_flow.client_id,
      creator=scheduled_flow.creator,
      scheduled_flow_id=scheduled_flow.scheduled_flow_id)

  return flow_id
| |
import abc
import csv
from tempfile import NamedTemporaryFile
from typing import List
from django.http import HttpResponse, HttpRequest
from binder.json import jsonloads
from binder.router import list_route
class ExportFileAdapter(abc.ABC):
    """
    Adapter between the data that is exported, and the export file.

    Subclasses implement a concrete output format (e.g. CSV, XLSX) and must
    provide all abstract methods below.

    Fix: the original declared ``__metaclass__ = abc.ABCMeta``, which is
    Python 2 syntax and a no-op under Python 3, so abstract methods were
    never enforced. Inheriting from ``abc.ABC`` restores the enforcement.
    """

    def __init__(self, request: 'HttpRequest'):
        self.request = request

    @abc.abstractmethod
    def set_file_name(self, file_name: str):
        """
        Sets the file name of the file that needs to be exported. The file name
        does not include the extension.

        e.g. set_file_name('foo') => file download name is 'foo.csv' or 'foo.xlsx'

        :param file_name: base name of the downloaded file, without extension
        :return:
        """
        pass

    @abc.abstractmethod
    def set_columns(self, columns: List[str]):
        """
        Set the column names (header row) of the file

        :param columns: column titles, in output order
        :return:
        """
        pass

    @abc.abstractmethod
    def add_row(self, values: List[str]):
        """
        Add a row with values to the file

        :param values: cell values, in the same order as the columns
        :return:
        """
        pass

    @abc.abstractmethod
    def get_response(self) -> 'HttpResponse':
        """
        Return a http response with the content of the file
        """
        pass
class CsvFileAdapter(ExportFileAdapter):
    """
    Export adapter that streams rows straight into a CSV HTTP response.
    """

    def __init__(self, request: HttpRequest):
        super().__init__(request)
        self.file_name = 'export'
        # Rows are written directly into the response body via csv.writer.
        self.response = HttpResponse(content_type='text/csv')
        self.writer = csv.writer(self.response)

    def set_file_name(self, file_name: str):
        self.file_name = file_name

    def set_columns(self, columns: List[str]):
        # The header is simply the first CSV row.
        self.add_row(columns)

    def add_row(self, values: List[str]):
        self.writer.writerow(values)

    def get_response(self) -> HttpResponse:
        self.response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(self.file_name)
        return self.response
class ExcelFileAdapter(ExportFileAdapter):
    """
    Adapter for returning Excel (xlsx) files.
    """

    def __init__(self, request: HttpRequest):
        super().__init__(request)
        # Import openpyxl locally. This means that you can use the CSV adapter
        # without having openpyxl installed.
        import openpyxl
        self.openpyxl = openpyxl
        self.file_name = 'export'
        self.work_book = self.openpyxl.Workbook()
        # Fix: write into the workbook's default (active) sheet. The previous
        # create_sheet() call added a second sheet, leaving an empty default
        # "Sheet" in every exported file.
        self.sheet = self.work_book.active
        # The row number we are currently writing to (0-based; openpyxl cells
        # are 1-based, hence the +1 offsets in add_row).
        self._row_number = 0

    def set_file_name(self, file_name: str):
        self.file_name = file_name

    def set_columns(self, columns: List[str]):
        self.add_row(columns)

    def add_row(self, values: List[str]):
        for (column_id, value) in enumerate(values):
            self.sheet.cell(column=column_id + 1, row=self._row_number + 1, value=value)
        self._row_number += 1

    def get_response(self) -> HttpResponse:
        with NamedTemporaryFile() as tmp:
            self.work_book.save(tmp.name)
            # Rewind and read the rendered workbook explicitly; passing the raw
            # file object relied on the handle's position still being at 0.
            tmp.seek(0)
            self.response = HttpResponse(
                content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
                content=tmp.read(),
            )
        self.response['Content-Disposition'] = 'attachment; filename="{}.xlsx"'.format(self.file_name)
        return self.response
# Maps the value of the ?response_type= request parameter to the adapter class
# used to render the export. Anything not listed here falls back to CSV (see
# RequestAwareAdapter).
DEFAULT_RESPONSE_TYPE_MAPPING = {
    'xlsx': ExcelFileAdapter,
}
class RequestAwareAdapter(ExportFileAdapter):
    """
    Adapter that returns csv files by default, but allows a request parameter to return other files as well

    e.g. foo/download/?response_type=xlsx returns a xlsx file.

    All calls are delegated to the concrete adapter picked from the request.
    """

    def __init__(self, request: HttpRequest):
        super().__init__(request)
        requested_type = request.GET.get('response_type', '').lower()
        # Unknown (or absent) response types fall back to CSV.
        adapter_cls = DEFAULT_RESPONSE_TYPE_MAPPING.get(requested_type, CsvFileAdapter)
        self.base_adapter = adapter_cls(request)

    def set_file_name(self, file_name: str):
        return self.base_adapter.set_file_name(file_name)

    def set_columns(self, columns: List[str]):
        return self.base_adapter.set_columns(columns)

    def add_row(self, values: List[str]):
        return self.base_adapter.add_row(values)

    def get_response(self) -> HttpResponse:
        return self.base_adapter.get_response()
class CsvExportView:
    """
    This class adds another endpoint to the ModelView, namely GET model/download/. This does the same thing as getting a
    collection, except that the result is returned as a csv file, rather than a json file.
    """
    # NOTE(review): __metaclass__ is Python 2 syntax and has no effect under
    # Python 3. Kept as-is to avoid touching the class's attribute surface;
    # this mixin has no abstract methods to enforce anyway.
    __metaclass__ = abc.ABCMeta

    # CSV setting contains all the information that is needed to define a csv file. This must be an instance of
    # CsvExportSettings.
    csv_settings = None

    class CsvExportSettings:
        """
        This is a fake struct which contains the definition of the CSV Export
        """

        def __init__(self, withs, column_map, file_name=None, default_file_name='download', multi_value_delimiter=' ',
                     extra_permission=None, extra_params=None, csv_adapter=RequestAwareAdapter):
            """
            @param withs: String[] An array of all the withs that are necessary for this csv export
            @param column_map: Tuple[] An array, with all columns of the csv file in order. Each column is represented by a tuple
            (key, title) or (key, title, callback)
            @param file_name: String The file name of the outputted csv file, without the csv extension, if it is a callable it will
            be called on the data retrieved from the get request
            @param default_file_name: String The fallback for when resolving file_name gives back None
            @param multi_value_delimiter: String When one column has multiple values, they are joined, with this value
            as delimiter between them. This may be if an array is returned, or if we have a one to many relation
            @param extra_permission: String When set, an extra binder permission check will be done on this permission.
            @param extra_params: Dict Extra GET parameters forced onto the request before the data is fetched.
            Defaults to no extra parameters.
            @param csv_adapter: Class. An ExportFileAdapter subclass used to render the file.
            """
            self.withs = withs
            self.column_map = column_map
            self.file_name = file_name
            self.default_file_name = default_file_name
            self.multi_value_delimiter = multi_value_delimiter
            self.extra_permission = extra_permission
            # Fix: the original used a mutable default argument ({}), which is
            # shared between every CsvExportSettings instance. Create a fresh
            # dict per instance instead.
            self.extra_params = {} if extra_params is None else extra_params
            self.csv_adapter = csv_adapter

    def _generate_csv_file(self, request: HttpRequest, file_adapter: CsvFileAdapter):
        """
        Fetches the collection through the normal GET endpoint and writes it
        row by row through file_adapter.
        """
        # Sometimes we want to add an extra permission check before a csv file can be downloaded. This checks if the
        # permission is set, and if the permission is set, checks if the current user has the specified permission
        if self.csv_settings.extra_permission is not None:
            self._require_model_perm(self.csv_settings.extra_permission, request)

        # A bit of a hack. We overwrite some get parameters, to make sure that we can create the CSV file.
        # Fix: the saved mutability flag was previously read from request.POST
        # but restored onto request.GET below; save GET's own flag instead.
        mutable = request.GET._mutable
        request.GET._mutable = True
        request.GET['page'] = 1
        request.GET['limit'] = 10000
        request.GET['with'] = ",".join(self.csv_settings.withs)
        for key, value in self.csv_settings.extra_params.items():
            request.GET[key] = value
        request.GET._mutable = mutable

        parent_result = self.get(request)
        parent_data = jsonloads(parent_result.content)

        # Resolve the download file name; file_name may be a callable taking
        # the fetched data, and default_file_name is the fallback.
        file_name = self.csv_settings.file_name
        if callable(file_name):
            file_name = file_name(parent_data)
        if file_name is None:
            file_name = self.csv_settings.default_file_name
        file_adapter.set_file_name(file_name)

        # CSV header
        file_adapter.set_columns(list(map(lambda x: x[1], self.csv_settings.column_map)))

        # Make a mapping from the withs. This creates a map. This is needed for easy looking up relations
        # {
        #   "with_name": {
        #       model_id: model,
        #       ...
        #   },
        #   ...
        # }
        key_mapping = {}
        for key in parent_data['with']:
            key_mapping[key] = {}
            for row in parent_data['with'][key]:
                key_mapping[key][row['id']] = row

        def get_datum(data, key, prefix=''):
            """
            Recursively gets the correct data point from the dataset

            @param data: Dict At the first call this is the 'data' value in the response. In the recursion, we go deeper
            in the dict, and we get part of the original dict. However, when we go through a with, we may end up in the
            data from one of the key mappings.
            @param key: String The key of the value we try to find. The level of the dictionary where we need to find
            the data are delimited by a .
            @param prefix: String Dotted path already walked; used to look up the relation in 'with_mapping'.
            @return: Any: The data point present at key
            """
            # At the deepest level we can just get the specified key
            if '.' not in key:
                if key not in data:
                    raise Exception("{} not found in data: {}".format(key, data))
                return data[key]
            else:
                """
                If we are not at the deepest level, there are two possibilities:

                - We want to go into an dict. This can be because the model has a json encoded dicts as a value, or because
                the array is created by custom logic
                - We want to follow a relation. In this case we either have a integer (in case of a X-to-one relation) or a
                list of integers (in case of a X-to-many) relation. In this case, we reconstruct the whole path into the data
                (We use the prefix for this). This is then mapped to the correct related model(s), and we go recursively
                deeper in this models.
                """
                head_key, subkey = key.split('.', 1)
                if head_key in data:
                    new_prefix = '{}.{}'.format(prefix, head_key)
                    if type(data[head_key]) == dict:
                        return get_datum(data[head_key], subkey, new_prefix)
                    else:
                        # Assume that we have a relation now; normalize to a
                        # list of foreign keys.
                        fk_ids = data[head_key]
                        if type(fk_ids) != list:
                            fk_ids = [fk_ids]

                        # new_prefix starts with a '.', strip it before the lookup.
                        prefix_key = parent_data['with_mapping'][new_prefix[1:]]

                        datums = [str(get_datum(key_mapping[prefix_key][fk_id], subkey, new_prefix)) for fk_id in fk_ids]
                        return self.csv_settings.multi_value_delimiter.join(
                            datums
                        )
                else:
                    raise Exception("{} not found in {}".format(head_key, data))

        for row in parent_data['data']:
            data = []
            for col_definition in self.csv_settings.column_map:
                datum = get_datum(row, col_definition[0])
                # Optional third tuple element is a transform callback.
                if len(col_definition) >= 3:
                    transform_function = col_definition[2]
                    datum = transform_function(datum, row, key_mapping)
                if type(datum) == list:
                    datum = self.csv_settings.multi_value_delimiter.join(datum)
                data.append(datum)
            file_adapter.add_row(data)

    @list_route(name='download', methods=['GET'])
    def download(self, request):
        """
        Download the get request in csv form

        @param request:
        @return:
        """
        if self.csv_settings is None:
            raise Exception('No csv settings set!')

        file_adapter = self.csv_settings.csv_adapter(request)
        self._generate_csv_file(request, file_adapter)
        return file_adapter.get_response()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    # Attaches a toDF() convenience method to every RDD, bound to the given
    # SparkSession (called from SparkSession.__init__).
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`

        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``

        :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
        :return: a DataFrame

        >>> rdd.toDF().collect()
        [Row(name=u'Alice', age=1)]
        """
        return sparkSession.createDataFrame(self, schema, sampleRatio)

    RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
    class Builder(object):
        """Builder for :class:`SparkSession`.
        """

        # Guards reads/writes of _options and session creation in getOrCreate.
        _lock = RLock()
        # NOTE(review): _options is a class-level dict, so it is shared by all
        # Builder instances. In practice only the singleton
        # ``SparkSession.builder`` is used, but a second Builder() would see
        # the same accumulated options.
        _options = {}

        @since(2.0)
        def config(self, key=None, value=None, conf=None):
            """Sets a config option. Options set using this method are automatically propagated to
            both :class:`SparkConf` and :class:`SparkSession`'s own configuration.

            For an existing SparkConf, use `conf` parameter.

            >>> from pyspark.conf import SparkConf
            >>> SparkSession.builder.config(conf=SparkConf())
            <pyspark.sql.session...

            For a (key, value) pair, you can omit parameter names.

            >>> SparkSession.builder.config("spark.some.config.option", "some-value")
            <pyspark.sql.session...

            :param key: a key name string for configuration property
            :param value: a value for configuration property
            :param conf: an instance of :class:`SparkConf`
            """
            with self._lock:
                if conf is None:
                    # Single (key, value) pair; the value is stringified.
                    self._options[key] = str(value)
                else:
                    # Copy every entry from the given SparkConf.
                    for (k, v) in conf.getAll():
                        self._options[k] = v
                return self

        @since(2.0)
        def master(self, master):
            """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
            to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
            cluster.

            :param master: a url for spark master
            """
            return self.config("spark.master", master)

        @since(2.0)
        def appName(self, name):
            """Sets a name for the application, which will be shown in the Spark web UI.

            If no application name is set, a randomly generated name will be used.

            :param name: an application name
            """
            return self.config("spark.app.name", name)

        @since(2.0)
        def enableHiveSupport(self):
            """Enables Hive support, including connectivity to a persistent Hive metastore, support
            for Hive serdes, and Hive user-defined functions.
            """
            return self.config("spark.sql.catalogImplementation", "hive")

        @since(2.0)
        def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.

            This method first checks whether there is a valid global default SparkSession, and if
            yes, return that one. If no valid global default SparkSession exists, the method
            creates a new SparkSession and assigns the newly created SparkSession as the global
            default.

            >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
            >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
            True

            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.

            >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
            >>> s1.conf.get("k1") == s2.conf.get("k1")
            True
            >>> s1.conf.get("k2") == s2.conf.get("k2")
            True
            """
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf
                # The global default session, set by SparkSession.__init__.
                session = SparkSession._instantiatedContext
                if session is None:
                    sparkConf = SparkConf()
                    for key, value in self._options.items():
                        sparkConf.set(key, value)
                    sc = SparkContext.getOrCreate(sparkConf)
                    # This SparkContext may be an existing one.
                    for key, value in self._options.items():
                        # we need to propagate the confs
                        # before we create the SparkSession. Otherwise, confs like
                        # warehouse path and metastore url will not be set correctly (
                        # these confs cannot be changed once the SparkSession is created).
                        sc._conf.set(key, value)
                    session = SparkSession(sc)
                # Apply this builder's options to the (possibly pre-existing)
                # session's SQL conf and to the SparkContext conf.
                for key, value in self._options.items():
                    session._jsparkSession.sessionState().conf().setConfString(key, value)
                for key, value in self._options.items():
                    session.sparkContext._conf.set(key, value)
                return session
builder = Builder()
_instantiatedContext = None
    @ignore_unicode_prefix
    def __init__(self, sparkContext, jsparkSession=None):
        """Creates a new SparkSession.

        >>> from datetime import datetime
        >>> spark = SparkSession(sc)
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
        ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ...     time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> df = allTypes.toDF()
        >>> df.createOrReplaceTempView("allTypes")
        >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ...            'from allTypes where b and i > 0').collect()
        [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
            dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
        >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        from pyspark.sql.context import SQLContext
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        # Create a fresh JVM-side SparkSession unless one was handed in
        # (e.g. by newSession()).
        if jsparkSession is None:
            jsparkSession = self._jvm.SparkSession(self._jsc.sc())
        self._jsparkSession = jsparkSession
        self._jwrapped = self._jsparkSession.sqlContext()
        self._wrapped = SQLContext(self._sc, self, self._jwrapped)
        # Expose RDD.toDF() bound to this session.
        _monkey_patch_RDD(self)
        # NOTE(review): presumably installs the converter that maps JVM
        # exceptions to Python ones — see pyspark.sql.utils.
        install_exception_handler()
        # The first session created becomes the global default used by
        # Builder.getOrCreate.
        if SparkSession._instantiatedContext is None:
            SparkSession._instantiatedContext = self
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
    @property
    @since(2.0)
    def sparkContext(self):
        """Returns the underlying :class:`SparkContext`."""
        # The context this session was constructed with (see __init__).
        return self._sc
    @property
    @since(2.0)
    def version(self):
        """The version of Spark on which this application is running."""
        # Delegates to the JVM-side SparkSession.
        return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
    @property
    @since(2.0)
    def udf(self):
        """Returns a :class:`UDFRegistration` for UDF registration.

        :return: :class:`UDFRegistration`
        """
        # Imported locally — presumably to avoid a circular import with
        # pyspark.sql.context; confirm before hoisting to module level.
        from pyspark.sql.context import UDFRegistration
        return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
    def _inferSchema(self, rdd, samplingRatio=None):
        """
        Infer schema from an RDD of Row or tuple.

        :param rdd: an RDD of Row or tuple
        :param samplingRatio: sampling ratio, or no sampling (default)
        :return: :class:`pyspark.sql.types.StructType`
        """
        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if type(first) is dict:
            warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                          "Use pyspark.sql.Row instead")

        if samplingRatio is None:
            schema = _infer_schema(first)
            if _has_nulltype(schema):
                # Some field came out as NullType: merge schemas of up to 99
                # more rows, stopping early once every field has a concrete
                # type.
                for row in rdd.take(100)[1:]:
                    schema = _merge_type(schema, _infer_schema(row))
                    if not _has_nulltype(schema):
                        break
                else:
                    # for/else: reached only when the loop was NOT broken out
                    # of, i.e. a NullType field remains after 100 rows.
                    raise ValueError("Some of types cannot be determined by the "
                                     "first 100 rows, please try again with sampling")
        else:
            # Sample the RDD (unless the ratio is effectively 1) and merge the
            # inferred schema of every sampled row.
            if samplingRatio < 0.99:
                rdd = rdd.sample(False, float(samplingRatio))
            schema = rdd.map(_infer_schema).reduce(_merge_type)
        return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
    @since(2.0)
    @ignore_unicode_prefix
    def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
        """
        Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.

        When ``schema`` is a list of column names, the type of each column
        will be inferred from ``data``.

        When ``schema`` is ``None``, it will try to infer the schema (column names and types)
        from ``data``, which should be an RDD of :class:`Row`,
        or :class:`namedtuple`, or :class:`dict`.

        When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
        the real data, or an exception will be thrown at runtime. If the given schema is not
        :class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
        each record will also be wrapped into a tuple, which can be converted to row later.

        If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.

        :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
            etc.), or :class:`list`, or :class:`pandas.DataFrame`.
        :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
            column names, default is ``None``.  The data type string format equals to
            :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
            omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
            ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
            ``int`` as a short name for ``IntegerType``.
        :param samplingRatio: the sample ratio of rows used for inferring
        :param verifySchema: verify data types of every row against schema.
        :return: :class:`DataFrame`

        .. versionchanged:: 2.1
           Added verifySchema.

        >>> l = [('Alice', 1)]
        >>> spark.createDataFrame(l).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> spark.createDataFrame(l, ['name', 'age']).collect()
        [Row(name=u'Alice', age=1)]

        >>> d = [{'name': 'Alice', 'age': 1}]
        >>> spark.createDataFrame(d).collect()
        [Row(age=1, name=u'Alice')]

        >>> rdd = sc.parallelize(l)
        >>> spark.createDataFrame(rdd).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> df = spark.createDataFrame(rdd, ['name', 'age'])
        >>> df.collect()
        [Row(name=u'Alice', age=1)]

        >>> from pyspark.sql import Row
        >>> Person = Row('name', 'age')
        >>> person = rdd.map(lambda r: Person(*r))
        >>> df2 = spark.createDataFrame(person)
        >>> df2.collect()
        [Row(name=u'Alice', age=1)]

        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...    StructField("name", StringType(), True),
        ...    StructField("age", IntegerType(), True)])
        >>> df3 = spark.createDataFrame(rdd, schema)
        >>> df3.collect()
        [Row(name=u'Alice', age=1)]

        >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
        [Row(0=1, 1=2)]

        >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
        [Row(a=u'Alice', b=1)]
        >>> rdd = rdd.map(lambda row: row[1])
        >>> spark.createDataFrame(rdd, "int").collect()
        [Row(value=1)]
        >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Py4JJavaError: ...
        """
        if isinstance(data, DataFrame):
            raise TypeError("data is already a DataFrame")
        if isinstance(schema, basestring):
            # Datatype strings like "a: string, b: int" are parsed eagerly.
            schema = _parse_datatype_string(schema)
        try:
            import pandas
            has_pandas = True
        except Exception:
            # pandas is optional; fall back to treating data as a plain
            # iterable when it is unavailable.
            has_pandas = False
        if has_pandas and isinstance(data, pandas.DataFrame):
            # Flatten the pandas frame into a list of plain row lists,
            # defaulting the column names from the frame itself.
            if schema is None:
                schema = [str(x) for x in data.columns]
            data = [r.tolist() for r in data.to_records(index=False)]
        # When verification is disabled, use a no-op verifier so the per-row
        # cost disappears entirely.
        verify_func = _verify_type if verifySchema else lambda _, t: True
        if isinstance(schema, StructType):
            def prepare(obj):
                verify_func(obj, schema)
                return obj
        elif isinstance(schema, DataType):
            # Non-struct datatype: wrap it into a single-field struct named
            # "value", and wrap every record into a 1-tuple to match.
            dataType = schema
            schema = StructType().add("value", schema)

            def prepare(obj):
                verify_func(obj, dataType)
                return obj,
        else:
            if isinstance(schema, list):
                # Normalize column names to byte strings on Python 2.
                schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
            prepare = lambda obj: obj

        if isinstance(data, RDD):
            rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
        else:
            rdd, schema = self._createFromLocal(map(prepare, data), schema)
        # Hand the python rows to the JVM and let it apply the schema there.
        jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
        jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
        df = DataFrame(jdf, self._wrapped)
        # Cache the schema so df.schema doesn't round-trip through the JVM.
        df._schema = schema
        return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
    @since(2.0)
    def stop(self):
        """Stop the underlying :class:`SparkContext`.
        """
        self._sc.stop()
        # Clear the cached singleton so a later getOrCreate() builds a new one.
        SparkSession._instantiatedContext = None
    @since(2.0)
    def __enter__(self):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
        """
        # Nothing to set up on entry; the session itself is the resource.
        return self
    @since(2.0)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

        Specifically stop the SparkSession on exit of the with block.
        """
        # Exceptions are not suppressed (implicit return of None).
        self.stop()
def _test():
    """Run this module's doctests against a local SparkContext."""
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session

    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.sql.session.__dict__.copy()
    # Shared fixtures referenced by the doctests: sc, spark, rdd and df.
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    # Always stop the context, then signal failure via the exit code.
    globs['sc'].stop()
    if failure_count:
        exit(-1)
# Run the doctest suite when this module is executed directly.
if __name__ == "__main__":
    _test()
| |
"""
This module contains helper functions and utilities.
"""
from flash.models import Collection, Deck
from flash import queries
import string
import random
import os
import StringIO
import xlrd, xlwt
def parse_deck_template_file(card_template, file_contents, mappings=None, custom=False):
    """Parses a spreadsheet into a list of cards.

    :param card_template: template whose ordered fields define the columns.
    :param file_contents: raw contents of the uploaded spreadsheet file.
    :param mappings: optional dict with 'Image' and 'Audio' sub-dicts used to
        translate file names found in the sheet into stored media values.
    :param custom: True when the sheet carries the 3-row header
        (label/side/type) instead of a single label row.
    :return: list of cards, each a list of {"field_id", "value"} dicts.
    """
    fields = card_template.fields.all().order_by('sort_order')
    nfields = len(fields)
    workbook = xlrd.open_workbook(file_contents=file_contents)
    sheet = workbook.sheet_by_index(0)
    # Header rows depend only on the format, not on the row being read;
    # compute them once instead of on every loop iteration.
    if custom:
        rows_to_skip = [0, 1, 2]
    else:
        rows_to_skip = [0]
    cards = []
    for row_index in range(sheet.nrows):
        if row_index in rows_to_skip:
            continue  # Skip header row(s)
        card = []
        for col_index in range(nfields):
            val = sheet.cell(row_index, col_index).value
            if mappings is not None:
                # Media columns hold file names; map them to stored values,
                # falling back to '' when the name is unknown.
                if fields[col_index].get_field_type() == 'I':
                    val = mappings['Image'].get(val, '')
                if fields[col_index].get_field_type() == 'A':
                    val = mappings['Audio'].get(val, '')
            card.append({
                "field_id": fields[col_index].id,
                "value": val,
            })
        cards.append(card)
    return cards
def template_matches_file(card_template, file_contents):
    """
    Checks if the uploaded spreadsheet has the same template as the collection.
    """
    workbook = xlrd.open_workbook(file_contents=file_contents)
    sheet = workbook.sheet_by_index(0)
    fields = card_template.fields.all().order_by('sort_order')
    if len(fields) != sheet.ncols:
        return False
    # Compare the sheet's header row against the template labels, in order.
    expected = [str(field.label) for field in fields]
    header = [sheet.cell(0, col).value for col in range(sheet.ncols)]
    return header == expected
def correct_custom_format(file_contents):
    """
    Checks if the uploaded spreadsheet follows the correct format.
    """
    workbook = xlrd.open_workbook(file_contents=file_contents)
    sheet = workbook.sheet_by_index(0)
    valid_sides = ('Front', 'Back')
    valid_types = ('Audio', 'Image', 'Text', 'Video', 'Math')
    for col in range(sheet.ncols):
        # Row 1 must name the card side, row 2 the field type.
        if sheet.cell(1, col).value not in valid_sides:
            return False
        if sheet.cell(2, col).value not in valid_types:
            return False
    return True
def get_card_template(file_contents):
    """
    Returns the card template parsed from the uploaded spreadsheet.
    """
    workbook = xlrd.open_workbook(file_contents=file_contents)
    sheet = workbook.sheet_by_index(0)
    # Rows 0-2 hold label, side and type; the column index is the sort order.
    return [
        {
            "label": sheet.cell(0, col).value,
            "side": sheet.cell(1, col).value,
            "type": sheet.cell(2, col).value,
            "sort_order": col,
        }
        for col in range(sheet.ncols)
    ]
def get_file_names(card_template, file_contents, custom=False):
    """
    Returns the file names that appear in the uploaded spreadsheet.
    """
    fields = card_template.fields.all().order_by('sort_order')
    # Only audio ('A') and image ('I') columns carry file names.
    media_labels = [str(field.label) for field in fields
                    if field.get_field_type() in ['A', 'I']]
    workbook = xlrd.open_workbook(file_contents=file_contents)
    sheet = workbook.sheet_by_index(0)
    media_columns = [col for col in range(len(fields))
                     if sheet.cell(0, col).value in media_labels]
    start_row = 3 if custom else 1
    files = []
    for row in range(start_row, sheet.nrows):
        for col in media_columns:
            val = sheet.cell(row, col).value
            # Preserve first-seen order and drop blanks/duplicates.
            if val != '' and val not in files:
                files.append(val)
    return files
def create_deck_template_file(card_template):
    """Creates a spreadsheet template for populating a deck of cards."""
    output = StringIO.StringIO()
    workbook = xlwt.Workbook(encoding='utf8')
    worksheet = workbook.add_sheet('sheet1')
    # Single header row: one column per template field, in sort order.
    ordered_fields = card_template.fields.all().order_by('sort_order')
    for idx, field in enumerate(ordered_fields):
        worksheet.write(0, idx, label=field.label)
    workbook.save(output)
    try:
        return output.getvalue()
    finally:
        output.close()
def create_deck_file(deck_id):
    """Creates a spreadsheet containing a deck of cards.

    :param deck_id: primary key of the Deck to export.
    :return: the binary spreadsheet contents (same form as the other
        create_*_file helpers in this module, which return
        ``output.getvalue()``).
    """
    # Raises Deck.DoesNotExist early if the id is invalid.
    deck = Deck.objects.get(id=deck_id)
    output = StringIO.StringIO()
    workbook = xlwt.Workbook(encoding='utf8')
    worksheet = workbook.add_sheet('sheet1')
    card_list = queries.getDeckCardsList(deck_id)
    row = 0
    for card in card_list:
        if row == 0:
            # The first card also emits the header row of field labels.
            for idx, field in enumerate(card['fields']):
                worksheet.write(row, idx, label=field['label'])
            row = row + 1
        for idx, field in enumerate(card['fields']):
            field_value = field['value']
            field_type = field['type']
            if field_type in ('I', 'A'):
                field_value = os.path.split(field_value)[1]  # strips the media folder path
            worksheet.write(row, idx, label=field_value)
        # BUG FIX: advance to the next row after each card; previously every
        # card was written to the same row, which xlwt rejects (cell
        # overwrite) for any deck with more than one card.
        row = row + 1
    workbook.save(output)
    # BUG FIX: return the serialized file bytes, not the Workbook object
    # (the correct line had been commented out).
    file_output = output.getvalue()
    output.close()
    return file_output
def generate_random_id(size=10, chars=string.ascii_uppercase + string.digits):
    """
    Returns a random id with the given size and from the given set of characters.

    Adapted from http://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
    """
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def create_custom_template_file():
    """
    Creates a sample custom template spreadsheet.
    """
    output = StringIO.StringIO()
    workbook = xlwt.Workbook(encoding='utf8')
    worksheet = workbook.add_sheet('sheet1')
    # Rows: labels, sides, types, then two example data rows.
    rows = [['Image', 'Artist','Date', 'Title', 'Materials', 'Location', 'Audio', 'Equation'],
            ['Front', 'Front', 'Front', 'Back', 'Back', 'Back', 'Front', 'Back'],
            ['Image', 'Text', 'Text', 'Text', 'Text', 'Text', 'Audio', 'Math'],
            ['','Some artist','','','','','sound.mp3', '\(x=\frac{1+y}{1+2z^2}\)'],
            ['pisa_tower.jpeg', '','','','','','']]
    for i, row in enumerate(rows):
        for j, cell_value in enumerate(row):
            worksheet.write(i, j, label=cell_value)
    workbook.save(output)
    try:
        return output.getvalue()
    finally:
        output.close()
| |
# Copyright (c) 2014 VMware, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for images.
"""
import os
import tarfile
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from oslo_vmware import rw_handles
from nova import exception
from nova import objects
from nova import test
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vm_util
class VMwareImagesTestCase(test.NoDBTestCase):
    """Unit tests for Vmware API connection calls."""

    def test_fetch_image(self):
        """Test fetching images."""
        dc_name = 'fake-dc'
        file_path = 'fake_file'
        ds_name = 'ds1'
        host = mock.MagicMock()
        port = 7443
        context = mock.MagicMock()

        image_data = {
            'id': uuids.image,
            'disk_format': 'vmdk',
            'size': 512,
        }
        read_file_handle = mock.MagicMock()
        write_file_handle = mock.MagicMock()
        read_iter = mock.MagicMock()
        instance = objects.Instance(id=1,
                                    uuid=uuids.foo,
                                    image_ref=image_data['id'])

        def fake_read_handle(read_iter):
            return read_file_handle

        def fake_write_handle(host, port, dc_name, ds_name, cookies,
                              file_path, file_size):
            return write_file_handle

        # Patch the glance read side, the datastore write side and the
        # transfer itself, then check each collaborator was wired correctly.
        with test.nested(
             mock.patch.object(rw_handles, 'ImageReadHandle',
                               side_effect=fake_read_handle),
             mock.patch.object(rw_handles, 'FileWriteHandle',
                               side_effect=fake_write_handle),
             mock.patch.object(images, 'image_transfer'),
             mock.patch.object(images.IMAGE_API, 'get',
                               return_value=image_data),
             mock.patch.object(images.IMAGE_API, 'download',
                               return_value=read_iter),
        ) as (glance_read, http_write, image_transfer, image_show,
              image_download):
            images.fetch_image(context, instance,
                               host, port, dc_name,
                               ds_name, file_path)

        glance_read.assert_called_once_with(read_iter)
        http_write.assert_called_once_with(host, port, dc_name, ds_name, None,
                                           file_path, image_data['size'])
        image_transfer.assert_called_once_with(read_file_handle,
                                               write_file_handle)
        image_download.assert_called_once_with(context, instance['image_ref'])
        image_show.assert_called_once_with(context, instance['image_ref'])

    def _setup_mock_get_remote_image_service(self,
                                             mock_get_remote_image_service,
                                             metadata):
        # Helper: make get_remote_image_service return a service whose
        # show() yields the given metadata.
        mock_image_service = mock.MagicMock()
        mock_image_service.show.return_value = metadata
        mock_get_remote_image_service.return_value = [mock_image_service, 'i']

    def test_get_vmdk_name_from_ovf(self):
        # Uses the ovf.xml fixture that lives next to this test module.
        ovf_path = os.path.join(os.path.dirname(__file__), 'ovf.xml')
        with open(ovf_path) as f:
            ovf_descriptor = f.read()
        vmdk_name = images.get_vmdk_name_from_ovf(ovf_descriptor)
        self.assertEqual("Damn_Small_Linux-disk1.vmdk", vmdk_name)

    @mock.patch('oslo_vmware.rw_handles.ImageReadHandle')
    @mock.patch('oslo_vmware.rw_handles.VmdkWriteHandle')
    @mock.patch.object(tarfile, 'open')
    def test_fetch_image_ova(self, mock_tar_open, mock_write_class,
                             mock_read_class):
        session = mock.MagicMock()
        ovf_descriptor = None
        ovf_path = os.path.join(os.path.dirname(__file__), 'ovf.xml')
        with open(ovf_path) as f:
            ovf_descriptor = f.read()

        with test.nested(
            mock.patch.object(images.IMAGE_API, 'get'),
            mock.patch.object(images.IMAGE_API, 'download'),
            mock.patch.object(images, 'image_transfer'),
            mock.patch.object(images, '_build_shadow_vm_config_spec'),
            mock.patch.object(session, '_call_method'),
            mock.patch.object(vm_util, 'get_vmdk_info')
        ) as (mock_image_api_get,
              mock_image_api_download,
              mock_image_transfer,
              mock_build_shadow_vm_config_spec,
              mock_call_method,
              mock_get_vmdk_info):
            image_data = {'id': 'fake-id',
                          'disk_format': 'vmdk',
                          'size': 512}
            instance = mock.MagicMock()
            instance.image_ref = image_data['id']
            mock_image_api_get.return_value = image_data

            vm_folder_ref = mock.MagicMock()
            res_pool_ref = mock.MagicMock()
            context = mock.MagicMock()

            mock_read_handle = mock.MagicMock()
            mock_read_class.return_value = mock_read_handle
            mock_write_handle = mock.MagicMock()
            mock_write_class.return_value = mock_write_handle
            mock_write_handle.get_imported_vm.return_value = \
                mock.sentinel.vm_ref

            # Fake an OVA tarball containing one OVF descriptor and one VMDK.
            mock_ovf = mock.MagicMock()
            mock_ovf.name = 'dsl.ovf'
            mock_vmdk = mock.MagicMock()
            mock_vmdk.name = "Damn_Small_Linux-disk1.vmdk"

            def fake_extract(name):
                if name == mock_ovf:
                    m = mock.MagicMock()
                    m.read.return_value = ovf_descriptor
                    return m
                elif name == mock_vmdk:
                    return mock_read_handle

            mock_tar = mock.MagicMock()
            mock_tar.__iter__ = mock.Mock(return_value = iter([mock_ovf,
                                                               mock_vmdk]))
            mock_tar.extractfile = fake_extract
            mock_tar_open.return_value.__enter__.return_value = mock_tar

            images.fetch_image_ova(
                    context, instance, session, 'fake-vm', 'fake-datastore',
                    vm_folder_ref, res_pool_ref)

            mock_tar_open.assert_called_once_with(mode='r|',
                                                  fileobj=mock_read_handle)
            mock_image_transfer.assert_called_once_with(mock_read_handle,
                                                        mock_write_handle)
            mock_get_vmdk_info.assert_called_once_with(
                    session, mock.sentinel.vm_ref, 'fake-vm')
            # The shadow VM is unregistered after import.
            mock_call_method.assert_called_once_with(
                    session.vim, "UnregisterVM", mock.sentinel.vm_ref)

    @mock.patch('oslo_vmware.rw_handles.ImageReadHandle')
    @mock.patch('oslo_vmware.rw_handles.VmdkWriteHandle')
    def test_fetch_image_stream_optimized(self,
                                          mock_write_class,
                                          mock_read_class):
        """Test fetching streamOptimized disk image."""
        session = mock.MagicMock()

        with test.nested(
            mock.patch.object(images.IMAGE_API, 'get'),
            mock.patch.object(images.IMAGE_API, 'download'),
            mock.patch.object(images, 'image_transfer'),
            mock.patch.object(images, '_build_shadow_vm_config_spec'),
            mock.patch.object(session, '_call_method'),
            mock.patch.object(vm_util, 'get_vmdk_info')
        ) as (mock_image_api_get,
              mock_image_api_download,
              mock_image_transfer,
              mock_build_shadow_vm_config_spec,
              mock_call_method,
              mock_get_vmdk_info):
            image_data = {'id': 'fake-id',
                          'disk_format': 'vmdk',
                          'size': 512}
            instance = mock.MagicMock()
            instance.image_ref = image_data['id']
            mock_image_api_get.return_value = image_data

            vm_folder_ref = mock.MagicMock()
            res_pool_ref = mock.MagicMock()
            context = mock.MagicMock()

            mock_read_handle = mock.MagicMock()
            mock_read_class.return_value = mock_read_handle
            mock_write_handle = mock.MagicMock()
            mock_write_class.return_value = mock_write_handle
            mock_write_handle.get_imported_vm.return_value = \
                mock.sentinel.vm_ref

            images.fetch_image_stream_optimized(
                context, instance, session, 'fake-vm', 'fake-datastore',
                vm_folder_ref, res_pool_ref)

            mock_image_transfer.assert_called_once_with(mock_read_handle,
                                                        mock_write_handle)
            # The shadow VM is unregistered after import.
            mock_call_method.assert_called_once_with(
                session.vim, "UnregisterVM", mock.sentinel.vm_ref)
            mock_get_vmdk_info.assert_called_once_with(
                session, mock.sentinel.vm_ref, 'fake-vm')

    def test_from_image_with_image_ref(self):
        raw_disk_size_in_gb = 83
        raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi
        mdata = {'size': raw_disk_size_in_bytes,
                 'disk_format': 'vmdk',
                 'properties': {
                     "vmware_ostype": constants.DEFAULT_OS_TYPE,
                     "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE,
                     "vmware_disktype": constants.DEFAULT_DISK_TYPE,
                     "hw_vif_model": constants.DEFAULT_VIF_MODEL,
                     "vmware_linked_clone": True}}
        mdata = objects.ImageMeta.from_dict(mdata)
        with mock.patch.object(
            images, 'get_vsphere_location', return_value=None,
        ):
            img_props = images.VMwareImage.from_image(None, uuids.image, mdata)

        image_size_in_kb = raw_disk_size_in_bytes / units.Ki

        # assert that defaults are set and no value returned is left empty
        self.assertEqual(constants.DEFAULT_OS_TYPE, img_props.os_type)
        self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
                         img_props.adapter_type)
        self.assertEqual(constants.DEFAULT_DISK_TYPE, img_props.disk_type)
        self.assertEqual(constants.DEFAULT_VIF_MODEL, img_props.vif_model)
        self.assertTrue(img_props.linked_clone)
        self.assertEqual(image_size_in_kb, img_props.file_size_in_kb)

    def _image_build(self, image_lc_setting, global_lc_setting,
                     disk_format=constants.DEFAULT_DISK_FORMAT,
                     os_type=constants.DEFAULT_OS_TYPE,
                     adapter_type=constants.DEFAULT_ADAPTER_TYPE,
                     disk_type=constants.DEFAULT_DISK_TYPE,
                     vif_model=constants.DEFAULT_VIF_MODEL,
                     vsphere_location=None):
        # Helper: build a VMwareImage with the given image-level and global
        # linked-clone settings; image_lc_setting=None omits the property.
        self.flags(use_linked_clone=global_lc_setting, group='vmware')
        raw_disk_size_in_gb = 93
        raw_disk_size_in_btyes = raw_disk_size_in_gb * units.Gi
        mdata = {'size': raw_disk_size_in_btyes,
                 'disk_format': disk_format,
                 'properties': {
                     "vmware_ostype": os_type,
                     "vmware_adaptertype": adapter_type,
                     "vmware_disktype": disk_type,
                     "hw_vif_model": vif_model}}

        if image_lc_setting is not None:
            mdata['properties']["vmware_linked_clone"] = image_lc_setting

        context = mock.Mock()
        mdata = objects.ImageMeta.from_dict(mdata)
        with mock.patch.object(
            images, 'get_vsphere_location', return_value=vsphere_location,
        ):
            return images.VMwareImage.from_image(context, uuids.image, mdata)

    def test_use_linked_clone_override_nf(self):
        image_props = self._image_build(None, False)
        self.assertFalse(image_props.linked_clone,
                         "No overrides present but still overridden!")

    def test_use_linked_clone_override_nt(self):
        image_props = self._image_build(None, True)
        self.assertTrue(image_props.linked_clone,
                        "No overrides present but still overridden!")

    def test_use_linked_clone_override_ny(self):
        image_props = self._image_build(None, "yes")
        self.assertTrue(image_props.linked_clone,
                        "No overrides present but still overridden!")

    def test_use_linked_clone_override_ft(self):
        image_props = self._image_build(False, True)
        self.assertFalse(image_props.linked_clone,
                         "image level metadata failed to override global")

    def test_use_linked_clone_override_string_nt(self):
        image_props = self._image_build("no", True)
        self.assertFalse(image_props.linked_clone,
                         "image level metadata failed to override global")

    def test_use_linked_clone_override_string_yf(self):
        image_props = self._image_build("yes", False)
        self.assertTrue(image_props.linked_clone,
                        "image level metadata failed to override global")

    def test_use_disk_format_iso(self):
        image = self._image_build(None, True, disk_format='iso')
        self.assertEqual('iso', image.file_type)
        self.assertTrue(image.is_iso)

    def test_use_bad_disk_format(self):
        self.assertRaises(exception.InvalidDiskFormat,
                          self._image_build,
                          None,
                          True,
                          disk_format='bad_disk_format')

    def test_image_no_defaults(self):
        image = self._image_build(False, False,
                                  disk_format='iso',
                                  os_type='otherGuest',
                                  adapter_type='lsiLogic',
                                  disk_type='preallocated',
                                  vif_model='e1000e')
        self.assertEqual('iso', image.file_type)
        self.assertEqual('otherGuest', image.os_type)
        self.assertEqual('lsiLogic', image.adapter_type)
        self.assertEqual('preallocated', image.disk_type)
        self.assertEqual('e1000e', image.vif_model)
        self.assertFalse(image.linked_clone)

    def test_image_defaults(self):
        image = images.VMwareImage(image_id='fake-image-id')

        # N.B. We intentially don't use the defined constants here. Amongst
        # other potential failures, we're interested in changes to their
        # values, which would not otherwise be picked up.
        self.assertEqual('otherGuest', image.os_type)
        self.assertEqual('lsiLogic', image.adapter_type)
        self.assertEqual('preallocated', image.disk_type)
        self.assertEqual('e1000', image.vif_model)

    def test_use_vsphere_location(self):
        image = self._image_build(None, True, vsphere_location='vsphere://ok')
        self.assertEqual('vsphere://ok', image.vsphere_location)

    def test_get_vsphere_location(self):
        # The first vsphere:// URL among the image locations is returned.
        expected = 'vsphere://ok'
        metadata = {'locations': [{}, {'url': 'http://ko'}, {'url': expected}]}
        with mock.patch.object(images.IMAGE_API, 'get', return_value=metadata):
            context = mock.Mock()
            observed = images.get_vsphere_location(context, 'image_id')
            self.assertEqual(expected, observed)

    def test_get_no_vsphere_location(self):
        metadata = {'locations': [{}, {'url': 'http://ko'}]}
        with mock.patch.object(images.IMAGE_API, 'get', return_value=metadata):
            context = mock.Mock()
            observed = images.get_vsphere_location(context, 'image_id')
            self.assertIsNone(observed)

    def test_get_vsphere_location_no_image(self):
        context = mock.Mock()
        observed = images.get_vsphere_location(context, None)
        self.assertIsNone(observed)
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import cPickle as pickle
import errno
import socket
import time
import xmlrpclib
from eventlet import queue
from eventlet import timeout
from oslo.config import cfg
from nova import context
from nova import exception
from nova.i18n import _, _LE
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import versionutils
from nova import utils
from nova import version
from nova.virt.xenapi.client import objects as cli_objects
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
LOG = logging.getLogger(__name__)

# XenAPI session tunables; registered under the [xenserver] config group.
xenapi_session_opts = [
    cfg.IntOpt('login_timeout',
               default=10,
               help='Timeout in seconds for XenAPI login.'),
    cfg.IntOpt('connection_concurrent',
               default=5,
               help='Maximum number of concurrent XenAPI connections. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
]

CONF = cfg.CONF
CONF.register_opts(xenapi_session_opts, 'xenserver')
# CONF.host is declared in nova.netconf; this session module reads it when
# resolving pool metadata for a slave host.
CONF.import_opt('host', 'nova.netconf')
def apply_session_helpers(session):
    """Attach the per-object XenAPI helper proxies to *session*."""
    helpers = (
        ('VM', cli_objects.VM),
        ('SR', cli_objects.SR),
        ('VDI', cli_objects.VDI),
        ('VBD', cli_objects.VBD),
        ('PBD', cli_objects.PBD),
        ('PIF', cli_objects.PIF),
        ('VLAN', cli_objects.VLAN),
        ('host', cli_objects.Host),
        ('network', cli_objects.Network),
        ('pool', cli_objects.Pool),
    )
    for attr_name, helper_cls in helpers:
        setattr(session, attr_name, helper_cls(session))
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls."""
# This is not a config option as it should only ever be
# changed in development environments.
# MAJOR VERSION: Incompatible changes with the plugins
# MINOR VERSION: Compatible changes, new plguins, etc
PLUGIN_REQUIRED_VERSION = '1.2'
    def __init__(self, url, user, pw):
        # Human-readable client identification sent to XenAPI at login.
        version_string = version.version_string_with_package()
        self.nova_version = _('%(vendor)s %(product)s %(version)s') % \
            {'vendor': version.vendor_string(),
             'product': version.product_string(),
             'version': version_string}
        import XenAPI  # imported lazily; only needed when this driver runs
        self.XenAPI = XenAPI
        self._sessions = queue.Queue()
        self.is_slave = False
        exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
                                          "(is the Dom0 disk full?)"))
        # Order matters: the first login may redirect us to the pool master
        # (rewriting url), and the lookups below need the session pool ready.
        url = self._create_first_session(url, user, pw, exception)
        self._populate_session_pool(url, user, pw, exception)
        self.host_uuid = self._get_host_uuid()
        self.host_ref = self._get_host_ref()
        self.product_version, self.product_brand = \
            self._get_product_version_and_brand()
        self._verify_plugin_version()
        apply_session_helpers(self)
def _verify_plugin_version(self):
requested_version = self.PLUGIN_REQUIRED_VERSION
current_version = self.call_plugin_serialized(
'nova_plugin_version', 'get_version')
if not versionutils.is_compatible(requested_version, current_version):
raise self.XenAPI.Failure(
_("Plugin version mismatch (Expected %(exp)s, got %(got)s)") %
{'exp': requested_version, 'got': current_version})
    def _create_first_session(self, url, user, pw, exception):
        """Log in once, following a redirect to the pool master if needed.

        Returns the (possibly rewritten) url so later sessions connect
        straight to the master.
        """
        try:
            session = self._create_session(url)
            with timeout.Timeout(CONF.xenserver.login_timeout, exception):
                session.login_with_password(user, pw,
                                            self.nova_version, 'OpenStack')
        except self.XenAPI.Failure as e:
            # if user and pw of the master are different, we're doomed!
            if e.details[0] == 'HOST_IS_SLAVE':
                # We connected to a slave; details[1] names the master.
                master = e.details[1]
                url = pool.swap_xapi_host(url, master)
                session = self.XenAPI.Session(url)
                session.login_with_password(user, pw,
                                            self.nova_version, 'OpenStack')
                self.is_slave = True
            else:
                raise
        self._sessions.put(session)
        return url
def _populate_session_pool(self, url, user, pw, exception):
for i in xrange(CONF.xenserver.connection_concurrent - 1):
session = self._create_session(url)
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw,
self.nova_version, 'OpenStack')
self._sessions.put(session)
    def _get_host_uuid(self):
        """Return the uuid of the host nova-compute manages.

        For a pool slave the uuid comes from the pool aggregate metadata
        (keyed by CONF.host); otherwise it is asked of XenAPI directly.
        """
        if self.is_slave:
            aggr = objects.AggregateList.get_by_host(
                context.get_admin_context(),
                CONF.host, key=pool_states.POOL_FLAG)[0]
            if not aggr:
                LOG.error(_LE('Host is member of a pool, but DB '
                              'says otherwise'))
                raise exception.AggregateHostNotFound()
            return aggr.metadetails[CONF.host]
        else:
            with self._get_session() as session:
                host_ref = session.xenapi.session.get_this_host(session.handle)
                return session.xenapi.host.get_uuid(host_ref)
def _get_product_version_and_brand(self):
"""Return a tuple of (major, minor, rev) for the host version and
a string of the product brand.
"""
software_version = self._get_software_version()
product_version_str = software_version.get('product_version')
# Product version is only set in some cases (e.g. XCP, XenServer) and
# not in others (e.g. xenserver-core, XAPI-XCP).
# In these cases, the platform version is the best number to use.
if product_version_str is None:
product_version_str = software_version.get('platform_version',
'0.0.0')
product_brand = software_version.get('product_brand')
product_version = utils.convert_version_to_tuple(product_version_str)
return product_version, product_brand
    def _get_software_version(self):
        """Return the host's software_version dict from XenAPI."""
        return self.call_xenapi('host.get_software_version', self.host_ref)
    def get_session_id(self):
        """Return a string session_id.  Used for vnc consoles."""
        # Borrow a pooled session just long enough to read its opaque id.
        with self._get_session() as session:
            return str(session._session)
    @contextlib.contextmanager
    def _get_session(self):
        """Return exclusive session for scope of with statement."""
        # Queue.get() blocks when the pool is exhausted, serializing callers;
        # the finally clause guarantees the session is returned to the pool
        # even if the body raises.
        session = self._sessions.get()
        try:
            yield session
        finally:
            self._sessions.put(session)
    def _get_host_ref(self):
        """Return the xenapi host on which nova-compute runs on."""
        # Resolved via the uuid computed earlier in __init__.
        with self._get_session() as session:
            return session.xenapi.host.get_by_uuid(self.host_uuid)
    def call_xenapi(self, method, *args):
        """Call the specified XenAPI method on a background thread."""
        with self._get_session() as session:
            return session.xenapi_request(method, args)
    def call_plugin(self, plugin, fn, args):
        """Call host.call_plugin on a background thread."""
        # NOTE(armando): pass the host uuid along with the args so that
        # the plugin gets executed on the right host when using XS pools
        args['host_uuid'] = self.host_uuid

        with self._get_session() as session:
            # Plugin failures are translated into nova exceptions by
            # _unwrap_plugin_exceptions.
            return self._unwrap_plugin_exceptions(
                session.xenapi.host.call_plugin,
                self.host_ref, plugin, fn, args)
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
rv = self.call_plugin(plugin, fn, params)
return pickle.loads(rv)
    def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
                                          callback, retry_cb=None, *args,
                                          **kwargs):
        """Allows a plugin to raise RetryableError so we can try again.

        Also retries when the XenAPI connection is reset.  Waits with
        exponential back-off (0.5s, doubling, capped at 15s) between
        attempts and raises PluginRetriesExceeded once num_retries
        retries have been exhausted.

        :param callback: optional; called with `kwargs` before every
            attempt, its result is only logged
        :param retry_cb: optional; called with the exception (as
            `exc=...`) before each retry
        """
        attempts = num_retries + 1
        sleep_time = 0.5
        for attempt in xrange(1, attempts + 1):
            try:
                if attempt > 1:
                    # Back off before every retry (never before the first
                    # attempt), doubling the wait up to a 15 second cap.
                    time.sleep(sleep_time)
                    sleep_time = min(2 * sleep_time, 15)
                callback_result = None
                if callback:
                    callback_result = callback(kwargs)
                msg = ('%(plugin)s.%(fn)s attempt %(attempt)d/%(attempts)d, '
                       'callback_result: %(callback_result)s')
                LOG.debug(msg,
                          {'plugin': plugin, 'fn': fn, 'attempt': attempt,
                           'attempts': attempts,
                           'callback_result': callback_result})
                return self.call_plugin_serialized(plugin, fn, *args, **kwargs)
            except self.XenAPI.Failure as exc:
                # Only retry failures the plugin explicitly marked as
                # retryable (or signal-related ones); re-raise the rest.
                if self._is_retryable_exception(exc, fn):
                    LOG.warn(_('%(plugin)s.%(fn)s failed. Retrying call.')
                             % {'plugin': plugin, 'fn': fn})
                    if retry_cb:
                        retry_cb(exc=exc)
                else:
                    raise
            except socket.error as exc:
                # A reset connection is transient; anything else is fatal.
                if exc.errno == errno.ECONNRESET:
                    LOG.warn(_('Lost connection to XenAPI during call to '
                               '%(plugin)s.%(fn)s. Retrying call.') %
                             {'plugin': plugin, 'fn': fn})
                    if retry_cb:
                        retry_cb(exc=exc)
                else:
                    raise
        raise exception.PluginRetriesExceeded(num_retries=num_retries)
def _is_retryable_exception(self, exc, fn):
_type, method, error = exc.details[:3]
if error == 'RetryableError':
LOG.debug("RetryableError, so retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
elif "signal" in method:
LOG.debug("Error due to a signal, retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
else:
return False
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
self.is_local_connection = url == "unix://local"
if self.is_local_connection:
return self.XenAPI.xapi_local()
return self.XenAPI.Session(url)
    def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
        """Parse exception details.

        XAPI plugin failures arrive as a generic XENAPI_PLUGIN_EXCEPTION
        whose details[3] holds a repr() of the real failure's params;
        decode that and re-raise it as a Failure with the decoded params.
        Anything undecodable, and all other errors, are re-raised as-is
        (ProtocolErrors are logged first).
        """
        try:
            return func(*args, **kwargs)
        except self.XenAPI.Failure as exc:
            LOG.debug("Got exception: %s", exc)
            if (len(exc.details) == 4 and
               exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
               exc.details[2] == 'Failure'):
                params = None
                try:
                    # FIXME(comstud): eval is evil.
                    # SECURITY NOTE(review): details[3] comes from our own
                    # host-side plugin, but eval on it is still fragile.
                    params = eval(exc.details[3])
                except Exception:
                    # Could not decode the nested failure; surface the
                    # original exception instead of masking it.
                    raise exc
                raise self.XenAPI.Failure(params)
            else:
                raise
        except xmlrpclib.ProtocolError as exc:
            LOG.debug("Got exception: %s", exc)
            raise
def get_rec(self, record_type, ref):
try:
return self.call_xenapi('%s.get_record' % record_type, ref)
except self.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
return None
def get_all_refs_and_recs(self, record_type):
"""Retrieve all refs and recs for a Xen record type.
Handles race-conditions where the record may be deleted between
the `get_all` call and the `get_record` call.
"""
return self.call_xenapi('%s.get_all_records' % record_type).items()
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test case that utilizes httplib2 against the API server"""
import hashlib
import httplib2
from glance.openstack.common import jsonutils
from glance.openstack.common import units
from glance.tests import functional
from glance.tests.utils import minimal_headers
from glance.tests.utils import skip_if_disabled
FIVE_KB = 5 * units.Ki  # 5120 bytes; size of every test image payload below
FIVE_GB = 5 * units.Gi  # NOTE(review): unused in this chunk — presumably an over-size boundary elsewhere
class TestApi(functional.FunctionalTest):
    """Functional tests using httplib2 against the API server"""
    @skip_if_disabled
    def test_get_head_simple_post(self):
        """
        We test the following sequential series of actions:
        0. GET /images
        - Verify no public images
        1. GET /images/detail
        - Verify no public images
        2. POST /images with public image named Image1
        and no custom properties
        - Verify 201 returned
        3. HEAD image
        - Verify HTTP headers have correct information we just added
        4. GET image
        - Verify all information on image we just added is correct
        5. GET /images
        - Verify the image we just added is returned
        6. GET /images/detail
        - Verify the image we just added is returned
        7. PUT image with custom properties of "distro" and "arch"
        - Verify 200 returned
        8. PUT image with too many custom properties
        - Verify 413 returned
        9. GET image
        - Verify updated information about image was stored
        10. PUT image
        - Remove a previously existing property.
        11. PUT image
        - Add a previously deleted property.
        12. PUT image/members/member1
        - Add member1 to image
        13. PUT image/members/member2
        - Add member2 to image
        14. GET image/members
        - List image members
        15. DELETE image/members/member1
        - Delete image member1
        16. PUT image/members
        - Attempt to replace members with an overlimit amount
        17. PUT image/members/member11
        - Attempt to add a member while at limit
        18. POST /images with another public image named Image2
        - attribute and three custom properties, "distro", "arch" & "foo"
        - Verify a 200 OK is returned
        19. HEAD image2
        - Verify image2 found now
        20. GET /images
        - Verify 2 public images
        21. GET /images with filter on user-defined property "distro".
        - Verify both images are returned
        22. GET /images with filter on user-defined property 'distro' but
        - with non-existent value. Verify no images are returned
        23. GET /images with filter on non-existent user-defined property
        - "boo". Verify no images are returned
        24. GET /images with filter 'arch=i386'
        - Verify only image2 is returned
        25. GET /images with filter 'arch=x86_64'
        - Verify only image1 is returned
        26. GET /images with filter 'foo=bar'
        - Verify only image2 is returned
        27. DELETE image1
        - Delete image
        28. GET image/members
        - List deleted image members
        29. PUT image/members/member2
        - Update existing member2 of deleted image
        30. PUT image/members/member3
        - Add member3 to deleted image
        31. DELETE image/members/member2
        - Delete member2 from deleted image
        32. DELETE image2
        - Delete image
        33. GET /images
        - Verify no images are listed
        """
        self.cleanup()
        # Pass the test's attributes through as server configuration.
        self.start_servers(**self.__dict__.copy())
        # 0. GET /images
        # Verify no public images
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(content, '{"images": []}')
        # 1. GET /images/detail
        # Verify no public images
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(content, '{"images": []}')
        # 2. POST /images with public image named Image1
        # attribute and no custom properties. Verify a 200 OK is returned
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(response.status, 201)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        self.assertEqual(data['image']['checksum'],
                         hashlib.md5(image_data).hexdigest())
        self.assertEqual(data['image']['size'], FIVE_KB)
        self.assertEqual(data['image']['name'], "Image1")
        self.assertTrue(data['image']['is_public'])
        # 3. HEAD image
        # Verify image found now
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(response.status, 200)
        self.assertEqual(response['x-image-meta-name'], "Image1")
        # 4. GET image
        # Verify all information on image we just added is correct
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_image_headers = {
            'x-image-meta-id': image_id,
            'x-image-meta-name': 'Image1',
            'x-image-meta-is_public': 'True',
            'x-image-meta-status': 'active',
            'x-image-meta-disk_format': 'raw',
            'x-image-meta-container_format': 'ovf',
            'x-image-meta-size': str(FIVE_KB)}
        expected_std_headers = {
            'content-length': str(FIVE_KB),
            'content-type': 'application/octet-stream'}
        for expected_key, expected_value in expected_image_headers.items():
            self.assertEqual(response[expected_key], expected_value,
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           response[expected_key]))
        for expected_key, expected_value in expected_std_headers.items():
            self.assertEqual(response[expected_key], expected_value,
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           response[expected_key]))
        # Body of the GET matches the uploaded payload exactly.
        self.assertEqual(content, "*" * FIVE_KB)
        self.assertEqual(hashlib.md5(content).hexdigest(),
                         hashlib.md5("*" * FIVE_KB).hexdigest())
        # 5. GET /images
        # Verify one public image
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_result = {"images": [
            {"container_format": "ovf",
             "disk_format": "raw",
             "id": image_id,
             "name": "Image1",
             "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
             "size": 5120}]}
        self.assertEqual(jsonutils.loads(content), expected_result)
        # 6. GET /images/detail
        # Verify image and all its metadata
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_image = {
            "status": "active",
            "name": "Image1",
            "deleted": False,
            "container_format": "ovf",
            "disk_format": "raw",
            "id": image_id,
            "is_public": True,
            "deleted_at": None,
            "properties": {},
            "size": 5120}
        image = jsonutils.loads(content)
        for expected_key, expected_value in expected_image.items():
            self.assertEqual(expected_value, image['images'][0][expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           image['images'][0][expected_key]))
        # 7. PUT image with custom properties of "distro" and "arch"
        # Verify 200 returned
        headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
                   'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'PUT', headers=headers)
        self.assertEqual(response.status, 200)
        data = jsonutils.loads(content)
        self.assertEqual(data['image']['properties']['arch'], "x86_64")
        self.assertEqual(data['image']['properties']['distro'], "Ubuntu")
        # 8. PUT image with too many custom properties
        # Verify 413 returned
        headers = {}
        for i in range(11):  # configured limit is 10
            headers['X-Image-Meta-Property-foo%d' % i] = 'bar'
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'PUT', headers=headers)
        self.assertEqual(response.status, 413)
        # 9. GET /images/detail
        # Verify image and all its metadata
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_image = {
            "status": "active",
            "name": "Image1",
            "deleted": False,
            "container_format": "ovf",
            "disk_format": "raw",
            "id": image_id,
            "is_public": True,
            "deleted_at": None,
            "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'},
            "size": 5120}
        image = jsonutils.loads(content)
        for expected_key, expected_value in expected_image.items():
            self.assertEqual(expected_value, image['images'][0][expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           image['images'][0][expected_key]))
        # 10. PUT image and remove a previously existing property.
        headers = {'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'PUT', headers=headers)
        self.assertEqual(response.status, 200)
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = jsonutils.loads(content)['images'][0]
        self.assertEqual(len(data['properties']), 1)
        self.assertEqual(data['properties']['arch'], "x86_64")
        # 11. PUT image and add a previously deleted property.
        headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
                   'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'PUT', headers=headers)
        self.assertEqual(response.status, 200)
        # NOTE(review): this parse result is unused — it is overwritten by
        # the /images/detail re-read just below.
        data = jsonutils.loads(content)
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = jsonutils.loads(content)['images'][0]
        self.assertEqual(len(data['properties']), 2)
        self.assertEqual(data['properties']['arch'], "x86_64")
        self.assertEqual(data['properties']['distro'], "Ubuntu")
        self.assertNotEqual(data['created_at'], data['updated_at'])
        # 12. Add member to image
        path = ("http://%s:%d/v1/images/%s/members/pattieblack" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(response.status, 204)
        # 13. Add member to image
        path = ("http://%s:%d/v1/images/%s/members/pattiewhite" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(response.status, 204)
        # 14. List image members
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = jsonutils.loads(content)
        self.assertEqual(len(data['members']), 2)
        self.assertEqual(data['members'][0]['member_id'], 'pattieblack')
        self.assertEqual(data['members'][1]['member_id'], 'pattiewhite')
        # 15. Delete image member
        path = ("http://%s:%d/v1/images/%s/members/pattieblack" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(response.status, 204)
        # 16. Attempt to replace members with an overlimit amount
        # Adding 11 image members should fail since configured limit is 10
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        memberships = []
        for i in range(11):
            member_id = "foo%d" % i
            memberships.append(dict(member_id=member_id))
        http = httplib2.Http()
        body = jsonutils.dumps(dict(memberships=memberships))
        response, content = http.request(path, 'PUT', body=body)
        self.assertEqual(response.status, 413)
        # 17. Attempt to add a member while at limit
        # Adding an 11th member should fail since configured limit is 10
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        memberships = []
        for i in range(10):
            member_id = "foo%d" % i
            memberships.append(dict(member_id=member_id))
        http = httplib2.Http()
        body = jsonutils.dumps(dict(memberships=memberships))
        response, content = http.request(path, 'PUT', body=body)
        self.assertEqual(response.status, 204)
        path = ("http://%s:%d/v1/images/%s/members/fail_me" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(response.status, 413)
        # 18. POST /images with another public image named Image2
        # attribute and three custom properties, "distro", "arch" & "foo".
        # Verify a 200 OK is returned
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image2')
        headers['X-Image-Meta-Property-Distro'] = 'Ubuntu'
        headers['X-Image-Meta-Property-Arch'] = 'i386'
        headers['X-Image-Meta-Property-foo'] = 'bar'
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(response.status, 201)
        data = jsonutils.loads(content)
        image2_id = data['image']['id']
        self.assertEqual(data['image']['checksum'],
                         hashlib.md5(image_data).hexdigest())
        self.assertEqual(data['image']['size'], FIVE_KB)
        self.assertEqual(data['image']['name'], "Image2")
        self.assertTrue(data['image']['is_public'])
        self.assertEqual(data['image']['properties']['distro'], 'Ubuntu')
        self.assertEqual(data['image']['properties']['arch'], 'i386')
        self.assertEqual(data['image']['properties']['foo'], 'bar')
        # 19. HEAD image2
        # Verify image2 found now
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image2_id)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(response.status, 200)
        self.assertEqual(response['x-image-meta-name'], "Image2")
        # 20. GET /images
        # Verify 2 public images
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        images = jsonutils.loads(content)['images']
        self.assertEqual(len(images), 2)
        # Listing is newest-first: image2 precedes image1.
        self.assertEqual(images[0]['id'], image2_id)
        self.assertEqual(images[1]['id'], image_id)
        # 21. GET /images with filter on user-defined property 'distro'.
        # Verify both images are returned
        path = "http://%s:%d/v1/images?property-distro=Ubuntu" % \
               ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        images = jsonutils.loads(content)['images']
        self.assertEqual(len(images), 2)
        self.assertEqual(images[0]['id'], image2_id)
        self.assertEqual(images[1]['id'], image_id)
        # 22. GET /images with filter on user-defined property 'distro' but
        # with non-existent value. Verify no images are returned
        path = "http://%s:%d/v1/images?property-distro=fedora" % \
               ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        images = jsonutils.loads(content)['images']
        self.assertEqual(len(images), 0)
        # 23. GET /images with filter on non-existent user-defined property
        # 'boo'. Verify no images are returned
        path = "http://%s:%d/v1/images?property-boo=bar" % ("127.0.0.1",
                                                            self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        images = jsonutils.loads(content)['images']
        self.assertEqual(len(images), 0)
        # 24. GET /images with filter 'arch=i386'
        # Verify only image2 is returned
        path = "http://%s:%d/v1/images?property-arch=i386" % ("127.0.0.1",
                                                              self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        images = jsonutils.loads(content)['images']
        self.assertEqual(len(images), 1)
        self.assertEqual(images[0]['id'], image2_id)
        # 25. GET /images with filter 'arch=x86_64'
        # Verify only image1 is returned
        path = "http://%s:%d/v1/images?property-arch=x86_64" % ("127.0.0.1",
                                                                self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        images = jsonutils.loads(content)['images']
        self.assertEqual(len(images), 1)
        self.assertEqual(images[0]['id'], image_id)
        # 26. GET /images with filter 'foo=bar'
        # Verify only image2 is returned
        path = "http://%s:%d/v1/images?property-foo=bar" % ("127.0.0.1",
                                                            self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        images = jsonutils.loads(content)['images']
        self.assertEqual(len(images), 1)
        self.assertEqual(images[0]['id'], image2_id)
        # 27. DELETE image1
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(response.status, 200)
        # 28. Try to list members of deleted image
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 404)
        # 29. Try to update member of deleted image
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
        body = jsonutils.dumps(dict(memberships=fixture))
        response, content = http.request(path, 'PUT', body=body)
        self.assertEqual(response.status, 404)
        # 30. Try to add member to deleted image
        path = ("http://%s:%d/v1/images/%s/members/chickenpattie" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(response.status, 404)
        # 31. Try to delete member of deleted image
        path = ("http://%s:%d/v1/images/%s/members/pattieblack" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(response.status, 404)
        # 32. DELETE image2
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image2_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(response.status, 200)
        # 33. GET /images
        # Verify no images are listed
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        images = jsonutils.loads(content)['images']
        self.assertEqual(len(images), 0)
        # 34. HEAD /images/detail
        # HEAD is not an allowed method on the detail resource.
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(405, response.status)
        self.assertEqual('GET', response.get('allow'))
        self.stop_servers()
    def test_download_non_exists_image_raises_http_forbidden(self):
        """
        We test the following sequential series of actions:
        0. POST /images with public image named Image1
        and no custom properties
        - Verify 201 returned
        1. HEAD image
        - Verify HTTP headers have correct information we just added
        2. GET image
        - Verify all information on image we just added is correct
        3. DELETE image1
        - Delete the newly added image
        4. GET image
        - Verify that 403 HTTPForbidden exception is raised prior to
        404 HTTPNotFound
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(response.status, 201)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        self.assertEqual(data['image']['checksum'],
                         hashlib.md5(image_data).hexdigest())
        self.assertEqual(data['image']['size'], FIVE_KB)
        self.assertEqual(data['image']['name'], "Image1")
        self.assertTrue(data['image']['is_public'])
        # 1. HEAD image
        # Verify image found now
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(response.status, 200)
        self.assertEqual(response['x-image-meta-name'], "Image1")
        # 2. GET /images
        # Verify one public image
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_result = {"images": [
            {"container_format": "ovf",
             "disk_format": "raw",
             "id": image_id,
             "name": "Image1",
             "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
             "size": 5120}]}
        self.assertEqual(jsonutils.loads(content), expected_result)
        # 3. DELETE image1
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(response.status, 200)
        # 4. GET image
        # Verify that 403 HTTPForbidden exception is raised prior to
        # 404 HTTPNotFound
        # Deny download_image via policy so the policy check (403) must
        # win over the not-found check (404).
        rules = {"download_image": '!'}
        self.set_policy_rules(rules)
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 403)
        self.stop_servers()
    def test_download_non_exists_image_raises_http_not_found(self):
        """
        We test the following sequential series of actions:
        0. POST /images with public image named Image1
        and no custom properties
        - Verify 201 returned
        1. HEAD image
        - Verify HTTP headers have correct information we just added
        2. GET image
        - Verify all information on image we just added is correct
        3. DELETE image1
        - Delete the newly added image
        4. GET image
        - Verify that 404 HTTPNotFound exception is raised
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(response.status, 201)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        self.assertEqual(data['image']['checksum'],
                         hashlib.md5(image_data).hexdigest())
        self.assertEqual(data['image']['size'], FIVE_KB)
        self.assertEqual(data['image']['name'], "Image1")
        self.assertTrue(data['image']['is_public'])
        # 1. HEAD image
        # Verify image found now
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(response.status, 200)
        self.assertEqual(response['x-image-meta-name'], "Image1")
        # 2. GET /images
        # Verify one public image
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_result = {"images": [
            {"container_format": "ovf",
             "disk_format": "raw",
             "id": image_id,
             "name": "Image1",
             "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
             "size": 5120}]}
        self.assertEqual(jsonutils.loads(content), expected_result)
        # 3. DELETE image1
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(response.status, 200)
        # 4. GET image
        # Verify that 404 HTTPNotFound exception is raised
        # (no restrictive policy here, unlike the _forbidden test above)
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(response.status, 404)
        self.stop_servers()
    def test_download_image_with_no_restricted_property_set_to_image(self):
        """
        We test the following sequential series of actions:
        0. POST /images with public image named Image1
        and no custom properties
        - Verify 201 returned
        1. GET image
        2. DELETE image1
        - Delete the newly added image
        """
        self.cleanup()
        # The 'restricted' rule only denies members whose image carries a
        # 'test_key' property; this image won't have one, so download is
        # expected to succeed.
        rules = {"context_is_admin": "role:admin",
                 "default": "",
                 "restricted":
                 "not ('test_key':%(x_test_key)s and role:_member_)",
                 "download_image": "role:admin or rule:restricted"}
        self.set_policy_rules(rules)
        self.start_servers(**self.__dict__.copy())
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        headers.update({'X-Roles': 'member'})
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        self.assertEqual(data['image']['checksum'],
                         hashlib.md5(image_data).hexdigest())
        self.assertEqual(data['image']['size'], FIVE_KB)
        self.assertEqual(data['image']['name'], "Image1")
        self.assertTrue(data['image']['is_public'])
        # 1. GET image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        # 2. DELETE image1
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
        self.stop_servers()
def test_status_cannot_be_manipulated_directly(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
headers = minimal_headers('Image1')
# Create a 'queued' image
http = httplib2.Http()
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Disk-Format': 'raw',
'X-Image-Meta-Container-Format': 'bare'}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
response, content = http.request(path, 'POST', headers=headers,
body=None)
self.assertEqual(201, response.status)
image = jsonutils.loads(content)['image']
self.assertEqual('queued', image['status'])
# Ensure status of 'queued' image can't be changed
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image['id'])
http = httplib2.Http()
headers = {'X-Image-Meta-Status': 'active'}
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(403, response.status)
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('queued', response['x-image-meta-status'])
# We allow 'setting' to the same status
http = httplib2.Http()
headers = {'X-Image-Meta-Status': 'queued'}
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('queued', response['x-image-meta-status'])
# Make image active
http = httplib2.Http()
headers = {'Content-Type': 'application/octet-stream'}
response, content = http.request(path, 'PUT', headers=headers,
body='data')
self.assertEqual(200, response.status)
image = jsonutils.loads(content)['image']
self.assertEqual('active', image['status'])
# Ensure status of 'active' image can't be changed
http = httplib2.Http()
headers = {'X-Image-Meta-Status': 'queued'}
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(403, response.status)
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('active', response['x-image-meta-status'])
# We allow 'setting' to the same status
http = httplib2.Http()
headers = {'X-Image-Meta-Status': 'active'}
response, content = http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
response, content = http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('active', response['x-image-meta-status'])
# Create a 'queued' image, ensure 'status' header is ignored
http = httplib2.Http()
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Status': 'active'}
response, content = http.request(path, 'POST', headers=headers,
body=None)
self.assertEqual(201, response.status)
image = jsonutils.loads(content)['image']
self.assertEqual('queued', image['status'])
# Create an 'active' image, ensure 'status' header is ignored
http = httplib2.Http()
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Disk-Format': 'raw',
'X-Image-Meta-Status': 'queued',
'X-Image-Meta-Container-Format': 'bare'}
response, content = http.request(path, 'POST', headers=headers,
body='data')
self.assertEqual(201, response.status)
image = jsonutils.loads(content)['image']
self.assertEqual('active', image['status'])
self.stop_servers()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_edits which is used in tf upgraders.
All of the tests assume that we want to change from an API containing
import foo as f
def f(a, b, kw1, kw2): ...
def g(a, b, kw1, c, kw1_alias): ...
def g2(a, b, kw1, c, d, kw1_alias): ...
def h(a, kw1, kw2, kw1_alias, kw2_alias): ...
and the changes to the API consist of renaming, reordering, and/or removing
arguments. Thus, we want to be able to generate changes to produce each of the
following new APIs:
import bar as f
def f(a, b, kw1, kw3): ...
def f(a, b, kw2, kw1): ...
def f(a, b, kw3, kw1): ...
def g(a, b, kw1, c): ...
def g(a, b, c, kw1): ...
def g2(a, b, kw1, c, d): ...
def g2(a, b, c, d, kw1): ...
def h(a, kw1, kw2): ...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
class ModuleDeprecationSpec(ast_edits.NoUpdateSpec):
  """Change spec that flags any use of module 'a.b' as an error."""

  def __init__(self):
    ast_edits.NoUpdateSpec.__init__(self)
    # Register the deprecation as a single-key entry.
    self.module_deprecations["a.b"] = (ast_edits.ERROR, "a.b is evil.")
class RenameKeywordSpec(ast_edits.NoUpdateSpec):
  """Change spec renaming f's keyword kw2 to kw3.

  The new API is

    def f(a, b, kw1, kw3): ...
  """

  def __init__(self):
    ast_edits.NoUpdateSpec.__init__(self)
    self.update_renames()

  def update_renames(self):
    # Map old keyword name -> new keyword name for function "f".
    self.function_keyword_renames.update({"f": {"kw2": "kw3"}})
class ReorderKeywordSpec(ast_edits.NoUpdateSpec):
  """Change spec moving f's kw2 in front of kw1.

  The new API is

    def f(a, b, kw2, kw1): ...
  """

  def __init__(self):
    ast_edits.NoUpdateSpec.__init__(self)
    self.update_reorders()

  def update_reorders(self):
    # Reorders are declared using the *old* argument order.
    old_order = ["a", "b", "kw1", "kw2"]
    self.function_reorders["f"] = old_order
class ReorderAndRenameKeywordSpec(ReorderKeywordSpec, RenameKeywordSpec):
  """A specification where kw2 gets moved in front of kw1 and is changed to kw3.

  The new API is

    def f(a, b, kw3, kw1): ...
  """

  def __init__(self):
    ReorderKeywordSpec.__init__(self)
    # NOTE: this second parent __init__ runs NoUpdateSpec.__init__ again,
    # which presumably resets the spec state populated by the line above —
    # TODO confirm against ast_edits.NoUpdateSpec.
    RenameKeywordSpec.__init__(self)
    # Re-apply both updates explicitly so the combined spec contains the
    # rename *and* the reorder regardless of constructor ordering.
    self.update_renames()
    self.update_reorders()
class RemoveDeprecatedAliasKeyword(ast_edits.NoUpdateSpec):
  """Change spec removing the kw1_alias keyword in g and g2.

  The new API is

    def g(a, b, kw1, c): ...
    def g2(a, b, kw1, c, d): ...
  """

  def __init__(self):
    ast_edits.NoUpdateSpec.__init__(self)
    # Both functions share the same alias removal; give each its own dict.
    for fn_name in ("g", "g2"):
      self.function_keyword_renames[fn_name] = {"kw1_alias": "kw1"}
class RemoveDeprecatedAliasAndReorderRest(RemoveDeprecatedAliasKeyword):
  """Change spec removing kw1_alias and reordering the remaining args.

  The new API is

    def g(a, b, c, kw1): ...
    def g2(a, b, c, d, kw1): ...
  """

  def __init__(self):
    RemoveDeprecatedAliasKeyword.__init__(self)
    # Reorders are declared using the *old* argument order.
    self.function_reorders.update({
        "g": ["a", "b", "kw1", "c"],
        "g2": ["a", "b", "kw1", "c", "d"],
    })
class RemoveMultipleKeywordArguments(ast_edits.NoUpdateSpec):
  """Change spec removing both keyword aliases from h.

  The new API is

    def h(a, kw1, kw2): ...
  """

  def __init__(self):
    ast_edits.NoUpdateSpec.__init__(self)
    alias_map = {"kw1_alias": "kw1", "kw2_alias": "kw2"}
    self.function_keyword_renames["h"] = alias_map
class RenameImports(ast_edits.NoUpdateSpec):
  """Change spec renaming imports of 'foo' to 'bar', except under foo.baz."""

  def __init__(self):
    ast_edits.NoUpdateSpec.__init__(self)
    foo_rename = ast_edits.ImportRename("bar",
                                        excluded_prefixes=["foo.baz"])
    self.import_renames = {"foo": foo_rename}
class TestAstEdits(test_util.TensorFlowTestCase):
  """End-to-end tests for ast_edits-based upgrades.

  Each test builds an upgrader from one of the small APIChangeSpec
  subclasses defined above, runs a source snippet through it, and checks
  the rewritten text, the report, and/or the error list.
  """

  def _upgrade(self, spec, old_file_text):
    """Runs `spec` over `old_file_text`.

    Returns:
      ((count, report, errors), new_text): the upgrader's outputs plus the
      upgraded source text.
    """
    in_file = six.StringIO(old_file_text)
    out_file = six.StringIO()
    upgrader = ast_edits.ASTCodeUpgrader(spec)
    count, report, errors = (
        upgrader.process_opened_file("test.py", in_file,
                                     "test_out.py", out_file))
    return (count, report, errors), out_file.getvalue()

  def testModuleDeprecation(self):
    text = "a.b.c(a.b.x)"
    (_, _, errors), new_text = self._upgrade(ModuleDeprecationSpec(), text)
    # Module deprecation only reports errors; the text itself is unchanged.
    self.assertEqual(text, new_text)
    self.assertIn("Using member a.b.c", errors[0])
    self.assertIn("1:0", errors[0])
    # Second error is for the argument a.b.x at column 6.
    # (Was a duplicated check of errors[0] for a.b.c.)
    self.assertIn("Using member a.b.x", errors[1])
    self.assertIn("1:6", errors[1])

  def testNoTransformIfNothingIsSupplied(self):
    text = "f(a, b, kw1=c, kw2=d)\n"
    _, new_text = self._upgrade(ast_edits.NoUpdateSpec(), text)
    self.assertEqual(new_text, text)

    text = "f(a, b, c, d)\n"
    _, new_text = self._upgrade(ast_edits.NoUpdateSpec(), text)
    self.assertEqual(new_text, text)

  def testKeywordRename(self):
    """Test that we get the expected result if renaming kw2 to kw3."""
    text = "f(a, b, kw1=c, kw2=d)\n"
    expected = "f(a, b, kw1=c, kw3=d)\n"
    (_, report, _), new_text = self._upgrade(RenameKeywordSpec(), text)
    self.assertEqual(new_text, expected)
    self.assertNotIn("Manual check required", report)

    # No keywords specified, no reordering, so we should get input as output
    text = "f(a, b, c, d)\n"
    (_, report, _), new_text = self._upgrade(RenameKeywordSpec(), text)
    self.assertEqual(new_text, text)
    self.assertNotIn("Manual check required", report)

    # Positional *args cannot hide a keyword that needs renaming, so no
    # warning is expected here.
    text = "f(a, *args)\n"
    (_, report, _), _ = self._upgrade(RenameKeywordSpec(), text)
    self.assertNotIn("Manual check required", report)

    # **kwargs passed in that we cannot inspect, should warn
    text = "f(a, b, kw1=c, **kwargs)\n"
    (_, report, _), _ = self._upgrade(RenameKeywordSpec(), text)
    self.assertIn("Manual check required", report)

  def testKeywordReorderWithParens(self):
    """Test that we get the expected result if there are parens around args."""
    text = "f((a), ( ( b ) ))\n"
    acceptable_outputs = [
        # No change is a valid output
        text,
        # Also cases where all arguments are fully specified are allowed
        "f(a=(a), b=( ( b ) ))\n",
        # Making the parens canonical is ok
        "f(a=(a), b=((b)))\n",
    ]
    _, new_text = self._upgrade(ReorderKeywordSpec(), text)
    self.assertIn(new_text, acceptable_outputs)

  def testKeywordReorder(self):
    """Test that we get the expected result if kw2 is now before kw1."""
    text = "f(a, b, kw1=c, kw2=d)\n"
    acceptable_outputs = [
        # No change is a valid output
        text,
        # Just reordering the kw.. args is also ok
        "f(a, b, kw2=d, kw1=c)\n",
        # Also cases where all arguments are fully specified are allowed
        "f(a=a, b=b, kw1=c, kw2=d)\n",
        "f(a=a, b=b, kw2=d, kw1=c)\n",
    ]
    (_, report, _), new_text = self._upgrade(ReorderKeywordSpec(), text)
    self.assertIn(new_text, acceptable_outputs)
    self.assertNotIn("Manual check required", report)

    # Keywords are reordered, so we should reorder arguments too
    text = "f(a, b, c, d)\n"
    acceptable_outputs = [
        "f(a, b, d, c)\n",
        "f(a=a, b=b, kw1=c, kw2=d)\n",
        "f(a=a, b=b, kw2=d, kw1=c)\n",
    ]
    (_, report, _), new_text = self._upgrade(ReorderKeywordSpec(), text)
    self.assertIn(new_text, acceptable_outputs)
    self.assertNotIn("Manual check required", report)

    # Positional *args passed in that we cannot inspect, should warn
    text = "f(a, b, *args)\n"
    (_, report, _), _ = self._upgrade(ReorderKeywordSpec(), text)
    self.assertIn("Manual check required", report)

    # All remaining arguments are already keyword-specified, so **kwargs
    # does not prevent the reorder and no warning is needed.
    text = "f(a, b, kw1=c, **kwargs)\n"
    (_, report, _), _ = self._upgrade(ReorderKeywordSpec(), text)
    self.assertNotIn("Manual check required", report)

  def testKeywordReorderAndRename(self):
    """Test that we get the expected result if kw2 is renamed and moved."""
    text = "f(a, b, kw1=c, kw2=d)\n"
    acceptable_outputs = [
        "f(a, b, kw3=d, kw1=c)\n",
        "f(a=a, b=b, kw1=c, kw3=d)\n",
        "f(a=a, b=b, kw3=d, kw1=c)\n",
    ]
    (_, report, _), new_text = self._upgrade(
        ReorderAndRenameKeywordSpec(), text)
    self.assertIn(new_text, acceptable_outputs)
    self.assertNotIn("Manual check required", report)

    # Keywords are reordered, so we should reorder arguments too
    text = "f(a, b, c, d)\n"
    acceptable_outputs = [
        "f(a, b, d, c)\n",
        "f(a=a, b=b, kw1=c, kw3=d)\n",
        "f(a=a, b=b, kw3=d, kw1=c)\n",
    ]
    (_, report, _), new_text = self._upgrade(
        ReorderAndRenameKeywordSpec(), text)
    self.assertIn(new_text, acceptable_outputs)
    self.assertNotIn("Manual check required", report)

    # Positional *args passed in that we cannot inspect, should warn
    text = "f(a, *args, kw1=c)\n"
    (_, report, _), _ = self._upgrade(ReorderAndRenameKeywordSpec(), text)
    self.assertIn("Manual check required", report)

    # **kwargs passed in that we cannot inspect, should warn
    text = "f(a, b, kw1=c, **kwargs)\n"
    (_, report, _), _ = self._upgrade(ReorderAndRenameKeywordSpec(), text)
    self.assertIn("Manual check required", report)

  def testRemoveDeprecatedKeywordAlias(self):
    """Test that we get the expected result if a keyword alias is removed."""
    text = "g(a, b, kw1=x, c=c)\n"
    acceptable_outputs = [
        # Not using deprecated alias, so original is ok
        text,
        "g(a=a, b=b, kw1=x, c=c)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
    self.assertIn(new_text, acceptable_outputs)

    # No keyword used, should be no change
    text = "g(a, b, x, c)\n"
    _, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
    self.assertEqual(new_text, text)

    # If we used the alias, it should get renamed
    text = "g(a, b, kw1_alias=x, c=c)\n"
    acceptable_outputs = [
        "g(a, b, kw1=x, c=c)\n",
        "g(a, b, c=c, kw1=x)\n",
        "g(a=a, b=b, kw1=x, c=c)\n",
        "g(a=a, b=b, c=c, kw1=x)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
    self.assertIn(new_text, acceptable_outputs)

    # It should get renamed even if it's last
    text = "g(a, b, c=c, kw1_alias=x)\n"
    acceptable_outputs = [
        "g(a, b, kw1=x, c=c)\n",
        "g(a, b, c=c, kw1=x)\n",
        "g(a=a, b=b, kw1=x, c=c)\n",
        "g(a=a, b=b, c=c, kw1=x)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
    self.assertIn(new_text, acceptable_outputs)

  def testRemoveDeprecatedKeywordAndReorder(self):
    """Test for when a keyword alias is removed and args are reordered."""
    text = "g(a, b, kw1=x, c=c)\n"
    acceptable_outputs = [
        "g(a, b, c=c, kw1=x)\n",
        "g(a=a, b=b, kw1=x, c=c)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
    self.assertIn(new_text, acceptable_outputs)

    # Keywords are reordered, so we should reorder arguments too
    text = "g(a, b, x, c)\n"
    # Don't accept an output which doesn't reorder c and d
    acceptable_outputs = [
        "g(a, b, c, x)\n",
        "g(a=a, b=b, kw1=x, c=c)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
    self.assertIn(new_text, acceptable_outputs)

    # If we used the alias, it should get renamed
    # NOTE(review): the remaining cases run the non-reordering spec
    # (RemoveDeprecatedAliasKeyword); the accepted outputs allow both
    # orders so this passes either way — confirm whether
    # RemoveDeprecatedAliasAndReorderRest was intended here.
    text = "g(a, b, kw1_alias=x, c=c)\n"
    acceptable_outputs = [
        "g(a, b, kw1=x, c=c)\n",
        "g(a, b, c=c, kw1=x)\n",
        "g(a=a, b=b, kw1=x, c=c)\n",
        "g(a=a, b=b, c=c, kw1=x)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
    self.assertIn(new_text, acceptable_outputs)

    # It should get renamed and reordered even if it's last
    text = "g(a, b, c=c, kw1_alias=x)\n"
    acceptable_outputs = [
        "g(a, b, kw1=x, c=c)\n",
        "g(a, b, c=c, kw1=x)\n",
        "g(a=a, b=b, kw1=x, c=c)\n",
        "g(a=a, b=b, c=c, kw1=x)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
    self.assertIn(new_text, acceptable_outputs)

  def testRemoveDeprecatedKeywordAndReorder2(self):
    """Same as testRemoveDeprecatedKeywordAndReorder but on g2 (more args)."""
    text = "g2(a, b, kw1=x, c=c, d=d)\n"
    acceptable_outputs = [
        "g2(a, b, c=c, d=d, kw1=x)\n",
        "g2(a=a, b=b, kw1=x, c=c, d=d)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
    self.assertIn(new_text, acceptable_outputs)

    # Keywords are reordered, so we should reorder arguments too
    text = "g2(a, b, x, c, d)\n"
    # Don't accept an output which doesn't reorder c and d
    acceptable_outputs = [
        "g2(a, b, c, d, x)\n",
        "g2(a=a, b=b, kw1=x, c=c, d=d)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
    self.assertIn(new_text, acceptable_outputs)

    # If we used the alias, it should get renamed
    # NOTE(review): as above, these cases use the non-reordering spec.
    text = "g2(a, b, kw1_alias=x, c=c, d=d)\n"
    acceptable_outputs = [
        "g2(a, b, kw1=x, c=c, d=d)\n",
        "g2(a, b, c=c, d=d, kw1=x)\n",
        "g2(a=a, b=b, kw1=x, c=c, d=d)\n",
        "g2(a=a, b=b, c=c, d=d, kw1=x)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
    self.assertIn(new_text, acceptable_outputs)

    # It should get renamed and reordered even if it's not in order
    text = "g2(a, b, d=d, c=c, kw1_alias=x)\n"
    acceptable_outputs = [
        "g2(a, b, kw1=x, c=c, d=d)\n",
        "g2(a, b, c=c, d=d, kw1=x)\n",
        "g2(a, b, d=d, c=c, kw1=x)\n",
        "g2(a=a, b=b, kw1=x, c=c, d=d)\n",
        "g2(a=a, b=b, c=c, d=d, kw1=x)\n",
        "g2(a=a, b=b, d=d, c=c, kw1=x)\n",
    ]
    _, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
    self.assertIn(new_text, acceptable_outputs)

  def testRemoveMultipleKeywords(self):
    """Remove multiple keywords at once."""
    # Not using deprecated keywords -> no rename
    text = "h(a, kw1=x, kw2=y)\n"
    _, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
    self.assertEqual(new_text, text)

    # Using positional arguments (in proper order) -> no change
    text = "h(a, x, y)\n"
    _, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
    self.assertEqual(new_text, text)

    # Use only the old names, in order
    text = "h(a, kw1_alias=x, kw2_alias=y)\n"
    acceptable_outputs = [
        "h(a, x, y)\n",
        "h(a, kw1=x, kw2=y)\n",
        "h(a=a, kw1=x, kw2=y)\n",
        "h(a, kw2=y, kw1=x)\n",
        "h(a=a, kw2=y, kw1=x)\n",
    ]
    _, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
    self.assertIn(new_text, acceptable_outputs)

    # Use only the old names, in reverse order, should give one of same outputs
    text = "h(a, kw2_alias=y, kw1_alias=x)\n"
    _, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
    self.assertIn(new_text, acceptable_outputs)

    # Mix old and new names
    text = "h(a, kw1=x, kw2_alias=y)\n"
    _, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
    self.assertIn(new_text, acceptable_outputs)

  def testUnrestrictedFunctionWarnings(self):

    class FooWarningSpec(ast_edits.NoUpdateSpec):
      """Usages of function attribute foo() print out a warning."""

      def __init__(self):
        ast_edits.NoUpdateSpec.__init__(self)
        self.function_warnings = {"*.foo": (ast_edits.WARNING, "not good")}

    # NOTE(review): "get_object().foo()" appears twice; the second entry
    # was likely meant to be a distinct case such as
    # "get_object().foo().bar()" — confirm intent.
    texts = ["object.foo()", "get_object().foo()",
             "get_object().foo()", "object.foo().bar()"]
    for text in texts:
      (_, report, _), _ = self._upgrade(FooWarningSpec(), text)
      self.assertIn("not good", report)

    # Note that foo() won't result in a warning, because in this case foo is
    # not an attribute, but a name.
    false_alarms = ["foo", "foo()", "foo.bar()", "obj.run_foo()", "obj.foo"]
    for text in false_alarms:
      (_, report, _), _ = self._upgrade(FooWarningSpec(), text)
      self.assertNotIn("not good", report)

  def testFullNameNode(self):
    t = ast_edits.full_name_node("a.b.c")
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(
        ast.dump(t),
        "Attribute(value=Attribute(value=Name(id='a', ctx=Load()), attr='b', "
        "ctx=Load()), attr='c', ctx=Load())"
    )

  def testImport(self):
    # foo should be renamed to bar.
    text = "import foo as f"
    expected_text = "import bar as f"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

    text = "import foo"
    expected_text = "import bar as foo"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

    text = "import foo.test"
    expected_text = "import bar.test"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

    text = "import foo.test as t"
    expected_text = "import bar.test as t"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

    text = "import foo as f, a as b"
    expected_text = "import bar as f, a as b"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

  def testFromImport(self):
    # foo should be renamed to bar.
    text = "from foo import a"
    expected_text = "from bar import a"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

    text = "from foo.a import b"
    expected_text = "from bar.a import b"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

    text = "from foo import *"
    expected_text = "from bar import *"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

    text = "from foo import a, b"
    expected_text = "from bar import a, b"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

  def testImport_NoChangeNeeded(self):
    text = "import bar as b"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(text, new_text)

  def testFromImport_NoChangeNeeded(self):
    text = "from bar import a as b"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(text, new_text)

  def testExcludedImport(self):
    # foo.baz module is excluded from changes.
    text = "import foo.baz"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(text, new_text)

    text = "import foo.baz as a"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(text, new_text)

    text = "from foo import baz as a"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(text, new_text)

    text = "from foo.baz import a"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(text, new_text)

  def testMultipleImports(self):
    text = "import foo.bar as a, foo.baz as b, foo.baz.c, foo.d"
    expected_text = "import bar.bar as a, foo.baz as b, foo.baz.c, bar.d"
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

    # The excluded name must be split out into its own import line.
    text = "from foo import baz, a, c"
    expected_text = """from foo import baz
from bar import a, c"""
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

  def testImportInsideFunction(self):
    text = """
def t():
  from c import d
  from foo import baz, a
  from e import y
"""
    expected_text = """
def t():
  from c import d
  from foo import baz
  from bar import a
  from e import y
"""
    _, new_text = self._upgrade(RenameImports(), text)
    self.assertEqual(expected_text, new_text)

  def testUpgradeInplaceWithSymlink(self):
    if os.name == "nt":
      self.skipTest("os.symlink doesn't work uniformly on Windows.")

    upgrade_dir = os.path.join(self.get_temp_dir(), "foo")
    os.mkdir(upgrade_dir)
    file_a = os.path.join(upgrade_dir, "a.py")
    file_b = os.path.join(upgrade_dir, "b.py")

    with open(file_a, "a") as f:
      f.write("import foo as f")
    os.symlink(file_a, file_b)

    upgrader = ast_edits.ASTCodeUpgrader(RenameImports())
    upgrader.process_tree_inplace(upgrade_dir)

    # The symlink must be preserved (not replaced by a copy) and the
    # target upgraded exactly once.
    self.assertTrue(os.path.islink(file_b))
    self.assertEqual(file_a, os.readlink(file_b))
    with open(file_a, "r") as f:
      self.assertEqual("import bar as f", f.read())

  def testUpgradeInPlaceWithSymlinkInDifferentDir(self):
    if os.name == "nt":
      self.skipTest("os.symlink doesn't work uniformly on Windows.")

    upgrade_dir = os.path.join(self.get_temp_dir(), "foo")
    other_dir = os.path.join(self.get_temp_dir(), "bar")
    os.mkdir(upgrade_dir)
    os.mkdir(other_dir)
    file_c = os.path.join(other_dir, "c.py")
    file_d = os.path.join(upgrade_dir, "d.py")

    with open(file_c, "a") as f:
      f.write("import foo as f")
    os.symlink(file_c, file_d)

    upgrader = ast_edits.ASTCodeUpgrader(RenameImports())
    upgrader.process_tree_inplace(upgrade_dir)

    self.assertTrue(os.path.islink(file_d))
    self.assertEqual(file_c, os.readlink(file_d))
    # File pointed to by symlink is in a different directory.
    # Therefore, it should not be upgraded.
    with open(file_c, "r") as f:
      self.assertEqual("import foo as f", f.read())

  def testUpgradeCopyWithSymlink(self):
    if os.name == "nt":
      self.skipTest("os.symlink doesn't work uniformly on Windows.")

    upgrade_dir = os.path.join(self.get_temp_dir(), "foo")
    output_dir = os.path.join(self.get_temp_dir(), "bar")
    os.mkdir(upgrade_dir)
    file_a = os.path.join(upgrade_dir, "a.py")
    file_b = os.path.join(upgrade_dir, "b.py")

    with open(file_a, "a") as f:
      f.write("import foo as f")
    os.symlink(file_a, file_b)

    upgrader = ast_edits.ASTCodeUpgrader(RenameImports())
    upgrader.process_tree(upgrade_dir, output_dir, copy_other_files=True)

    # The in-tree symlink is recreated in the output tree, retargeted at
    # the copied file.
    new_file_a = os.path.join(output_dir, "a.py")
    new_file_b = os.path.join(output_dir, "b.py")
    self.assertTrue(os.path.islink(new_file_b))
    self.assertEqual(new_file_a, os.readlink(new_file_b))
    with open(new_file_a, "r") as f:
      self.assertEqual("import bar as f", f.read())

  def testUpgradeCopyWithSymlinkInDifferentDir(self):
    if os.name == "nt":
      self.skipTest("os.symlink doesn't work uniformly on Windows.")

    upgrade_dir = os.path.join(self.get_temp_dir(), "foo")
    other_dir = os.path.join(self.get_temp_dir(), "bar")
    output_dir = os.path.join(self.get_temp_dir(), "baz")
    os.mkdir(upgrade_dir)
    os.mkdir(other_dir)
    file_a = os.path.join(other_dir, "a.py")
    file_b = os.path.join(upgrade_dir, "b.py")

    with open(file_a, "a") as f:
      f.write("import foo as f")
    os.symlink(file_a, file_b)

    upgrader = ast_edits.ASTCodeUpgrader(RenameImports())
    upgrader.process_tree(upgrade_dir, output_dir, copy_other_files=True)

    # A symlink pointing outside the upgraded tree keeps its original
    # target, and that target is left untouched.
    new_file_b = os.path.join(output_dir, "b.py")
    self.assertTrue(os.path.islink(new_file_b))
    self.assertEqual(file_a, os.readlink(new_file_b))
    with open(file_a, "r") as f:
      self.assertEqual("import foo as f", f.read())
# Standard entry point so this test file can be run directly.
if __name__ == "__main__":
  test_lib.main()
| |
from sklearn.linear_model import LogisticRegression as MaxEnt
import copy
import random
import collections
from itertools import izip
import constants
import helper
import warnings
from predict import evaluatePrediction
import pdb
warnings.filterwarnings("ignore")
class Classifier(object):
def __init__(self, TRAIN_ENTITIES, TRAIN_CONFIDENCES, TRAIN_COSINE_SIM, TRAIN_CONTEXT,\
TEST_ENTITIES, TEST_CONFIDENCES, TEST_COSINE_SIM, TEST_CONTEXT):
self.TRAIN_ENTITIES = TRAIN_ENTITIES
self.TRAIN_CONFIDENCES = TRAIN_CONFIDENCES
self.TRAIN_COSINE_SIM = TRAIN_COSINE_SIM
self.TRAIN_CONTEXT = TRAIN_CONTEXT
self.TEST_ENTITIES = TEST_ENTITIES
self.TEST_CONFIDENCES = TEST_CONFIDENCES
self.TEST_COSINE_SIM = TEST_COSINE_SIM
self.TEST_CONTEXT = TEST_CONTEXT
self.match_orig_feature = True
self.print_query_scores = False
def getFeatures(self, article_index, query_index, supporting_article_index, entities, confidences, cosine_sim, context):
features= []
#Construct feature vector for this sampled entity
original_confidence = confidences[article_index][query_index][0]
confidence = confidences[article_index][query_index][supporting_article_index]
confidence_dif = [confidence[i] - original_confidence[i] for i in range(len(confidence))]
confidence_bool = [confidence[i] > original_confidence[i] for i in range(len(confidence))]
orig_confidence_thresh = [original_confidence[i] < .4 for i in range(len(confidence))]
confidence_thresh = [confidence[i] > .6 for i in range(len(confidence))]
#One hot vector to show if entity matches orginal
original_entity = entities[article_index][query_index][0]
new_entity = entities[article_index][query_index][supporting_article_index]
match_features = []
for e_index in range(len(original_entity)):
if original_entity[e_index] == '' or original_entity[e_index] == 'unknown':
match_features += [0, 0]
elif original_entity[e_index].strip().lower() == new_entity[e_index].strip().lower():
match_features += [1, 0]
else:
match_features += [0, 1]
# Cosine sim array is shifted by one.
# Index 0 should be 1 as orig is same as itself.
tfidf = 1 if supporting_article_index == 0 else \
cosine_sim[article_index]\
[query_index][supporting_article_index - 1]
features = original_confidence+ confidence + match_features + [tfidf]
if tfidf > .3:
features += [1]
else:
features += [0]
features += confidence_dif
# features += confidence_bool
features += confidence_thresh
features += orig_confidence_thresh
for c in context[article_index][query_index][supporting_article_index]:
features += c
return features
def trainClassifier(self, train_identifiers, num_entities):
classifier = MaxEnt(solver="lbfgs", verbose=1)
X = []
Y = []
num_neg = 0
max_neg = 1000 ## Set empiracally
for article_index in range(len(self.TRAIN_ENTITIES)):
article = self.TRAIN_ENTITIES[article_index]
for query_index in range(len(article)):
query = article[query_index]
for supporting_article_index in range(len(query)):
features = self.getFeatures(article_index, query_index, supporting_article_index, \
self.TRAIN_ENTITIES, self.TRAIN_CONFIDENCES,self.TRAIN_COSINE_SIM, self.TRAIN_CONTEXT)
if constants.mode == "EMA":
labels = self.getLabelsEMA(article_index, query_index, supporting_article_index, \
self.TRAIN_ENTITIES, train_identifiers)
none_desicion = 4
else:
labels = self.getLabelsShooter(article_index, query_index, supporting_article_index, \
self.TRAIN_ENTITIES, train_identifiers)
none_desicion = 5
for label in labels:
if label == none_desicion:
if num_neg < max_neg:
num_neg+=1
X.append(features)
Y.append(label)
else:
X.append(features)
Y.append(label)
assert( len(X) == len(Y))
classifier.fit(X,Y)
print "Class dist", [sum([y == i for y in Y])for i in range(5)]
print "Total labels", len(Y)
return classifier
def getLabelsEMA(self, article_index, query_index, supporting_article_index, entities, identifier):
#Extract out label for this article (ie. is label correct)
labels = []
gold_entities = identifier[article_index]
new_entities = entities[article_index][query_index][supporting_article_index]
orig_entities = entities[article_index][query_index][0]
for ind in range(len(gold_entities)):
ent = new_entities[ind].lower().strip()
orig_ent = orig_entities[ind].lower().strip()
gold = gold_entities[ind].lower().strip()
match = evaluatePrediction(ent, gold)
orig_match = evaluatePrediction(orig_ent, gold)
if match == 1:
labels.append(ind)
# if not orig_match == 1:
# labels.append(ind)
if set(labels) == set([0, 1, 2]):
labels = [3]
elif labels == []:
labels = [4]
assert (len(labels) > 0)
return labels
def getLabelsShooter(self, article_index, query_index, supporting_article_index, entities, identifier):
#Extract out label for this article (ie. is label correct)
labels = []
gold_entities = identifier[article_index]
new_entities = entities[article_index][query_index][supporting_article_index]
orig_entities = entities[article_index][query_index][0]
for ind in range(len(gold_entities)):
ent = new_entities[ind].lower().strip()
orig_ent = orig_entities[ind].lower().strip()
gold = gold_entities[ind].lower().strip()
if gold == "":
continue
if ent == "":
continue
#special handling for shooterName (entity_index = 0)
if ind == 0:
new_person = set(ent.split('|'))
gold_person = set(gold.split('|'))
if len(new_person.intersection(gold_person)) > 0:
if not ent == orig_ent:
labels.append(ind)
else:
if gold == ent:
if not ent == orig_ent:
labels.append(ind)
if labels == [0, 1, 2, 3]:
labels = [4]
elif labels == []:
labels = [5]
assert (len(labels) > 0)
return labels
def predictEntities(self, classifier, test_identifiers):
if constants.mode == "Shooter":
predictions = [0,0,0,0,0,0]
take_all = 4
num_ents = 4
else:
predictions = [0,0,0,0,0]
take_all = 3
num_ents = 3
DECISIONS = copy.deepcopy(self.TEST_ENTITIES)
i = 0
for article_index in range(len(self.TEST_ENTITIES)):
article = self.TEST_ENTITIES[article_index]
for query_index in range(len(article)):
query = article[query_index]
for supporting_article_index in range(len(query)):
if supporting_article_index == 0:
DECISIONS[article_index][query_index]\
[supporting_article_index] = [1] * num_ents
continue
DECISIONS[article_index][query_index]\
[supporting_article_index] = [0] * num_ents
features = self.getFeatures(article_index, query_index, supporting_article_index, self.TEST_ENTITIES, self.TEST_CONFIDENCES,\
self.TEST_COSINE_SIM, self.TEST_CONTEXT)
# print 'query[supporting_article_index]', query[supporting_article_index]
if query[supporting_article_index] == ['unknown', 'unknown', 'unknown'] or\
query[supporting_article_index] == ['', '', '', '']:
continue
prediction = classifier.predict(features)[0]
predictions[prediction] += 1
if prediction < take_all:
if query[supporting_article_index][prediction] == "unknown":
continue
DECISIONS[article_index][query_index]\
[supporting_article_index][prediction] = 1
# if prediction == 0 or prediction == 2:
# print '------ in prediction -----'
# print "Chose to replace"
# print 'prediction is', prediction
# print 'orig', query[0]
# print 'supp', query[supporting_article_index]
# print 'gold',test_identifiers[article_index]
# print 'label', prediction
# print
# raw_input()
elif prediction == take_all:
if "unknown" in query[supporting_article_index]:
continue
DECISIONS[article_index][query_index]\
[supporting_article_index] = [1] * num_ents
print "predictions", predictions
return DECISIONS
#Run both Max Confidence and Majority Aggregation Schemes given the decisions
#Return the decided tag for each query
def aggregateResults(self, DECISIONS, num_entities):
majority = []
max_conf = []
for article_index in range(len(self.TEST_ENTITIES)):
for entity_index in range(num_entities):
if entity_index == 0:
max_conf.append([])
majority.append([])
article = self.TEST_ENTITIES[article_index]
tag_occurances = {}
max_confidence = -1
max_confidence_tag = ''
for query_index in range(len(article)):
query = article[query_index]
for supporting_article_index in range(len(query)):
supporting_article = query[supporting_article_index]
if DECISIONS[article_index][query_index][supporting_article_index][entity_index] == 0:
continue
confidence = self.TEST_CONFIDENCES[article_index][query_index]\
[supporting_article_index][entity_index]
entity = supporting_article[entity_index].strip().lower()
##Update counts of majority
if entity not in tag_occurances:
tag_occurances[entity] = 1
else:
tag_occurances[entity] += 1
##Update max_confidence
if confidence > max_confidence:
max_confidence = confidence
max_confidence_tag = entity
max_majority_count = -1
majority_tag = ''
for ent in tag_occurances:
if tag_occurances[ent] > max_majority_count:
max_majority_count = tag_occurances[ent]
majority_tag = ent
max_conf[article_index].append(max_confidence_tag)
majority[article_index].append(majority_tag)
print 'len(majority)', len(majority)
print 'len(DECISIONS)', len(DECISIONS)
assert len(majority) == len(DECISIONS)
assert len(max_conf) == len(majority)
assert len(majority[0]) == num_entities
return majority, max_conf
def evaluateBaseline(self, predicted_identifiers, test_identifiers, num_entities, COUNT_ZERO):
predicted_correct = [0.] * num_entities
total_predicted = [0.] * num_entities
total_gold = [0.] * num_entities
print 'len(test_identifiers)', len(test_identifiers)
print 'num_entities', num_entities
for entity_index in range(num_entities):
for article_index in range(len(predicted_identifiers)):
predicted = predicted_identifiers[article_index][entity_index].strip().lower()
gold = test_identifiers[article_index][entity_index].strip().lower()
orig = self.TEST_ENTITIES[article_index][0][0][entity_index].strip().lower()
match = evaluatePrediction(predicted, gold)
if match == 'skip':
continue
else:
total_gold[entity_index] += 1
if match == "no_predict":
continue
if match == 1:
predicted_correct[entity_index] += 1
total_predicted[entity_index] += 1
helper.printScores(predicted_correct, total_predicted,total_gold)
def evaluateSansBaseline(self, predicted_identifiers, test_identifiers, num_entities, COUNT_ZERO):
predicted_correct = [0.] * num_entities
total_predicted = [0.] * num_entities
total_gold = [0.] * num_entities
print 'len(test_identifiers)', len(test_identifiers)
print 'num_entities', num_entities
for entity_index in range(num_entities):
for article_index in range(len(predicted_identifiers)):
predicted = self.TEST_ENTITIES[article_index][0][0][entity_index].strip().lower()
gold = test_identifiers[article_index][entity_index].strip().lower()
match = evaluatePrediction(predicted, gold)
if match == 'skip':
continue
else:
total_gold[entity_index] += 1
if match == "no_predict":
continue
if match == 1:
predicted_correct[entity_index] += 1
total_predicted[entity_index] += 1
helper.printScores(predicted_correct, total_predicted,total_gold)
def trainAndEval(self, train_identifiers, test_identifiers, num_entities, COUNT_ZERO):
    """Train the identifier classifier, then report all three evaluation variants:
    raw per-mention (SANS), majority-vote aggregation, and max-confidence
    aggregation."""
    classifier = self.trainClassifier(train_identifiers, num_entities)
    DECISIONS = self.predictEntities(classifier, test_identifiers)
    print "#############################################################"
    print "Evaluation for Classifier baseline with SANS aggregation"
    self.evaluateSansBaseline(DECISIONS, test_identifiers, num_entities, COUNT_ZERO)
    # Collapse per-mention decisions into one value per article, two ways.
    majority, max_conf = self.aggregateResults(DECISIONS, num_entities)
    print "#############################################################"
    print "Evaluation for Classifier baseline with MAJORITY aggregation"
    self.evaluateBaseline(majority, test_identifiers, num_entities, COUNT_ZERO)
    print "#############################################################"
    print "Evaluation for Classifier baseline with MAX CONFIDENCE aggregation"
    self.evaluateBaseline(max_conf, test_identifiers, num_entities, COUNT_ZERO)
| |
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import MagicMock
from mock import patch
from mock import call
from diamond.collector import Collector
from tokumx import TokuMXCollector
##########################################################################
def run_only_if_pymongo_is_available(func):
    """Decorate *func* so that it only runs when pymongo can be imported."""
    try:
        import pymongo  # noqa: F401 -- availability probe only
        available = True
    except ImportError:
        available = False
    return run_only(func, lambda: available)
class TestTokuMXCollector(CollectorTestCase):
    """Unit tests for TokuMXCollector against a single mocked mongo host."""

    def setUp(self):
        # One host; only databases whose name matches '^db' are collected.
        config = get_collector_config('TokuMXCollector', {
            'host': 'localhost:27017',
            'databases': '^db',
        })
        self.collector = TokuMXCollector(config, None)
        self.connection = MagicMock()

    def test_import(self):
        self.assertTrue(TokuMXCollector)

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_nested_keys_for_server_stats(self,
                                                         publish_mock,
                                                         connector_mock):
        data = {'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        # Must query serverStatus then engineStatus, in that order.
        self.connection.db.command.assert_has_calls(
            [call('serverStatus'), call('engineStatus')], any_order=False)
        # Nested dicts are flattened with '.'; string values are not published.
        self.assertPublishedMany(publish_mock, {
            'more_keys.nested_key': 1,
            'key': 2
        })

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_nested_keys_for_db_stats(self,
                                                     publish_mock,
                                                     connector_mock):
        data = {'db_keys': {'db_nested_key': 1}, 'dbkey': 2, 'dbstring': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection['db1'].command.assert_called_once_with('dbStats')
        metrics = {
            'db_keys.db_nested_key': 1,
            'dbkey': 2
        }
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_stats_with_long_type(self,
                                                 publish_mock,
                                                 connector_mock):
        # NOTE: long() exists only on Python 2 -- this test file targets py2.
        data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection.db.command.assert_has_calls(
            [call('serverStatus'), call('engineStatus')], any_order=False)
        self.assertPublishedMany(publish_mock, {
            'more_keys': 1,
            'key': 2
        })

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_ignore_unneeded_databases(self,
                                              publish_mock,
                                              connector_mock):
        self._annotate_connection(connector_mock, {})
        self.collector.collect()
        # 'baddb' does not match the '^db' filter and must never be accessed.
        assert call('baddb') not in self.connection.__getitem__.call_args_list

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_ignore_unneeded_collections(self,
                                                publish_mock,
                                                connector_mock):
        data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.connection['db1'].collection_names.return_value = ['collection1',
                                                                'tmp.mr.tmp1']
        self.connection['db1'].command.return_value = {'key': 2,
                                                       'string': 'str'}
        self.collector.collect()
        self.connection.db.command.assert_has_calls(
            [call('serverStatus'), call('engineStatus')], any_order=False)
        self.connection['db1'].collection_names.assert_called_once_with()
        self.connection['db1'].command.assert_any_call('dbStats')
        self.connection['db1'].command.assert_any_call('collstats',
                                                       'collection1')
        # Temporary map/reduce collections (tmp.mr.*) must be skipped.
        assert call('collstats', 'tmp.mr.tmp1') not in self.connection['db1'].command.call_args_list  # NOQA
        metrics = {
            'databases.db1.collection1.key': 2,
        }
        self.assertPublishedMany(publish_mock, metrics)

    def _annotate_connection(self, connector_mock, data):
        # Wire the patched pymongo.Connection to return our shared mock with
        # canned command output and database list.
        connector_mock.return_value = self.connection
        self.connection.db.command.return_value = data
        self.connection.database_names.return_value = ['db1', 'baddb']
class TestMongoMultiHostDBCollector(CollectorTestCase):
    """Unit tests for TokuMXCollector configured with multiple mongo hosts.

    With several hosts configured, published metric paths are prefixed with
    the 'host_port' form of each host (':' replaced by '_').
    """

    def setUp(self):
        config = get_collector_config('TokuMXCollector', {
            'hosts': ['localhost:27017', 'localhost:27057'],
            'databases': '^db',
        })
        self.collector = TokuMXCollector(config, None)
        self.connection = MagicMock()

    def test_import(self):
        self.assertTrue(TokuMXCollector)

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_nested_keys_for_server_stats(self,
                                                         publish_mock,
                                                         connector_mock):
        data = {'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection.db.command.assert_called_with('engineStatus')
        # Same stats published once per configured host, with host prefixes.
        self.assertPublishedMany(publish_mock, {
            'localhost_27017.more_keys.nested_key': 1,
            'localhost_27057.more_keys.nested_key': 1,
            'localhost_27017.key': 2,
            'localhost_27057.key': 2
        })

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_nested_keys_for_db_stats(self,
                                                     publish_mock,
                                                     connector_mock):
        data = {'db_keys': {'db_nested_key': 1}, 'dbkey': 2, 'dbstring': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection['db1'].command.assert_called_with('dbStats')
        metrics = {
            'localhost_27017.db_keys.db_nested_key': 1,
            'localhost_27057.db_keys.db_nested_key': 1,
            'localhost_27017.dbkey': 2,
            'localhost_27057.dbkey': 2
        }
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_stats_with_long_type(self,
                                                 publish_mock,
                                                 connector_mock):
        # NOTE: long() exists only on Python 2 -- this test file targets py2.
        data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection.db.command.assert_called_with('engineStatus')
        self.assertPublishedMany(publish_mock, {
            'localhost_27017.more_keys': 1,
            'localhost_27057.more_keys': 1,
            'localhost_27017.key': 2,
            'localhost_27057.key': 2
        })

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_ignore_unneeded_databases(self,
                                              publish_mock,
                                              connector_mock):
        self._annotate_connection(connector_mock, {})
        self.collector.collect()
        # 'baddb' does not match the '^db' filter and must never be accessed.
        assert call('baddb') not in self.connection.__getitem__.call_args_list

    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_ignore_unneeded_collections(self,
                                                publish_mock,
                                                connector_mock):
        data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.connection['db1'].collection_names.return_value = ['collection1',
                                                                'tmp.mr.tmp1']
        self.connection['db1'].command.return_value = {'key': 2,
                                                       'string': 'str'}
        self.collector.collect()
        self.connection.db.command.assert_has_calls(
            [call('serverStatus'), call('engineStatus')], any_order=False)
        self.connection['db1'].collection_names.assert_called_with()
        self.connection['db1'].command.assert_any_call('dbStats')
        self.connection['db1'].command.assert_any_call('collstats',
                                                       'collection1')
        # Temporary map/reduce collections (tmp.mr.*) must be skipped.
        assert call('collstats', 'tmp.mr.tmp1') not in self.connection['db1'].command.call_args_list  # NOQA
        metrics = {
            'localhost_27017.databases.db1.collection1.key': 2,
            'localhost_27057.databases.db1.collection1.key': 2,
        }
        self.assertPublishedMany(publish_mock, metrics)

    def _annotate_connection(self, connector_mock, data):
        # Wire the patched pymongo.Connection to return our shared mock with
        # canned command output and database list.
        connector_mock.return_value = self.connection
        self.connection.db.command.return_value = data
        self.connection.database_names.return_value = ['db1', 'baddb']
##########################################################################
# Allow running this test module directly (e.g. `python test_tokumx.py`).
if __name__ == "__main__":
    unittest.main()
| |
import time
import gym
import numpy as np
import tensorflow as tf
from stable_baselines import logger
from stable_baselines.common import explained_variance, ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy
from stable_baselines.common.schedules import get_schedule_fn
from stable_baselines.common.tf_util import total_episode_reward_logger
from stable_baselines.common.math_util import safe_mean
class PPO2(ActorCriticRLModel):
    """
    Proximal Policy Optimization algorithm (GPU version).
    Paper: https://arxiv.org/abs/1707.06347

    :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
    :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
    :param gamma: (float) Discount factor
    :param n_steps: (int) The number of steps to run for each environment per update
        (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
    :param ent_coef: (float) Entropy coefficient for the loss calculation
    :param learning_rate: (float or callable) The learning rate, it can be a function
    :param vf_coef: (float) Value function coefficient for the loss calculation
    :param max_grad_norm: (float) The maximum value for the gradient clipping
    :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
    :param nminibatches: (int) Number of training minibatches per update. For recurrent policies,
        the number of environments run in parallel should be a multiple of nminibatches.
    :param noptepochs: (int) Number of epoch when optimizing the surrogate
    :param cliprange: (float or callable) Clipping parameter, it can be a function
    :param cliprange_vf: (float or callable) Clipping parameter for the value function, it can be a function.
        This is a parameter specific to the OpenAI implementation. If None is passed (default),
        then `cliprange` (that is used for the policy) will be used.
        IMPORTANT: this clipping depends on the reward scaling.
        To deactivate value function clipping (and recover the original PPO implementation),
        you have to pass a negative value (e.g. -1).
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
    :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
    :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
    :param full_tensorboard_log: (bool) enable additional logging when using tensorboard
        WARNING: this logging can take a lot of space quickly
    :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
        If None (default), use random seed. Note that if you want completely deterministic
        results, you must set `n_cpu_tf_sess` to 1.
    :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
        If None, the number of cpu of the current machine will be used.
    """

    def __init__(self, policy, env, gamma=0.99, n_steps=128, ent_coef=0.01, learning_rate=2.5e-4, vf_coef=0.5,
                 max_grad_norm=0.5, lam=0.95, nminibatches=4, noptepochs=4, cliprange=0.2, cliprange_vf=None,
                 verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,
                 full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None):
        self.learning_rate = learning_rate
        self.cliprange = cliprange
        self.cliprange_vf = cliprange_vf
        self.n_steps = n_steps
        self.ent_coef = ent_coef
        self.vf_coef = vf_coef
        self.max_grad_norm = max_grad_norm
        self.gamma = gamma
        self.lam = lam
        self.nminibatches = nminibatches
        self.noptepochs = noptepochs
        self.tensorboard_log = tensorboard_log
        self.full_tensorboard_log = full_tensorboard_log

        # Graph placeholders/ops; populated by setup_model().
        self.action_ph = None
        self.advs_ph = None
        self.rewards_ph = None
        self.old_neglog_pac_ph = None
        self.old_vpred_ph = None
        self.learning_rate_ph = None
        self.clip_range_ph = None
        self.entropy = None
        self.vf_loss = None
        self.pg_loss = None
        self.approxkl = None
        self.clipfrac = None
        self._train = None
        self.loss_names = None
        self.train_model = None
        self.act_model = None
        self.value = None
        self.n_batch = None
        self.summary = None

        super().__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,
                         _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
                         seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)

        if _init_setup_model:
            self.setup_model()

    def _make_runner(self):
        # Factory for the rollout collector used by learn().
        return Runner(env=self.env, model=self, n_steps=self.n_steps,
                      gamma=self.gamma, lam=self.lam)

    def _get_pretrain_placeholders(self):
        # Returns (obs placeholder, action placeholder, policy output) for
        # behavior-cloning pretraining; deterministic action for continuous spaces.
        policy = self.act_model
        if isinstance(self.action_space, gym.spaces.Discrete):
            return policy.obs_ph, self.action_ph, policy.policy
        return policy.obs_ph, self.action_ph, policy.deterministic_action

    def setup_model(self):
        """Build the TensorFlow graph: acting model, training model, PPO losses
        (clipped surrogate, optionally clipped value loss, entropy bonus),
        Adam optimizer and tensorboard summaries."""
        with SetVerbosity(self.verbose):
            assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the PPO2 model must be " \
                "an instance of common.policies.ActorCriticPolicy."

            self.n_batch = self.n_envs * self.n_steps

            self.graph = tf.Graph()
            with self.graph.as_default():
                self.set_random_seed(self.seed)
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)

                n_batch_step = None
                n_batch_train = None
                if issubclass(self.policy, RecurrentActorCriticPolicy):
                    assert self.n_envs % self.nminibatches == 0, "For recurrent policies, "\
                        "the number of environments run in parallel should be a multiple of nminibatches."
                    n_batch_step = self.n_envs
                    n_batch_train = self.n_batch // self.nminibatches

                # act_model steps through envs; train_model reuses its weights
                # for minibatch optimization.
                act_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                        n_batch_step, reuse=False, **self.policy_kwargs)
                with tf.variable_scope("train_model", reuse=True,
                                       custom_getter=tf_util.outer_scope_getter("train_model")):
                    train_model = self.policy(self.sess, self.observation_space, self.action_space,
                                              self.n_envs // self.nminibatches, self.n_steps, n_batch_train,
                                              reuse=True, **self.policy_kwargs)

                with tf.variable_scope("loss", reuse=False):
                    self.action_ph = train_model.pdtype.sample_placeholder([None], name="action_ph")
                    self.advs_ph = tf.placeholder(tf.float32, [None], name="advs_ph")
                    self.rewards_ph = tf.placeholder(tf.float32, [None], name="rewards_ph")
                    self.old_neglog_pac_ph = tf.placeholder(tf.float32, [None], name="old_neglog_pac_ph")
                    self.old_vpred_ph = tf.placeholder(tf.float32, [None], name="old_vpred_ph")
                    self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
                    self.clip_range_ph = tf.placeholder(tf.float32, [], name="clip_range_ph")

                    neglogpac = train_model.proba_distribution.neglogp(self.action_ph)
                    self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())

                    vpred = train_model.value_flat

                    # Value function clipping: not present in the original PPO
                    if self.cliprange_vf is None:
                        # Default behavior (legacy from OpenAI baselines):
                        # use the same clipping as for the policy
                        self.clip_range_vf_ph = self.clip_range_ph
                        self.cliprange_vf = self.cliprange
                    elif isinstance(self.cliprange_vf, (float, int)) and self.cliprange_vf < 0:
                        # Original PPO implementation: no value function clipping
                        self.clip_range_vf_ph = None
                    else:
                        # Last possible behavior: clipping range
                        # specific to the value function
                        self.clip_range_vf_ph = tf.placeholder(tf.float32, [], name="clip_range_vf_ph")

                    if self.clip_range_vf_ph is None:
                        # No clipping
                        vpred_clipped = train_model.value_flat
                    else:
                        # Clip the different between old and new value
                        # NOTE: this depends on the reward scaling
                        vpred_clipped = self.old_vpred_ph + \
                            tf.clip_by_value(train_model.value_flat - self.old_vpred_ph,
                                             - self.clip_range_vf_ph, self.clip_range_vf_ph)

                    vf_losses1 = tf.square(vpred - self.rewards_ph)
                    vf_losses2 = tf.square(vpred_clipped - self.rewards_ph)
                    self.vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))

                    # Clipped surrogate objective (pessimistic max of the two).
                    ratio = tf.exp(self.old_neglog_pac_ph - neglogpac)
                    pg_losses = -self.advs_ph * ratio
                    pg_losses2 = -self.advs_ph * tf.clip_by_value(ratio, 1.0 - self.clip_range_ph, 1.0 +
                                                                  self.clip_range_ph)
                    self.pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
                    self.approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - self.old_neglog_pac_ph))
                    self.clipfrac = tf.reduce_mean(tf.cast(tf.greater(tf.abs(ratio - 1.0),
                                                                      self.clip_range_ph), tf.float32))
                    loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef

                    tf.summary.scalar('entropy_loss', self.entropy)
                    tf.summary.scalar('policy_gradient_loss', self.pg_loss)
                    tf.summary.scalar('value_function_loss', self.vf_loss)
                    tf.summary.scalar('approximate_kullback-leibler', self.approxkl)
                    tf.summary.scalar('clip_factor', self.clipfrac)
                    tf.summary.scalar('loss', loss)

                    with tf.variable_scope('model'):
                        self.params = tf.trainable_variables()
                        if self.full_tensorboard_log:
                            for var in self.params:
                                tf.summary.histogram(var.name, var)
                    grads = tf.gradients(loss, self.params)
                    if self.max_grad_norm is not None:
                        grads, _grad_norm = tf.clip_by_global_norm(grads, self.max_grad_norm)
                    grads = list(zip(grads, self.params))
                trainer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph, epsilon=1e-5)
                self._train = trainer.apply_gradients(grads)

                self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']

                with tf.variable_scope("input_info", reuse=False):
                    tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))
                    tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
                    tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))
                    tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_range_ph))
                    if self.clip_range_vf_ph is not None:
                        tf.summary.scalar('clip_range_vf', tf.reduce_mean(self.clip_range_vf_ph))

                    tf.summary.scalar('old_neglog_action_probability', tf.reduce_mean(self.old_neglog_pac_ph))
                    tf.summary.scalar('old_value_pred', tf.reduce_mean(self.old_vpred_ph))

                    if self.full_tensorboard_log:
                        tf.summary.histogram('discounted_rewards', self.rewards_ph)
                        tf.summary.histogram('learning_rate', self.learning_rate_ph)
                        tf.summary.histogram('advantage', self.advs_ph)
                        tf.summary.histogram('clip_range', self.clip_range_ph)
                        tf.summary.histogram('old_neglog_action_probability', self.old_neglog_pac_ph)
                        tf.summary.histogram('old_value_pred', self.old_vpred_ph)
                        if tf_util.is_image(self.observation_space):
                            tf.summary.image('observation', train_model.obs_ph)
                        else:
                            tf.summary.histogram('observation', train_model.obs_ph)

                self.train_model = train_model
                self.act_model = act_model
                self.step = act_model.step
                self.proba_step = act_model.proba_step
                self.value = act_model.value
                self.initial_state = act_model.initial_state
                tf.global_variables_initializer().run(session=self.sess)  # pylint: disable=E1101

                self.summary = tf.summary.merge_all()

    def _train_step(self, learning_rate, cliprange, obs, returns, masks, actions, values, neglogpacs, update,
                    writer, states=None, cliprange_vf=None):
        """
        Training of PPO2 Algorithm

        :param learning_rate: (float) learning rate
        :param cliprange: (float) Clipping factor
        :param obs: (np.ndarray) The current observation of the environment
        :param returns: (np.ndarray) the rewards
        :param masks: (np.ndarray) The last masks for done episodes (used in recurent policies)
        :param actions: (np.ndarray) the actions
        :param values: (np.ndarray) the values
        :param neglogpacs: (np.ndarray) Negative Log-likelihood probability of Actions
        :param update: (int) the current step iteration
        :param writer: (TensorFlow Summary.writer) the writer for tensorboard
        :param states: (np.ndarray) For recurrent policies, the internal state of the recurrent model
        :return: policy gradient loss, value function loss, policy entropy,
                approximation of kl divergence, updated clipping range, training update operation
        :param cliprange_vf: (float) Clipping factor for the value function
        """
        # Normalize the advantages (zero mean, unit variance).
        advs = returns - values
        advs = (advs - advs.mean()) / (advs.std() + 1e-8)
        td_map = {self.train_model.obs_ph: obs, self.action_ph: actions,
                  self.advs_ph: advs, self.rewards_ph: returns,
                  self.learning_rate_ph: learning_rate, self.clip_range_ph: cliprange,
                  self.old_neglog_pac_ph: neglogpacs, self.old_vpred_ph: values}
        if states is not None:
            td_map[self.train_model.states_ph] = states
            td_map[self.train_model.dones_ph] = masks

        if cliprange_vf is not None and cliprange_vf >= 0:
            td_map[self.clip_range_vf_ph] = cliprange_vf

        if states is None:
            update_fac = max(self.n_batch // self.nminibatches // self.noptepochs, 1)
        else:
            update_fac = max(self.n_batch // self.nminibatches // self.noptepochs // self.n_steps, 1)

        if writer is not None:
            # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
            if self.full_tensorboard_log and (1 + update) % 10 == 0:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
                    [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
                    td_map, options=run_options, run_metadata=run_metadata)
                writer.add_run_metadata(run_metadata, 'step%d' % (update * update_fac))
            else:
                summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
                    [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
                    td_map)
            writer.add_summary(summary, (update * update_fac))
        else:
            policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
                [self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train], td_map)

        return policy_loss, value_loss, policy_entropy, approxkl, clipfrac

    def set_env(self, env):
        """Replace the environment and recompute the rollout batch size."""
        super().set_env(env)
        self.n_batch = self.n_envs * self.n_steps

    def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="PPO2",
              reset_num_timesteps=True):
        """Run the PPO training loop for `total_timesteps` environment steps
        and return self."""
        # Transform to callable if needed
        self.learning_rate = get_schedule_fn(self.learning_rate)
        self.cliprange = get_schedule_fn(self.cliprange)
        cliprange_vf = get_schedule_fn(self.cliprange_vf)

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            t_first_start = time.time()
            n_updates = total_timesteps // self.n_batch

            callback.on_training_start(locals(), globals())

            for update in range(1, n_updates + 1):
                assert self.n_batch % self.nminibatches == 0, ("The number of minibatches (`nminibatches`) "
                                                               "is not a factor of the total number of samples "
                                                               "collected per rollout (`n_batch`), "
                                                               "some samples won't be used."
                                                               )
                batch_size = self.n_batch // self.nminibatches
                t_start = time.time()
                # Linearly annealed fraction (1 -> 0) for schedules.
                frac = 1.0 - (update - 1.0) / n_updates
                lr_now = self.learning_rate(frac)
                cliprange_now = self.cliprange(frac)
                cliprange_vf_now = cliprange_vf(frac)

                callback.on_rollout_start()
                # true_reward is the reward without discount
                rollout = self.runner.run(callback)
                # Unpack
                obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = rollout

                callback.on_rollout_end()

                # Early stopping due to the callback
                if not self.runner.continue_training:
                    break

                self.ep_info_buf.extend(ep_infos)
                mb_loss_vals = []
                if states is None:  # nonrecurrent version
                    update_fac = max(self.n_batch // self.nminibatches // self.noptepochs, 1)
                    inds = np.arange(self.n_batch)
                    for epoch_num in range(self.noptepochs):
                        np.random.shuffle(inds)
                        for start in range(0, self.n_batch, batch_size):
                            timestep = self.num_timesteps // update_fac + ((epoch_num *
                                                                            self.n_batch + start) // batch_size)
                            end = start + batch_size
                            mbinds = inds[start:end]
                            slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                            mb_loss_vals.append(self._train_step(lr_now, cliprange_now, *slices, writer=writer,
                                                                 update=timestep, cliprange_vf=cliprange_vf_now))
                else:  # recurrent version
                    update_fac = max(self.n_batch // self.nminibatches // self.noptepochs // self.n_steps, 1)
                    assert self.n_envs % self.nminibatches == 0
                    # Shuffle whole environments (not single steps) to keep
                    # recurrent state sequences intact.
                    env_indices = np.arange(self.n_envs)
                    flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
                    envs_per_batch = batch_size // self.n_steps
                    for epoch_num in range(self.noptepochs):
                        np.random.shuffle(env_indices)
                        for start in range(0, self.n_envs, envs_per_batch):
                            timestep = self.num_timesteps // update_fac + ((epoch_num *
                                                                            self.n_envs + start) // envs_per_batch)
                            end = start + envs_per_batch
                            mb_env_inds = env_indices[start:end]
                            mb_flat_inds = flat_indices[mb_env_inds].ravel()
                            slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                            mb_states = states[mb_env_inds]
                            mb_loss_vals.append(self._train_step(lr_now, cliprange_now, *slices, update=timestep,
                                                                 writer=writer, states=mb_states,
                                                                 cliprange_vf=cliprange_vf_now))

                loss_vals = np.mean(mb_loss_vals, axis=0)
                t_now = time.time()
                fps = int(self.n_batch / (t_now - t_start))

                if writer is not None:
                    total_episode_reward_logger(self.episode_reward,
                                                true_reward.reshape((self.n_envs, self.n_steps)),
                                                masks.reshape((self.n_envs, self.n_steps)),
                                                writer, self.num_timesteps)

                if self.verbose >= 1 and (update % log_interval == 0 or update == 1):
                    explained_var = explained_variance(values, returns)
                    logger.logkv("serial_timesteps", update * self.n_steps)
                    logger.logkv("n_updates", update)
                    logger.logkv("total_timesteps", self.num_timesteps)
                    logger.logkv("fps", fps)
                    logger.logkv("explained_variance", float(explained_var))
                    if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:
                        logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))
                        logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))
                    # NOTE(review): uses t_start (this update's start), not
                    # t_now -- elapsed time excludes the current update.
                    logger.logkv('time_elapsed', t_start - t_first_start)
                    for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
                        logger.logkv(loss_name, loss_val)
                    logger.dumpkvs()

            callback.on_training_end()
            return self

    def save(self, save_path, cloudpickle=False):
        """Serialize hyperparameters, spaces and trained weights to save_path."""
        data = {
            "gamma": self.gamma,
            "n_steps": self.n_steps,
            "vf_coef": self.vf_coef,
            "ent_coef": self.ent_coef,
            "max_grad_norm": self.max_grad_norm,
            "learning_rate": self.learning_rate,
            "lam": self.lam,
            "nminibatches": self.nminibatches,
            "noptepochs": self.noptepochs,
            "cliprange": self.cliprange,
            "cliprange_vf": self.cliprange_vf,
            "verbose": self.verbose,
            "policy": self.policy,
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "n_envs": self.n_envs,
            "n_cpu_tf_sess": self.n_cpu_tf_sess,
            "seed": self.seed,
            "_vectorize_action": self._vectorize_action,
            "policy_kwargs": self.policy_kwargs
        }

        params_to_save = self.get_parameters()

        self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
class Runner(AbstractEnvRunner):
    def __init__(self, *, env, model, n_steps, gamma, lam):
        """
        A runner to learn the policy of an environment for a model

        :param env: (Gym environment) The environment to learn from
        :param model: (Model) The model to learn
        :param n_steps: (int) The number of steps to run for each environment
        :param gamma: (float) Discount factor
        :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
        """
        super().__init__(env=env, model=model, n_steps=n_steps)
        self.lam = lam
        self.gamma = gamma

    def _run(self):
        """
        Run a learning step of the model

        :return:
            - observations: (np.ndarray) the observations
            - rewards: (np.ndarray) the rewards
            - masks: (numpy bool) whether an episode is over or not
            - actions: (np.ndarray) the actions
            - values: (np.ndarray) the value function output
            - negative log probabilities: (np.ndarray)
            - states: (np.ndarray) the internal states of the recurrent policies
            - infos: (dict) the extra information of the model
        """
        # mb stands for minibatch
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
        mb_states = self.states
        ep_infos = []
        for _ in range(self.n_steps):
            actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)  # pytype: disable=attribute-error
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(self.dones)
            clipped_actions = actions
            # Clip the actions to avoid out of bound error
            if isinstance(self.env.action_space, gym.spaces.Box):
                clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
            self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
            self.model.num_timesteps += self.n_envs

            if self.callback is not None:
                # Abort training early
                self.callback.update_locals(locals())
                if self.callback.on_step() is False:
                    self.continue_training = False
                    # Return dummy values
                    return [None] * 9

            for info in infos:
                maybe_ep_info = info.get('episode')
                if maybe_ep_info is not None:
                    ep_infos.append(maybe_ep_info)
            mb_rewards.append(rewards)
        # batch of steps to batch of rollouts
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        last_values = self.model.value(self.obs, self.states, self.dones)  # pytype: disable=attribute-error
        # discount/bootstrap off value fn
        mb_advs = np.zeros_like(mb_rewards)
        true_reward = np.copy(mb_rewards)
        last_gae_lam = 0
        # Generalized Advantage Estimation, computed backwards in time:
        # delta is the one-step TD residual; nextnonterminal zeroes the
        # bootstrap across episode boundaries.
        for step in reversed(range(self.n_steps)):
            if step == self.n_steps - 1:
                nextnonterminal = 1.0 - self.dones
                nextvalues = last_values
            else:
                nextnonterminal = 1.0 - mb_dones[step + 1]
                nextvalues = mb_values[step + 1]
            delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
            mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
        mb_returns = mb_advs + mb_values
        # Reorder from (n_steps, n_envs, ...) to flat (n_steps * n_envs, ...).
        mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
            map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))

        return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def swap_and_flatten(arr):
    """
    Merge the first two axes of *arr* after swapping them.

    Used to turn a (n_steps, n_envs, ...) rollout array into a flat
    (n_steps * n_envs, ...) batch.

    :param arr: (np.ndarray)
    :return: (np.ndarray)
    """
    first, second = arr.shape[0], arr.shape[1]
    swapped = arr.swapaxes(0, 1)
    return swapped.reshape(first * second, *arr.shape[2:])
| |
# coding: utf-8
"""
Deals with generating the site-wide navigation.
This consists of building a set of interlinked page and header objects.
"""
from __future__ import unicode_literals
import datetime
import logging
import os
from mkdocs import utils, exceptions
log = logging.getLogger(__name__)
def filename_to_title(filename):
    """
    Automatically generate a default title, given a filename.
    """
    # The homepage gets a fixed title; everything else is derived from the name.
    return 'Home' if utils.is_homepage(filename) else utils.filename_to_title(filename)
class SiteNavigation(object):
    """Site-wide navigation: ordered nav items plus the flat list of pages."""

    def __init__(self, pages_config, use_directory_urls=True):
        self.url_context = URLContext()
        self.file_context = FileContext()
        # _generate_site_navigation (defined in this module) builds the
        # interlinked nav items and page objects from the config.
        self.nav_items, self.pages = _generate_site_navigation(
            pages_config, self.url_context, use_directory_urls)
        # First configured page is treated as the homepage (None if no pages).
        self.homepage = self.pages[0] if self.pages else None
        self.use_directory_urls = use_directory_urls

    def __str__(self):
        return ''.join([str(item) for item in self])

    def __iter__(self):
        return iter(self.nav_items)

    def walk_pages(self):
        """
        Returns each page in the site in turn.

        Additionally this sets the active status of the pages and headers,
        in the site navigation, so that the rendered navbar can correctly
        highlight the currently active page and/or header item.
        """
        page = self.homepage
        page.set_active()
        self.url_context.set_current_url(page.abs_url)
        self.file_context.set_current_path(page.input_path)
        yield page
        # Follow the next_page links, keeping exactly one page active at a time.
        while page.next_page:
            page.set_active(False)
            page = page.next_page
            page.set_active()
            self.url_context.set_current_url(page.abs_url)
            self.file_context.set_current_path(page.input_path)
            yield page
        page.set_active(False)

    @property
    def source_files(self):
        # Lazily computed (and cached) set of all page input paths.
        if not hasattr(self, '_source_files'):
            self._source_files = set([page.input_path for page in self.pages])
        return self._source_files
class URLContext(object):
    """
    Tracks the URL of the page currently being rendered so that links to
    other pages can be emitted as relative URLs.  Relative URLs let a
    generated site be served from any path prefix on any host.
    """

    def __init__(self):
        self.base_path = '/'

    def set_current_url(self, current_url):
        self.base_path = os.path.dirname(current_url)

    def make_relative(self, url):
        """
        Rewrite the site-absolute *url* as a URL relative to the page
        recorded via :meth:`set_current_url`.
        """
        keep_dir_slash = url.endswith('/') and len(url) > 1
        if self.base_path == '/':
            # Workaround for bug on `os.path.relpath()` in Python 2.6
            if url == '/':
                # Workaround for static assets
                return '.'
            return url.lstrip('/')
        rel = os.path.relpath(url, start=self.base_path)
        # Under Python 2.6, relative_path adds an extra '/' at the end.
        rel = rel.rstrip('/') + ('/' if keep_dir_slash else '')
        return utils.path_to_url(rel)
class FileContext(object):
    """
    Tracks the source path of the page currently being rendered so that
    relative file references in its markdown can be resolved to real
    paths inside the documentation tree (and checked against the
    `pages` config).
    """

    def __init__(self):
        self.current_file = None
        self.base_path = ''

    def set_current_path(self, current_path):
        self.current_file = current_path
        self.base_path, _ = os.path.split(current_path)

    def make_absolute(self, path):
        """
        Resolve the relative *path* against the directory of the current
        file and normalize the result.
        """
        joined = os.path.join(self.base_path, path)
        return os.path.normpath(joined)
class Page(object):
    """
    A single documentation page: title, URLs, input/output paths and the
    links that tie it into the site navigation.
    """

    def __init__(self, title, url, path, url_context):
        self.title = title
        self.abs_url = url
        self.active = False
        self.url_context = url_context
        self.update_date = datetime.datetime.now().strftime("%Y-%m-%d")
        # Relative paths to the input markdown file and output html file.
        self.input_path = path
        self.output_path = utils.get_html_path(path)
        # Links to related pages
        self.previous_page = None
        self.next_page = None
        self.ancestors = []

    @property
    def url(self):
        # URL relative to the page currently being rendered.
        return self.url_context.make_relative(self.abs_url)

    @property
    def is_homepage(self):
        return utils.is_homepage(self.input_path)

    @property
    def is_top_level(self):
        return not self.ancestors

    def __str__(self):
        return self.indent_print()

    def indent_print(self, depth=0):
        prefix = ' ' * depth
        shown_title = '[blank]' if self.title is None else self.title
        marker = ' [*]' if self.active else ''
        return '{0}{1} - {2}{3}\n'.format(prefix, shown_title,
                                          self.abs_url, marker)

    def set_active(self, active=True):
        # Activating a page also activates every ancestor header so the
        # whole branch is highlighted in the navbar.
        self.active = active
        for parent in self.ancestors:
            parent.set_active(active)
class Header(object):
def __init__(self, title, children):
self.title, self.children = title, children
self.active = False
self.ancestors = []
def __str__(self):
return self.indent_print()
@property
def is_top_level(self):
return len(self.ancestors) == 0
def indent_print(self, depth=0):
indent = ' ' * depth
active_marker = ' [*]' if self.active else ''
ret = '%s%s%s\n' % (indent, self.title, active_marker)
for item in self.children:
ret += item.indent_print(depth + 1)
return ret
def set_active(self, active=True):
self.active = active
for ancestor in self.ancestors:
ancestor.set_active(active)
def _path_to_page(path, title, url_context, use_directory_urls):
    """
    Build a :class:`Page` for *path*, deriving a title from the filename
    when none was given in the pages config.
    """
    if title is None:
        filename = path.split(os.path.sep)[-1]
        title = filename_to_title(filename)
    return Page(title=title,
                url=utils.get_url_path(path, use_directory_urls),
                path=path,
                url_context=url_context)
def _follow(config_line, url_context, use_dir_urls, header=None, title=None):
    """
    Recursively yield the Page and Header objects described by a single
    entry of the 'pages' config.

    :param config_line: either a string (a markdown path) or a
        single-entry dict mapping a title/category name to a path or to
        a list of sub entries.
    :param url_context: the shared :class:`URLContext` for built pages.
    :param use_dir_urls: whether pages use directory-style URLs.
    :param header: the :class:`Header` the produced items attach to.
    :param title: explicit page title, when already known.
    :raises exceptions.ConfigurationError: for malformed entries.

    Bug fix: the original ended the generator with ``raise
    StopIteration`` in three places; under PEP 479 (Python 3.7+) raising
    StopIteration inside a generator is converted to a RuntimeError.
    A plain ``return`` is the correct way to finish early.
    """
    if isinstance(config_line, utils.string_types):
        path = os.path.normpath(config_line)
        page = _path_to_page(path, title, url_context, use_dir_urls)
        if header:
            page.ancestors = [header]
            header.children.append(page)
        yield page
        return
    elif not isinstance(config_line, dict):
        msg = ("Line in 'page' config is of type {0}, dict or string "
               "expected. Config: {1}").format(type(config_line), config_line)
        raise exceptions.ConfigurationError(msg)
    if len(config_line) > 1:
        raise exceptions.ConfigurationError(
            "Page configs should be in the format 'name: markdown.md'. The "
            "config contains an invalid entry: {0}".format(config_line))
    elif len(config_line) == 0:
        log.warning("Ignoring empty line in the pages config.")
        return
    next_cat_or_title, subpages_or_path = next(iter(config_line.items()))
    if isinstance(subpages_or_path, utils.string_types):
        # 'Title: path.md' entry — recurse with the explicit title.
        path = subpages_or_path
        for sub in _follow(path, url_context, use_dir_urls, header=header, title=next_cat_or_title):
            yield sub
        return
    elif not isinstance(subpages_or_path, list):
        # Bug fix: report the type of the offending sub-pages value; the
        # original formatted type(config_line), which is always `dict` here.
        msg = ("Line in 'page' config is of type {0}, list or string "
               "expected for sub pages. Config: {1}"
               ).format(type(subpages_or_path), config_line)
        raise exceptions.ConfigurationError(msg)
    # 'Category: [sub, entries]' — emit the header, then its subtree.
    next_header = Header(title=next_cat_or_title, children=[])
    if header:
        next_header.ancestors = [header]
        header.children.append(next_header)
    yield next_header
    for subpage in subpages_or_path:
        for sub in _follow(subpage, url_context, use_dir_urls, next_header):
            yield sub
def _generate_site_navigation(pages_config, url_context, use_dir_urls=True):
    """
    Walk the pages config and return ``(nav_items, pages)``: the top
    level navigation entries and the flat, ordered list of all pages
    with their previous/next links wired up.
    """
    nav_items = []
    pages = []
    previous = None
    for config_line in pages_config:
        for item in _follow(config_line, url_context, use_dir_urls):
            if isinstance(item, Header):
                if item.is_top_level:
                    nav_items.append(item)
            elif isinstance(item, Page):
                if item.is_top_level:
                    nav_items.append(item)
                pages.append(item)
                # Thread the doubly-linked previous/next chain.
                if previous is not None:
                    item.previous_page = previous
                    previous.next_page = item
                previous = item
    if not pages:
        raise exceptions.ConfigurationError(
            "No pages found in the pages config. "
            "Remove it entirely to enable automatic page discovery.")
    return (nav_items, pages)
| |
# -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
def responder(f):
    """Marks a function as responder: the decorated function is called
    with the WSGI arguments, and its return value is then invoked as the
    WSGI application with ``(environ, start_response)``.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    def wrapper(*args):
        return f(*args)(*args[-2:])
    return update_wrapper(wrapper, f)
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False, trusted_hosts=None):
    """A handy helper function that recreates the full URL for the current
    request or parts of it.  Here an example:

    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    This optionally it verifies that the host is in a list of trusted hosts.
    If the host is not in there it will raise a
    :exc:`~werkzeug.exceptions.SecurityError`.

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    # Build the URL piecewise as percent-quoted URI text and convert the
    # final result to an IRI exactly once at the end.
    tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
    cat = tmp.append  # bind the method once; called repeatedly below
    if host_only:
        return uri_to_iri(''.join(tmp) + '/')
    # SCRIPT_NAME is quoted with any trailing slash removed so the '/'
    # appended on the next line is never doubled.
    cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
    cat('/')
    if not root_only:
        # PATH_INFO is bytes here, hence the b'/' strip; its leading
        # slash is already provided by the cat('/') above.
        cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
        if not strip_querystring:
            qs = get_query_string(environ)
            if qs:
                cat('?' + qs)
    return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
    """Checks if a host is trusted against a list.  This also takes care
    of port normalization.

    .. versionadded:: 0.9

    :param hostname: the hostname to check
    :param trusted_list: a list of hostnames to check against.  If a
                         hostname starts with a dot it will match against
                         all subdomains as well.
    """
    if not hostname:
        return False
    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _normalize(host):
        # Drop any explicit port, then IDNA-encode.
        if ':' in host:
            host = host.rsplit(':', 1)[0]
        return _encode_idna(host)

    hostname = _normalize(hostname)
    for ref in trusted_list:
        # A leading dot means "this domain and all of its subdomains".
        suffix_match = ref.startswith('.')
        if suffix_match:
            ref = ref[1:]
        ref = _normalize(ref)
        # NOTE(review): _encode_idna may return bytes on Python 3, in
        # which case '.' + ref would need a bytes literal — confirm.
        if ref == hostname or (suffix_match and
                               hostname.endswith('.' + ref)):
            return True
    return False
def get_host(environ, trusted_hosts=None):
    """Return the real host for the given WSGI environment.  This takes
    care of the `X-Forwarded-Host` header.  Optionally it verifies that
    the host is in a list of trusted hosts.  If the host is not in there
    it will raise a :exc:`~werkzeug.exceptions.SecurityError`.

    :param environ: the WSGI environment to get the host of.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    forwarded = environ.get('HTTP_X_FORWARDED_HOST')
    if forwarded is not None:
        # Proxies may append hosts; the first entry is the client-facing one.
        rv = forwarded.split(',')[0].strip()
    elif 'HTTP_HOST' in environ:
        rv = environ['HTTP_HOST']
    else:
        rv = environ['SERVER_NAME']
        # Only append the port when it is not the scheme's default.
        scheme_port = (environ['wsgi.url_scheme'], environ['SERVER_PORT'])
        if scheme_port not in (('https', '443'), ('http', '80')):
            rv += ':' + environ['SERVER_PORT']
    if trusted_hosts is not None and not host_is_trusted(rv, trusted_hosts):
        from werkzeug.exceptions import SecurityError
        raise SecurityError('Host "%s" is not trusted' % rv)
    return rv
def get_content_length(environ):
    """Returns the content length from the WSGI environment as an
    integer.  If it's not available or unparsable, `None` is returned.
    Negative values are clamped to zero.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    raw = environ.get('CONTENT_LENGTH')
    if raw is None:
        return None
    try:
        length = int(raw)
    except (ValueError, TypeError):
        return None
    return length if length > 0 else 0
def get_input_stream(environ, safe_fallback=True):
    """Returns the input stream from the WSGI environment and wraps it
    in the most sensible way possible.  The stream returned is not the
    raw WSGI stream in most cases but one that is safe to read from
    without taking into account the content length.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe: indicates weather the function should use an empty
                 stream as safe fallback or just return the original
                 WSGI input stream if it can't wrap it safely.  The
                 default is to return an empty string in those cases.
    """
    stream = environ['wsgi.input']
    content_length = get_content_length(environ)
    # 'wsgi.input_terminated' is a WSGI extension signalling the server
    # guarantees EOF at the end of the body, so the raw stream is safe.
    if environ.get('wsgi.input_terminated'):
        return stream
    # Without a content length the read cannot be bounded: hand back an
    # empty stream (safe default) or the raw stream at the caller's risk.
    if content_length is None:
        return _empty_stream if safe_fallback else stream
    # Otherwise bound the stream to exactly the declared length.
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Returns the `QUERY_STRING` from the WSGI environment.  This also
    takes care about the WSGI decoding dance on Python 3 environments as
    a native string.  The string returned will be restricted to ASCII
    characters.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string from.
    """
    raw = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
    # Some browsers (IE) send unquoted non-ASCII; re-quote so the result
    # is ASCII-safe, leaving common query-string punctuation untouched.
    quoted = url_quote(raw, safe=':&%=+$!*\'(),')
    return try_coerce_native(quoted)
def get_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the `PATH_INFO` from the WSGI environment and properly
    decodes it.  This also takes care about the WSGI decoding dance on
    Python 3 environments.  If the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    return to_unicode(wsgi_get_bytes(environ.get('PATH_INFO', '')),
                      charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
    """Returns the `SCRIPT_NAME` from the WSGI environment and properly
    decodes it.  This also takes care about the WSGI decoding dance on
    Python 3 environments.  If the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    return to_unicode(wsgi_get_bytes(environ.get('SCRIPT_NAME', '')),
                      charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Removes and returns the next segment of `PATH_INFO`, pushing it
    onto `SCRIPT_NAME`.  Returns `None` if there is nothing left on
    `PATH_INFO`.  If the `charset` is set to `None` a bytestring is
    returned.

    If there are empty segments (``'/foo//bar``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is modified.
    """
    path = environ.get('PATH_INFO')
    if not path:
        return None
    script_name = environ.get('SCRIPT_NAME', '')
    # Shift all leading slashes (including those of empty segments)
    # over onto SCRIPT_NAME so nothing is lost.
    stripped = path.lstrip('/')
    script_name += '/' * (len(path) - len(stripped))
    path = stripped
    if '/' in path:
        segment, _, remainder = path.partition('/')
        environ['PATH_INFO'] = '/' + remainder
        environ['SCRIPT_NAME'] = script_name + segment
        rv = wsgi_get_bytes(segment)
    else:
        # Last segment: PATH_INFO is exhausted.
        environ['PATH_INFO'] = ''
        environ['SCRIPT_NAME'] = script_name + path
        rv = wsgi_get_bytes(path)
    return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none.  Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` a bytestring is returned.

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    parts = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
    # str.split always yields at least one element, so this guard is
    # effectively always taken (an empty path yields '').
    if parts:
        return to_unicode(wsgi_get_bytes(parts[0]),
                          charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
                      errors='replace', collapse_http_schemes=True):
    """Extracts the path info from the given URL (or WSGI environment) and
    path.  The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment.  The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    .. versionadded:: 0.6

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.  Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    """
    def _normalize_netloc(scheme, netloc):
        # Strip any credentials ('user:pass@'), then split off an
        # explicit port so default ports can be dropped.
        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
        if len(parts) == 2:
            netloc, port = parts
            # Drop the port when it is the scheme's default.
            if (scheme == u'http' and port == u'80') or \
               (scheme == u'https' and port == u'443'):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u':' + port
        return netloc

    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl,
                                             root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    # Resolve the given path/URL against the base before parsing, so a
    # bare relative path inherits the base's scheme and netloc.
    cur_scheme, cur_netloc, cur_path, = \
        url_parse(url_join(base_iri, path))[:3]

    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)

    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u'http', u'https'):
                return None
    else:
        # Strict mode: schemes must be HTTP(S) *and* identical.
        if not (base_scheme in (u'http', u'https') and
                base_scheme == cur_scheme):
            return None

    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None

    # are we below the application path?
    base_path = base_path.rstrip(u'/')
    if not cur_path.startswith(base_path):
        return None

    return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):
    """A WSGI middleware that provides static content for development
    environments or simple server setups. Usage is quite simple::

        import os
        from werkzeug.wsgi import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })

    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``.  This is pretty useful during development
    because a standalone media server is not required.  One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/shared': ('myapplication', 'shared_files')
        })

    This will then serve the ``shared_files`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web.  If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non ASCII filenames.  If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident.  We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module.  If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    .. versionchanged:: 0.5
       The cache timeout is configurable now.

    .. versionadded:: 0.6
       The `fallback_mimetype` parameter was added.

    :param app: the application to wrap.  If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param fallback_mimetype: the fallback mimetype for unknown files.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    """
    def __init__(self, app, exports, disallow=None, cache=True,
                 cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
        self.app = app
        self.exports = {}
        self.cache = cache
        self.cache_timeout = cache_timeout
        # Map each URL prefix to a loader callable, chosen by export
        # value type: tuple -> package data, existing file path ->
        # single file, anything else string -> directory.
        for key, value in iteritems(exports):
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, string_types):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError('unknown def %r' % value)
            self.exports[key] = loader
        # NOTE(review): `disallow` is documented as a list of fnmatch
        # rules, but fnmatch receives it here as a single pattern —
        # confirm whether list input is actually supported.
        if disallow is not None:
            from fnmatch import fnmatch
            self.is_allowed = lambda x: not fnmatch(x, disallow)
        self.fallback_mimetype = fallback_mimetype
    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files.  However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True
    def _opener(self, filename):
        # Zero-argument callable producing the loader result triple:
        # (open binary file object, mtime as naive UTC datetime, size).
        return lambda: (
            open(filename, 'rb'),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename))
        )
    def get_file_loader(self, filename):
        # Loader for a single exported file; ignores the request path.
        return lambda x: (os.path.basename(filename), self._opener(filename))
    def get_package_loader(self, package, package_path):
        # Loader that serves files from a package's resource data via
        # pkg_resources; falls back to streams for zipped packages.
        from pkg_resources import DefaultProvider, ResourceManager, \
            get_provider
        loadtime = datetime.utcnow()
        provider = get_provider(package)
        manager = ResourceManager()
        filesystem_bound = isinstance(provider, DefaultProvider)
        def loader(path):
            if path is None:
                return None, None
            path = posixpath.join(package_path, path)
            if not provider.has_resource(path):
                return None, None
            basename = posixpath.basename(path)
            if filesystem_bound:
                # Package data lives on disk: serve the real file.
                return basename, self._opener(
                    provider.get_resource_filename(manager, path))
            # No real file (e.g. zip import): stream the resource and
            # use the loader construction time as the mtime.
            return basename, lambda: (
                provider.get_resource_stream(manager, path),
                loadtime,
                0
            )
        return loader
    def get_directory_loader(self, directory):
        # Loader that maps request paths onto files below `directory`.
        def loader(path):
            if path is not None:
                path = os.path.join(directory, path)
            else:
                path = directory
            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)
            return None, None
        return loader
    def generate_etag(self, mtime, file_size, real_filename):
        # Weak content fingerprint from mtime, size and an adler32 of
        # the filename — cheap, not content-based.
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(sys.getfilesystemencoding())
        return 'wzsdm-%d-%s-%s' % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xffffffff
        )
    def __call__(self, environ, start_response):
        cleaned_path = get_path_info(environ)
        if PY2:
            cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
        # sanitize the path for non unix systems
        cleaned_path = cleaned_path.strip('/')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                cleaned_path = cleaned_path.replace(sep, '/')
        # Rebuild the path with empty and '..' segments removed so a
        # request can never escape the exported directories.
        path = '/'.join([''] + [x for x in cleaned_path.split('/')
                                if x and x != '..'])
        file_loader = None
        # Match the request either exactly against an export key or as
        # a path below one (prefix match with a trailing '/').
        for search_path, loader in iteritems(self.exports):
            if search_path == path:
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith('/'):
                search_path += '/'
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path):])
                if file_loader is not None:
                    break
        # No export matched (or file is disallowed): defer to the
        # wrapped application.
        if file_loader is None or not self.is_allowed(real_filename):
            return self.app(environ, start_response)
        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = guessed_type[0] or self.fallback_mimetype
        f, mtime, file_size = file_loader()
        headers = [('Date', http_date())]
        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ('Etag', '"%s"' % etag),
                ('Cache-Control', 'max-age=%d, public' % timeout)
            ]
            # Conditional request hit: answer 304 without a body.
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response('304 Not Modified', headers)
                return []
            headers.append(('Expires', http_date(time() + timeout)))
        else:
            headers.append(('Cache-Control', 'public'))
        headers.extend((
            ('Content-Type', mime_type),
            ('Content-Length', str(file_size)),
            ('Last-Modified', http_date(mtime))
        ))
        start_response('200 OK', headers)
        return wrap_file(environ, f)
class DispatcherMiddleware(object):
    """Allows one to mount middlewares or applications in a WSGI application.
    This is useful if you want to combine multiple WSGI applications::

        app = DispatcherMiddleware(app, {
            '/app2': app2,
            '/app3': app3
        })
    """

    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        script = environ.get('PATH_INFO', '')
        path_info = ''
        app = None
        # Peel segments off the right end of the path until a mount
        # prefix matches; everything peeled becomes the new PATH_INFO.
        while '/' in script:
            if script in self.mounts:
                app = self.mounts[script]
                break
            script, _, tail = script.rpartition('/')
            path_info = '/' + tail + path_info
        if app is None:
            # No prefix matched: fall back to the root application.
            app = self.mounts.get(script, self.app)
        original_script_name = environ.get('SCRIPT_NAME', '')
        environ['SCRIPT_NAME'] = original_script_name + script
        environ['PATH_INFO'] = path_info
        return app(environ, start_response)
@implements_iterator
class ClosingIterator(object):
    """Wraps a response iterable so that extra cleanup callbacks run when
    it is closed.  The WSGI specification requires middlewares and
    gateways to respect the `close` callback of an iterator; this class
    makes adding such callbacks trivial::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    A single callable may be passed instead of the list.  A closing
    iterator is not needed if the application uses response objects and
    finishes the processing if the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(self, iterable, callbacks=None):
        inner = iter(iterable)
        self._next = partial(next, inner)
        # Normalize to a mutable list: None -> [], bare callable -> [cb],
        # any other iterable is copied before mutation.
        if callbacks is None:
            callbacks = []
        elif callable(callbacks):
            callbacks = [callbacks]
        else:
            callbacks = list(callbacks)
        # The wrapped iterator's own close() (if any) must run first.
        close_hook = getattr(inner, 'close', None)
        if close_hook:
            callbacks.insert(0, close_hook)
        self._callbacks = callbacks

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        for callback in self._callbacks:
            callback()
def wrap_file(environ, file, buffer_size=8192):
    """Wraps a file.  This uses the WSGI server's file wrapper if
    available or otherwise the generic :class:`FileWrapper`.

    .. versionadded:: 0.5

    If the file wrapper from the WSGI server is used it's important to not
    iterate over it from inside the application but to pass it through
    unchanged.  If you want to pass out a file wrapper inside a response
    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.

    More information about file wrappers are available in :pep:`333`.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """
    file_wrapper = environ.get('wsgi.file_wrapper', FileWrapper)
    return file_wrapper(file, buffer_size)
@implements_iterator
class FileWrapper(object):
    """Converts a :class:`file`-like object into an iterable that yields
    `buffer_size` blocks until the file is fully read.

    You should not use this class directly but rather use the
    :func:`wrap_file` function that uses the WSGI server's file wrapper
    support if it's available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`BaseResponse` you have
    to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        # Close the underlying file if it supports closing.
        if hasattr(self.file, 'close'):
            self.file.close()

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
def _make_chunk_iter(stream, limit, buffer_size):
    """Helper for the line and chunk iter functions: yields non-empty
    chunks from either a plain iterable or a readable stream, bounding
    stream reads by *limit* when given."""
    if isinstance(stream, (bytes, bytearray, text_type)):
        raise TypeError('Passed a string or byte object instead of '
                        'true iterator or stream.')
    if not hasattr(stream, 'read'):
        # Plain iterable: pass items through, skipping empty ones.
        for item in stream:
            if item:
                yield item
        return
    if not isinstance(stream, LimitedStream) and limit is not None:
        stream = LimitedStream(stream, limit)
    _read = stream.read
    while True:
        chunk = _read(buffer_size)
        if not chunk:
            return
        yield chunk
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.

    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.

    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.

    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    first_item = next(_iter, '')
    if not first_item:
        return
    # The first chunk decides whether we operate on text or bytes; the
    # wrapper builds literals of the matching type.
    s = make_literal_wrapper(first_item)
    empty = s('')
    cr = s('\r')
    lf = s('\n')
    crlf = s('\r\n')
    # Put the probed chunk back in front of the remaining stream.
    _iter = chain((first_item,), _iter)
    def _iter_basic_lines():
        # Yield lines as delimited inside each chunk; a line spanning a
        # chunk boundary stays in `buffer` until its terminator arrives.
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, '')
            if not new_data:
                break
            new_buf = []
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                if item and item[-1:] in crlf:
                    yield _join(new_buf)
                    new_buf = []
            buffer = new_buf
        if buffer:
            yield _join(buffer)
    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    chunk_source = _make_chunk_iter(stream, limit, buffer_size)

    head = next(chunk_source, '')
    if not head:
        # Nothing to read: yield nothing.
        return
    # Re-attach the probe item so the main loop sees the full stream.
    chunk_source = chain((head,), chunk_source)

    # Pick text or bytes flavored splitter/joiner based on the stream type.
    if isinstance(head, text_type):
        separator = to_unicode(separator)
        splitter = re.compile(r'(%s)' % re.escape(separator)).split
        joiner = u''.join
    else:
        separator = to_bytes(separator)
        splitter = re.compile(b'(' + re.escape(separator) + b')').split
        joiner = b''.join

    pending = []
    while True:
        data = next(chunk_source, '')
        if not data:
            break
        current = []
        # `pending` carries the unfinished chunk from the previous read so
        # a separator straddling two reads is still honored.
        for piece in chain(pending, splitter(data)):
            if piece == separator:
                yield joiner(current)
                current = []
            else:
                current.append(piece)
        pending = current
    if pending:
        yield joiner(pending)
@implements_iterator
class LimitedStream(object):
    """Wraps a stream so that it doesn't read more than n bytes.  If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string.  The return value of that function is forwarded
    to the reader function.  So if it returns an empty string
    :meth:`read` will return an empty string as well.

    The limit however must never be higher than what the stream can
    output.  Otherwise :meth:`readlines` will try to read past the
    limit.

    .. admonition:: Note on WSGI compliance

       calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because it passes a size argument to the
       readline methods.  Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream.  As a result
       of that the use of :meth:`readline` is discouraged.

       For the same reason iterating over the :class:`LimitedStream`
       is not portable.  It internally calls :meth:`readline`.

       We strongly suggest using :meth:`read` only or using the
       :func:`make_line_iter` which safely iterates line-based
       over a WSGI input stream.

    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """

    def __init__(self, stream, limit):
        # Bind the only two stream methods ever used; the wrapped stream
        # object itself is not retained.
        self._read = stream.read
        self._readline = stream.readline
        self._pos = 0
        self.limit = limit

    def __iter__(self):
        return self

    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit

    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)

    def on_disconnect(self):
        """What should happen if a disconnect is detected?  The return
        value of this function is returned from read functions in case
        the client went away.  By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from werkzeug.exceptions import ClientDisconnected
        raise ClientDisconnected()

    def exhaust(self, chunk_size=1024 * 64):
        """Exhaust the stream.  This consumes all the data left until the
        limit is reached.

        :param chunk_size: the size for a chunk.  It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk

    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.

        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit
        # Never ask the underlying stream for more than the remaining budget.
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (IOError, ValueError):
            return self.on_disconnect()
        if to_read and len(read) != to_read:
            # Short read: the client disconnected before delivering the
            # promised number of bytes.
            return self.on_disconnect()
        self._pos += len(read)
        return read

    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, IOError):
            return self.on_disconnect()
        if size and not line:
            # Asked for bytes but got nothing: client went away.
            return self.on_disconnect()
        self._pos += len(line)
        return line

    def readlines(self, size=None):
        """Reads a file into a list of strings.  It calls :meth:`readline`
        until the file is read to the end.  It does support the optional
        `size` argument if the underlaying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # Shrink the remaining size budget by what the last
                # readline actually consumed.
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result

    def tell(self):
        """Returns the position of the stream.

        .. versionadded:: 0.9
        """
        return self._pos

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
| |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNA - Service example - Settings constants."""
from dna_project_settings import GCS_PROJECT_ROOT
from utils import SelfIncreasingIndex
SERVICE_NAME = "SERVICE-EXAMPLE"

# BigQuery Dataset and table names.
GBQ_DATASET = "service_example_dataset"
GBQ_TABLE = "dcm_data"

# Cloud Storage bucket name.
GCS_BUCKET = "%s-service-example" % GCS_PROJECT_ROOT

# Main script to run on the Compute Engine instance.
GCE_RUN_SCRIPT = "./service-example-run.sh"

# DCM report default values.
DCM_REPORT_NAME = "ServiceExampleReport"
DCM_REPORT_DATE_RANGE = "YESTERDAY"

# Shared counter used to hand out consecutive column indexes in
# FIELD_MAP_STANDARD below.
INDEX = SelfIncreasingIndex()

# DCM Report template structure.  The "<PLACEHOLDER>" values (name,
# fileName, relativeDateRange) are filled in at report-creation time.
DCM_REPORT_TEMPLATE = {
    "name": "<PLACEHOLDER>",
    "type": "STANDARD",
    "fileName": "<PLACEHOLDER>",
    "format": "CSV",
    "criteria": {
        "dateRange": {
            "relativeDateRange": "<PLACEHOLDER>"
        },
        "dimensions": [{
            "name": "dfa:advertiser"
        }, {
            "name": "dfa:advertiserId"
        }, {
            "name": "dfa:campaign"
        }, {
            "name": "dfa:campaignId"
        }, {
            "name": "dfa:placementSize"
        }, {
            "name": "dfa:creativeType"
        }, {
            "name": "dfa:creativeSize"
        }, {
            "name": "dfa:platformType"
        }, {
            "name": "dfa:site"
        }, {
            "name": "dfa:month"
        }, {
            "name": "dfa:week"
        }, {
            "name": "dfa:date"
        }],
        "metricNames": [
            "dfa:clicks",
            "dfa:impressions",
            "dfa:activeViewAverageViewableTimeSecond",
            "dfa:activeViewEligibleImpressions",
            "dfa:activeViewMeasurableImpressions",
            "dfa:activeViewViewableImpressions",
        ]
    }
}

# Field map structure: report column name -> {"idx": column position}.
# The entry order mirrors the dimensions followed by the metrics declared
# in DCM_REPORT_TEMPLATE above; indexes are assigned consecutively via
# INDEX (presumably starting at 0 — see SelfIncreasingIndex in utils).
FIELD_MAP_STANDARD = {
    "Advertiser": {
        "idx": INDEX.start()
    },
    "AdvertiserID": {
        "idx": INDEX.nextval()
    },
    "Campaign": {
        "idx": INDEX.nextval()
    },
    "CampaignID": {
        "idx": INDEX.nextval()
    },
    "PlacementSize": {
        "idx": INDEX.nextval()
    },
    "CreativeType": {
        "idx": INDEX.nextval()
    },
    "CreativeSize": {
        "idx": INDEX.nextval()
    },
    "PlatformType": {
        "idx": INDEX.nextval()
    },
    "Site": {
        "idx": INDEX.nextval()
    },
    "Month": {
        "idx": INDEX.nextval()
    },
    "Week": {
        "idx": INDEX.nextval()
    },
    "Date": {
        "idx": INDEX.nextval()
    },
    "Clicks": {
        "idx": INDEX.nextval()
    },
    "Impressions": {
        "idx": INDEX.nextval()
    },
    "ViewableTimeSeconds": {
        "idx": INDEX.nextval()
    },
    "EligibleImpressions": {
        "idx": INDEX.nextval()
    },
    "MeasurableImpressions": {
        "idx": INDEX.nextval()
    },
    "ViewableImpressions": {
        "idx": INDEX.nextval()
    },
}

# Table Data schema for the BigQuery destination table; field order
# matches FIELD_MAP_STANDARD above.
DATA_SCHEMA_STANDARD = {
    "fields": [
        {
            "name": "Advertiser",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "AdvertiserID",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "Campaign",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "CampaignID",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "PlacementSize",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "CreativeType",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "CreativeSize",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "PlatformType",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "Site",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "Month",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "Week",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "Date",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "name": "Clicks",
            "type": "INTEGER",
            "mode": "NULLABLE"
        },
        {
            "name": "Impressions",
            "type": "INTEGER",
            "mode": "NULLABLE"
        },
        {
            "name": "ViewableTimeSeconds",
            "type": "FLOAT",
            "mode": "NULLABLE"
        },
        {
            "name": "EligibleImpressions",
            "type": "INTEGER",
            "mode": "NULLABLE"
        },
        {
            "name": "MeasurableImpressions",
            "type": "INTEGER",
            "mode": "NULLABLE"
        },
        {
            "name": "ViewableImpressions",
            "type": "INTEGER",
            "mode": "NULLABLE"
        },
    ]
}
| |
"""Convert a csv file to an sqlite table."""
import sys
import csv
import sqlite3
import os
import re
# pointer to our csv file descriptor
csvFile = None
# schema info parsed from the first three rows of the csv file
columnNames = None
columnTypes = None
columnComments = None
# data types accepted in the type (second) header row
validDataTypes = ["string", "number", "date"]
# name of the auto-increment primary key column added to every table
idColumnName = "_id"
# output database file name (derived from the input file name)
outfileName = None
outfileExtension = ".sqlite3"
# sqlite table name (derived from the file name, confirmed by the user)
tableName = None
def confirm(default=True):
"""Waits for user input, and exits on anything other than a string
that begins with "Y" or "y".
@param [default=True] {Boolean} Default response displayed to the user.
Either "[Y/n]:" (if True) for a default affirmative or "[y/N]:" (if False)
for a default negative.
@return {Boolean} True if the user typed in an affirmative response,
False if not.
"""
if default == True:
print "[Y/n]: ",
else:
print "[n/Y]: ",
response = raw_input()
if len(response) == 0:
return default
elif len(response) and (response.lower()[0] == "y"):
return True
else:
return False
def createTable():
"""Create the sqllite3 table and insert data."""
global idColumnName, columnNames, columnTypes, outfileName
print "\033[1;43m--Building data table--\033[1;m"
print "SQL statements used will be output to the screen for inspection."
print ""
conn = sqlite3.connect(outfileName)
cursor = conn.cursor()
# TODO: confirm with user (default no) before dropping the table
cursor.execute("DROP TABLE IF EXISTS "+tableName)
statement = "CREATE TABLE "+tableName+" ("+idColumnName+" INTEGER PRIMARY KEY ASC\n"
for i in range(len(columnNames)):
statement += ", "+columnNames[i]
if columnTypes[i] == "String":
statement += " TEXT\n"
elif columnTypes[i] == "Number":
statement += " NUMERIC\n"
statement += ")"
print statement
print ""
cursor.execute(statement)
conn.commit()
# Insert Data
csvFile.seek(0)
dataReader = csv.reader(csvFile)
# skip the header rows
counter = 0
for row in dataReader:
if counter < 3:
counter += 1
continue
else:
statement = "INSERT INTO "+tableName+" ("
# skip the id column, let it auto-increment
firstColumn = True
for column in columnNames:
if firstColumn == True:
statement += column
firstColumn = False
else:
statement += ", "+column
statement += ") VALUES ("
firstValue = True
for columnNum in range(len(row)):
# Need to get access to the column types to determine if we
# should quote or not
if firstValue:
firstValue = False
if columnTypes[columnNum] == "String":
statement += "'"+row[columnNum]+"'"
elif columnTypes[columnNum] == "Number":
statement += row[columnNum]
else:
if columnTypes[columnNum] == "String":
statement += ", '"+row[columnNum]+"'"
elif columnTypes[columnNum] == "Number":
statement += ", "+row[columnNum]
statement += ")"
print statement
cursor.execute(statement)
conn.commit()
# clean up
cursor.close()
conn.close()
def computeSchema():
"""Determines the table schema for our csv file."""
global csvFile, columnNames, columnTypes, columnComments
print "\033[1;43m--Computing schema--\033[1;m"
csvFile.seek(0)
schema = csv.reader(csvFile)
counter = 0
for row in schema:
if counter == 0:
columnNames = row
elif counter == 1:
columnTypes = row
elif counter == 2:
columnComments = row
break
counter += 1
print "We assume the first three rows in your csv file contain header info."
print "If the information looks incorrect, you will have an opportunity"
print "to exit and fix the csv file before creating the output table."
print "--------------------------------------------------------------------"
print "Your columns will be named (from the first row of data):"
for column in range(len(columnNames)):
print "{0:>5}: {1}".format(column, columnNames[column])
print "The data types for the columns (from the second row of data):"
for column in range(len(columnTypes)):
print "{0:>5}: {1}".format(column, columnTypes[column])
print "The descriptions of each column (from the third row of data):"
print "NOTE: Comments are ignored for sql table creation."
for column in range(len(columnComments)):
print "{0:>5}: {1}".format(column, columnComments[column])
print ""
def reportFileStats():
"""Report any stats about the csv file."""
# I think we need a new csv reader every time we want to view
# the file.
global csvFile, validDataTypes
print "\033[1;43m--Computing file stats, checking integrity--\033[1;m"
print "Number of columns in your table (determined from the first row):"
csvFile.seek(0)
columncount = 0
counter = csv.reader(csvFile)
for row in counter:
columncount = len(row)
break
print " {0}".format(columncount)
print "Number of rows in the csv file:"
csvFile.seek(0)
counter = csv.reader(csvFile)
rowcount = 0
for row in counter:
rowcount += 1
print " {0}".format(rowcount)
print "Check table integrity: expected number of columns per row?"
csvFile.seek(0)
counter = csv.reader(csvFile)
rowcount = 0
isBadTable = False
for row in counter:
if len(row) != columncount:
print "Error: row {0} has {1} columns, expected {2}".format(rowcount, len(row), columncount)
isBadTable = True
rowcount += 1
if isBadTable == False:
print "\033[1;32mTable integrity check PASS: expected dimensions.\033[1;m"
print ""
else:
print "\033[1;31mTable integrity check FAIL: unexpected dimensions.\033[1;m"
print ""
sys.exit(1)
print "Check table integrity: expected data types for each column?"
print "Valid datatypes are:"
for validType in validDataTypes:
print " {0}".format(validType)
csvFile.seek(0)
counter = csv.reader(csvFile)
rowcount = 0
isBadTable = False
for row in counter:
# data types are in the second row
if rowcount == 1:
columncount = 0
for column in row:
if column not in validDataTypes:
print "Error: column {0} has unexpected type {1}".format(columncount, column)
isBadTable = True
columncount += 1
# Only process the data type row
break
else:
rowcount += 1
if isBadTable == False:
print "\033[1;32mTable integrity check PASS: expected datatypes.\033[1;m"
print ""
else:
print "\033[1;31mTable integrity check FAIL: unexpected datatypes.\033[1;m"
print ""
sys.exit(1)
def init(filepath):
"""Kicks off the program by attempting to open the csv file."""
global csvFile, outfileName, tableName
# read stocks data, print status messages
try:
print "\033[1;43m--Opening csv file--\033[1;m"
csvFile = open(filepath, "rb")
print "\033[1;32mOpened csv file:", filepath,"\033[1;m"
# Figure out database name first
outfileMatches = re.match(r"([\w\S]*)(\.[^.]+$)", os.path.basename(filepath))
if outfileMatches == None:
# Handle the case where we don't have something that qualifies
# as an extension to the file
outfileName = filepath+outfileExtension
else:
outfileName = outfileMatches.group(1)+outfileExtension
# Figure out table name from the file name
tableName = re.match(r"([\w\S]*)(\.[^.]+$)", outfileName).group(1)
# Confirm the table and file names with the user
print "The sqlite3 table will be named:", tableName
print "NOTE: If this table already exists in the db file, the pre-existing"
print "data will be deleted (dropped) and lost."
print "Is", tableName, "the correct table name?"
if not confirm():
print "Please input a new table: "
tableName = raw_input()
print "Is", tableName, "the correct able name?"
if not confirm():
print "We must have a table name."
print ""
sys.exit()
print "The sqlite3 file will be named:", outfileName
print "Is this correct?"
if not confirm():
print "Please input the complete file and path to your sqlite3 db: "
outfileName = raw_input()
print "We will attempt to use the file at:", outfileName
print "Is this okay?"
if not confirm():
print "We need an output file."
print ""
sys.exit()
# TODO: choose a base table name, and inform the user that we will
# attempt to use this name as the table name in the database.
#
# TODO: prompt for okayness from the user, default yes
print ""
except IOError:
print "\033[1;31mFailed to open csv file:", sys.exc_info()[1],"\033[1;m"
print ""
sys.exit(1)
if __name__ == "__main__":
    try:
        if len(sys.argv) < 2:
            # No input file given: show usage and bail out.
            print "Usage:"
            print "python", sys.argv[0], "file2convert.csv"
            sys.exit(1)
        else:
            # process the file: open/confirm names, validate, then build
            # the sqlite table from the csv contents
            init(sys.argv[1])
            reportFileStats()
            computeSchema()
            createTable()
            # natural exit
            sys.exit(0)
    except SystemExit:
        # Both error paths and the natural exit funnel through here.
        if csvFile:
            # Make sure to close the file
            csvFile.close()
        print "Exiting program."
| |
# -*- coding: latin-1 -*-
import random
import sys
sys.path.append("..") #so other modules can be found in parent dir
from Player import *
from Constants import *
from Construction import CONSTR_STATS
from Ant import UNIT_STATS
from Move import Move
from GameState import addCoords
from AIPlayerUtils import *
##
#AIPlayer
#Description: The responsibility of this class is to interact with the game by
#deciding a valid move based on a given game state. This class has methods that
#have been implemented by Elise Sunderland and Matt Garcia
#
#Variables:
#   playerId - The id of the player.
##
class AIPlayer(Player):
    """Heuristic worker agent: gathers food with up to 3 workers and
    harasses the enemy queen with up to 2 soldiers."""

    def __init__(self, inputPlayerId):
        """Creates a new Player with the given id."""
        super(AIPlayer, self).__init__(inputPlayerId, "Heuristic Worker")
        # Useful instance variables, cached on first use
        self.myFood = None
        self.myFood1 = None   # food closest to my tunnel
        self.myFood2 = None   # food closest to my anthill
        self.myTunnel = None
        self.myAnthill = None
        self.enemyAnthill = None
        self.enemyTunnel = None

    def getPlacement(self, currentState):
        """Place our constructions in fixed spots and the enemy's food in
        the least optimal (farthest from their anthill/tunnel) spaces.

        BUG FIXES: `enemy` was previously set equal to whoseTurn, so the
        agent looked up its OWN anthill/tunnel when placing enemy food;
        and `constr == True` was a no-op comparison where an assignment
        was intended.
        """
        # reset cached state for a new game
        self.myFood = None
        self.myFood1 = None
        self.myFood2 = None
        self.myTunnel = None
        self.enemyAnthill = None
        self.enemyTunnel = None
        self.constrCoords = None
        # BUG FIX: the opponent is the player NOT on move.
        enemy = 1 - currentState.whoseTurn
        if currentState.phase == SETUP_PHASE_1:
            # fixed layout: anthill, tunnel, grass, then two enemy spots
            return [(2, 1), (7, 2),
                    (6, 3), (5, 3), (0, 3), (1, 3),
                    (2, 3), (3, 3), (4, 3),
                    (5, 0), (9, 0)]
        # set the enemy food to the least optimal places
        elif currentState.phase == SETUP_PHASE_2:
            self.enemyAnthill = getConstrList(currentState, enemy, (ANTHILL,))[0]
            self.enemyTunnel = getConstrList(currentState, enemy, (TUNNEL,))[0]
            numToPlace = 2
            moves = []
            for i in range(0, numToPlace):
                for j in range(0, 9):
                    for k in range(6, 9):
                        move = None
                        # Find the farthest open space.
                        # NOTE(review): stepsToReach is also called with
                        # `moves` (a list) as the source coordinate below;
                        # that looks wrong -- verify against the framework.
                        if (((stepsToReach(currentState, (j, k), self.enemyTunnel.coords))
                                + (stepsToReach(currentState, (j, k), self.enemyAnthill.coords)))
                                > ((stepsToReach(currentState, moves, self.enemyTunnel.coords))
                                   + (stepsToReach(currentState, moves, self.enemyAnthill.coords)))):
                            # Set the move if this space is empty
                            if currentState.board[j][k].constr == None and (j, k) not in moves:
                                move = (j, k)
                                # BUG FIX: was `== True` (a comparison whose
                                # result was discarded); mark the space
                                # non-empty so it is not chosen again.
                                currentState.board[j][k].constr = True
                                moves.append(move)
            return moves

    def getMove(self, currentState):
        """Builds 3 workers (and up to 2 soldiers if the opponent has
        attacking units); workers ferry food, soldiers bother the queen."""
        # Useful pointers
        myInv = getCurrPlayerInventory(currentState)
        me = currentState.whoseTurn
        foods = getConstrList(currentState, None, (FOOD,))
        bestDistSoFar = 1000   # i.e., infinity
        bestDistSoFar2 = 1000  # i.e., infinity
        # keep track of our ants
        numWorkers = len(getAntList(currentState, me, (WORKER,)))
        numSoldiers = len(getAntList(currentState, me, (SOLDIER,)))
        # the first time this method is called, the food and tunnel locations
        # need to be recorded in their respective instance variables
        if (self.myAnthill == None):
            self.myAnthill = getConstrList(currentState, me, (ANTHILL,))[0]
        if (self.myTunnel == None):
            self.myTunnel = getConstrList(currentState, me, (TUNNEL,))[0]
        if (self.myFood == None):
            foods = getConstrList(currentState, None, (FOOD,))
            self.myFood = foods[0]
            # find the food closest to the tunnel
            for food in foods:
                dist = stepsToReach(currentState, self.myTunnel.coords, food.coords)
                if (dist < bestDistSoFar):
                    self.myFood1 = food
                    bestDistSoFar = dist
            # find the closest to the anthill
            for food in foods:
                dist2 = stepsToReach(currentState, self.myAnthill.coords, food.coords)
                if (dist2 < bestDistSoFar2):
                    self.myFood2 = food
                    bestDistSoFar2 = dist2
        # if the queen is on the anthill move her away to the back row
        if (myInv.getQueen().coords == myInv.getAnthill().coords):
            path = createPathToward(currentState, myInv.getQueen().coords,
                                    (0, 3), UNIT_STATS[QUEEN][MOVEMENT])
            return Move(MOVE_ANT, path, None)
        # if I have enough food, build more workers (never more than 3)
        if (myInv.foodCount > 1 and numWorkers < 3):
            if (getAntAt(currentState, myInv.getAnthill().coords) is None):
                return Move(BUILD, [myInv.getAnthill().coords], WORKER)
        # Then soldiers (max 2), but only if the opponent actually has
        # attacking units.
        # BUG FIX: was `not (getAntList(currentState, me+1, ...) is None)`,
        # which (a) used an invalid player id when me == 1 and (b) was
        # always True because getAntList returns a list, never None.
        if getAntList(currentState, 1 - me, (DRONE, SOLDIER)):
            if (myInv.foodCount > 3 and numSoldiers < 2):
                if (getAntAt(currentState, myInv.getAnthill().coords) is None):
                    return Move(BUILD, [myInv.getAnthill().coords], SOLDIER)
        # WORKER orders: gather food as fast as they can.  Workers 0 and 2
        # use the tunnel/closest-to-tunnel food, worker 1 the anthill pair.
        myWorkers = getAntList(currentState, me, (WORKER,))
        for index, worker in enumerate(myWorkers):
            if (worker.hasMoved):
                continue
            if (index == 0 or index == 2):
                constrCoords = self.myTunnel.coords
                targetFood = self.myFood1
            else:
                constrCoords = self.myAnthill.coords
                targetFood = self.myFood2
            # if the worker has food, move toward constr
            if (worker.carrying):
                path = createPathToward(currentState, worker.coords,
                                        constrCoords, UNIT_STATS[WORKER][MOVEMENT])
                if (path == [worker.coords]):
                    # blocked: take any legal move instead of standing still
                    path = listAllMovementPaths(currentState, worker.coords, UNIT_STATS[WORKER][MOVEMENT])[0]
                return Move(MOVE_ANT, path, None)
            # if the worker has no food, move toward food
            else:
                path = createPathToward(currentState, worker.coords,
                                        targetFood.coords, UNIT_STATS[WORKER][MOVEMENT])
                if (path == [worker.coords]):
                    path = listAllMovementPaths(currentState, worker.coords, UNIT_STATS[WORKER][MOVEMENT])[0]
                return Move(MOVE_ANT, path, None)
        # SOLDIER orders: move to the enemy side and bother the queen
        mySoldiers = getAntList(currentState, me, (SOLDIER,))
        for soldier in mySoldiers:
            if not (soldier.hasMoved):
                mySoldierX = soldier.coords[0]
                mySoldierY = soldier.coords[1]
                if (mySoldierY < 8):  # move to enemy's side
                    mySoldierY += 1
                else:
                    # find the queen and create a path toward her
                    enemyQueen = getAntList(currentState, None, (QUEEN,))
                    soldierPath = createPathToward(currentState, (mySoldierX, mySoldierY),
                                                   enemyQueen[0].coords, UNIT_STATS[SOLDIER][MOVEMENT])
                    return Move(MOVE_ANT, soldierPath, None)
                if (mySoldierX, mySoldierY) in listReachableAdjacent(currentState, soldier.coords, 2):
                    return Move(MOVE_ANT, [soldier.coords, (mySoldierX, mySoldierY)], None)
                else:
                    return Move(MOVE_ANT, [soldier.coords], None)
        # If our move hasn't been ended by now, end our move here.
        return Move(END, None, None)

    def getAttack(self, currentState, attackingAnt, enemyLocations):
        """No rhyme or reason to attacks: take the first enemy in range."""
        return enemyLocations[0]  # don't care

    def registerWin(self, hasWon):
        """This agent doesn't learn."""
        # method template, not implemented
        pass
| |
import usocket as socket
import ustruct as struct
from ubinascii import hexlify
class MQTTException(Exception):
    """Raised on MQTT protocol errors (non-zero CONNACK/SUBACK codes)."""
    pass
class MQTTClient:
    """Minimal MQTT client in the MicroPython ``umqtt`` style.

    Speaks the wire protocol directly over a socket: connect (with
    optional TLS, last will and user/password auth), publish/subscribe
    with QoS 0 and 1, ping, and blocking/non-blocking message polling.
    """

    def __init__(self, client_id, server, port=0, user=None, password=None, keepalive=0,
                 ssl=False, ssl_params={}):
        # port 0 means "use the protocol default": 8883 for TLS, 1883 plain.
        # NOTE(review): ssl_params uses a shared mutable default; harmless
        # as long as callers never mutate the dict.
        if port == 0:
            port = 8883 if ssl else 1883
        self.client_id = client_id
        self.sock = None
        self.server = server
        self.port = port
        self.ssl = ssl
        self.ssl_params = ssl_params
        # packet-id counter for QoS>0 publishes and (un)subscribe
        self.pid = 0
        # message callback, set via set_callback()
        self.cb = None
        self.user = user
        self.pswd = password
        self.keepalive = keepalive
        # last-will settings, set via set_last_will()
        self.lw_topic = None
        self.lw_msg = None
        self.lw_qos = 0
        self.lw_retain = False

    def _send_str(self, s):
        # MQTT string: 16-bit big-endian length prefix, then the bytes.
        self.sock.write(struct.pack("!H", len(s)))
        self.sock.write(s)

    def _recv_len(self):
        # Decode the MQTT "remaining length" varint: 7 data bits per byte,
        # high bit set while more bytes follow.
        n = 0
        sh = 0
        while 1:
            b = self.sock.read(1)[0]
            n |= (b & 0x7f) << sh
            if not b & 0x80:
                return n
            sh += 7

    def set_callback(self, f):
        """Set the callback invoked as f(topic, msg) for received PUBLISHes."""
        self.cb = f

    def set_last_will(self, topic, msg, retain=False, qos=0):
        """Configure the last-will message; call before connect()."""
        assert 0 <= qos <= 2
        assert topic
        self.lw_topic = topic
        self.lw_msg = msg
        self.lw_qos = qos
        self.lw_retain = retain

    def connect(self, clean_session=True):
        """Open the socket and perform the MQTT CONNECT handshake.

        Returns the CONNACK "session present" flag (bit 0); raises
        MQTTException on a non-zero CONNACK return code.
        """
        self.sock = socket.socket()
        addr = socket.getaddrinfo(self.server, self.port)[0][-1]
        self.sock.connect(addr)
        if self.ssl:
            import ussl
            self.sock = ussl.wrap_socket(self.sock, **self.ssl_params)
        # Fixed header (0x10 = CONNECT) plus room for the varint length.
        premsg = bytearray(b"\x10\0\0\0\0\0")
        # Variable header: protocol name "MQTT", level 4, flags, keepalive.
        msg = bytearray(b"\x04MQTT\x04\x02\0\0")
        sz = 10 + 2 + len(self.client_id)
        msg[6] = clean_session << 1
        if self.user is not None:
            # NOTE(review): assumes a password is always supplied together
            # with a user; a None password would fail on len() here.
            sz += 2 + len(self.user) + 2 + len(self.pswd)
            msg[6] |= 0xC0
        if self.keepalive:
            assert self.keepalive < 65536
            msg[7] |= self.keepalive >> 8
            msg[8] |= self.keepalive & 0x00FF
        if self.lw_topic:
            sz += 2 + len(self.lw_topic) + 2 + len(self.lw_msg)
            msg[6] |= 0x4 | (self.lw_qos & 0x1) << 3 | (self.lw_qos & 0x2) << 3
            msg[6] |= self.lw_retain << 5
        # Encode the remaining length varint into premsg after the header.
        i = 1
        while sz > 0x7f:
            premsg[i] = (sz & 0x7f) | 0x80
            sz >>= 7
            i += 1
        premsg[i] = sz
        self.sock.write(premsg, i + 2)
        self.sock.write(msg)
        #print(hex(len(msg)), hexlify(msg, ":"))
        self._send_str(self.client_id)
        if self.lw_topic:
            self._send_str(self.lw_topic)
            self._send_str(self.lw_msg)
        if self.user is not None:
            self._send_str(self.user)
            self._send_str(self.pswd)
        # Expect a 4-byte CONNACK (0x20, length 2, flags, return code).
        resp = self.sock.read(4)
        assert resp[0] == 0x20 and resp[1] == 0x02
        if resp[3] != 0:
            raise MQTTException(resp[3])
        return resp[2] & 1

    def disconnect(self):
        """Send DISCONNECT and close the socket."""
        self.sock.write(b"\xe0\0")
        self.sock.close()

    def ping(self):
        """Send PINGREQ; the PINGRESP is consumed by wait_msg/check_msg."""
        self.sock.write(b"\xc0\0")

    def publish(self, topic, msg, retain=False, qos=0):
        """Publish msg to topic.  QoS 0 and 1 supported; QoS 2 asserts."""
        pkt = bytearray(b"\x30\0\0\0")
        pkt[0] |= qos << 1 | retain
        sz = 2 + len(topic) + len(msg)
        if qos > 0:
            # QoS>0 publishes carry a 2-byte packet id.
            sz += 2
        assert sz < 2097152
        i = 1
        while sz > 0x7f:
            pkt[i] = (sz & 0x7f) | 0x80
            sz >>= 7
            i += 1
        pkt[i] = sz
        #print(hex(len(pkt)), hexlify(pkt, ":"))
        self.sock.write(pkt, i + 1)
        self._send_str(topic)
        if qos > 0:
            self.pid += 1
            pid = self.pid
            # reuse pkt's first two bytes as a scratch buffer for the pid
            struct.pack_into("!H", pkt, 0, pid)
            self.sock.write(pkt, 2)
        self.sock.write(msg)
        if qos == 1:
            # Wait for the matching PUBACK.
            # NOTE(review): wait_msg() below special-cases 0x40 (PUBACK),
            # consumes one byte and returns None, so this loop may never
            # observe op == 0x40 -- verify the intended protocol flow.
            while 1:
                op = self.wait_msg()
                if op == 0x40:
                    sz = self.sock.read(1)
                    assert sz == b"\x02"
                    rcv_pid = self.sock.read(2)
                    rcv_pid = rcv_pid[0] << 8 | rcv_pid[1]
                    if pid == rcv_pid:
                        return
        elif qos == 2:
            # QoS 2 is not implemented.
            assert 0

    def subscribe(self, topic, qos=0):
        """Subscribe to topic; blocks until the matching SUBACK arrives."""
        assert self.cb is not None, "Subscribe callback is not set"
        pkt = bytearray(b"\x82\0\0\0")
        self.pid += 1
        struct.pack_into("!BH", pkt, 1, 2 + 2 + len(topic) + 1, self.pid)
        #print(hex(len(pkt)), hexlify(pkt, ":"))
        self.sock.write(pkt)
        self._send_str(topic)
        self.sock.write(qos.to_bytes(1, "little"))
        while 1:
            op = self.wait_msg()
            if op == 0x90:
                # SUBACK: verify the echoed packet id (pkt[2:4]) and check
                # the return code (0x80 = failure).
                resp = self.sock.read(4)
                #print(resp)
                assert resp[1] == pkt[2] and resp[2] == pkt[3]
                if resp[3] == 0x80:
                    raise MQTTException(resp[3])
                return

    def unsubscribe(self, topic, qos=0):
        """Unsubscribe from topic; blocks until the UNSUBACK arrives."""
        pkt = bytearray(b"\xA0\0\0\0")
        self.pid += 1
        struct.pack_into("!BH", pkt, 1, 2 + 2 + len(topic) + 1, self.pid)
        self.sock.write(pkt)
        self._send_str(topic)
        self.sock.write(qos.to_bytes(1, "little"))
        while 1:
            op = self.wait_msg()
            if op == 0xB0:
                resp = self.sock.read(4)
                print(resp)
                return

    # Wait for a single incoming MQTT message and process it.
    # Subscribed messages are delivered to a callback previously
    # set by .set_callback() method. Other (internal) MQTT
    # messages processed internally.
    def wait_msg(self):
        res = self.sock.read(1)
        # NOTE(review): blocking mode is restored only after the first
        # read; when called from check_msg() that first read is
        # non-blocking -- confirm this ordering is intentional.
        self.sock.setblocking(True)
        if res is None:
            return None
        if res == b"":
            #raise OSError(-1)
            return None
        if res == b"\xd0": # PINGRESP
            sz = self.sock.read(1)#[0]
            print(sz)
            assert sz[0] == 0
            return None
        if res == b"\x40": #PUBACK
            # NOTE(review): only the remaining-length byte is consumed
            # here; the 2-byte packet id stays in the stream and will be
            # misread as the next packet header -- verify.
            sz = self.sock.read(1)
            print(sz)
            return None
        op = res[0]
        if op & 0xf0 != 0x30:
            # Not a PUBLISH: hand the packet type byte back to the caller.
            return op
        # PUBLISH: parse remaining length, topic, optional pid, payload.
        sz = self._recv_len()
        topic_len = self.sock.read(2)
        topic_len = (topic_len[0] << 8) | topic_len[1]
        topic = self.sock.read(topic_len)
        sz -= topic_len + 2
        if op & 6:
            pid = self.sock.read(2)
            pid = pid[0] << 8 | pid[1]
            sz -= 2
        msg = self.sock.read(sz)
        # Deliver the message to the user callback.
        self.cb(topic, msg)
        if op & 6 == 2:
            # QoS 1 publish from the server: acknowledge with PUBACK.
            pkt = bytearray(b"\x40\x02\0\0")
            struct.pack_into("!H", pkt, 2, pid)
            self.sock.write(pkt)
        elif op & 6 == 4:
            # QoS 2 is not implemented.
            assert 0

    # Checks whether a pending message from server is available.
    # If not, returns immediately with None. Otherwise, does
    # the same processing as wait_msg.
    def check_msg(self):
        self.sock.setblocking(False)
        return self.wait_msg()
| |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import (abs, sum, sin, cos, sqrt, log, prod, where, pi, exp, arange,
floor, log10, atleast_2d, zeros)
from .go_benchmark import Benchmark
class Parsopoulos(Benchmark):
    r"""
    Parsopoulos objective function.

    A multimodal minimization problem [1]_ defined as:

    .. math::

        f_{\text{Parsopoulos}}(x) = \cos(x_1)^2 + \sin(x_2)^2

    with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.

    *Global optimum*: infinitely many global minima exist in R2 at
    :math:`\left(k\frac{\pi}{2}, \lambda \pi \right)` with
    :math:`k = \pm1, \pm3, ...` and :math:`\lambda = 0, \pm1, \pm2, ...`;
    within the given bounds there are 12 global minima, all equal to zero.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        lower = [-5.0] * self.N
        upper = [5.0] * self.N
        self._bounds = list(zip(lower, upper))

        self.global_optimum = [[pi / 2.0, pi]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1

        u, v = x[0], x[1]
        return cos(u) ** 2.0 + sin(v) ** 2.0
class Pathological(Benchmark):
    r"""
    Pathological objective function.

    A multimodal minimization problem [1]_ defined as:

    .. math::

        f_{\text{Pathological}}(x) = \sum_{i=1}^{n -1} \frac{\sin^{2}\left(
        \sqrt{100 x_{i+1}^{2} + x_{i}^{2}}\right) -0.5}{0.001 \left(x_{i}^{2}
        - 2x_{i}x_{i+1} + x_{i+1}^{2}\right)^{2} + 0.50}

    Here :math:`n` is the number of dimensions and
    :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0.` at :math:`x = [0, 0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N

        self.global_optimum = [[0] * self.N]
        self.fglob = 0.

    def fun(self, x, *args):
        self.nfev += 1

        # Pairwise terms over consecutive components (x_i, x_{i+1}).
        head = x[: -1]
        tail = x[1:]
        numer = sin(sqrt(100 * head ** 2 + tail ** 2)) ** 2 - 0.5
        denom = 1. + 0.001 * (head ** 2 - 2 * head * tail + tail ** 2) ** 2
        return sum(0.5 + numer / denom)
class Paviani(Benchmark):
    r"""
    Paviani objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Paviani}}(x) = \sum_{i=1}^{10} \left[\log^{2}\left(10
        - x_i\right) + \log^{2}\left(x_i -2\right)\right]
        - \left(\prod_{i=1}^{10} x_i^{10} \right)^{0.2}

    with :math:`x_i \in [2.001, 9.999]` for :math:`i = 1, ..., 10`.

    *Global optimum*: :math:`f(x_i) = -45.7784684040686` for
    :math:`x_i = 9.350266`, :math:`i = 1, ..., 10`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: think Gavana web/code definition is wrong because final product term
    shouldn't raise x to power 10.
    """

    def __init__(self, dimensions=10):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(2.001, 9.999)] * self.N
        self.global_optimum = [[9.350266] * self.N]
        self.fglob = -45.7784684040686

    def fun(self, x, *args):
        self.nfev += 1
        log_terms = log(x - 2) ** 2.0 + log(10.0 - x) ** 2.0
        return sum(log_terms) - prod(x) ** 0.2
class Penalty01(Benchmark):
    r"""
    Penalty 1 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Penalty01}}(x) = \frac{\pi}{30} \left\{10 \sin^2(\pi y_1)
        + \sum_{i=1}^{n-1} (y_i - 1)^2 \left[1 + 10 \sin^2(\pi y_{i+1}) \right]
        + (y_n - 1)^2 \right \} + \sum_{i=1}^n u(x_i, 10, 100, 4)

    where :math:`y_i = 1 + \frac{1}{4}(x_i + 1)` and :math:`u` is the
    boundary-penalty term

    .. math::

        u(x_i, a, k, m) =
        \begin{cases}
        k(x_i - a)^m & \textrm{if} \hspace{5pt} x_i > a \\
        0 & \textrm{if} \hspace{5pt} -a \leq x_i \leq a \\
        k(-x_i - a)^m & \textrm{if} \hspace{5pt} x_i < -a
        \end{cases}

    with :math:`x_i \in [-50, 50]`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = -1`,
    :math:`i = 1, ..., n`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-50.0, 50.0)] * self.N
        self.custom_bounds = ([-5.0, 5.0], [-5.0, 5.0])
        self.global_optimum = [[-1.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        a, k, m = 10.0, 100.0, 4.0
        absx = abs(x)
        # Boundary penalty u(x, a, k, m), vectorized over all coordinates.
        penalty = where(absx > a, k * (absx - a) ** m, 0.0)
        y = 1.0 + (x + 1.0) / 4.0
        core = (10.0 * sin(pi * y[0]) ** 2.0
                + sum((y[:-1] - 1.0) ** 2.0
                      * (1.0 + 10.0 * sin(pi * y[1:]) ** 2.0))
                + (y[-1] - 1) ** 2.0)
        return sum(penalty) + (pi / 30.0) * core
class Penalty02(Benchmark):
    r"""
    Penalty 2 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Penalty02}}(x) = 0.1 \left\{\sin^2(3\pi x_1) + \sum_{i=1}^{n-1}
        (x_i - 1)^2 \left[1 + \sin^2(3\pi x_{i+1}) \right ]
        + (x_n - 1)^2 \left [1 + \sin^2(2 \pi x_n) \right ]\right \}
        + \sum_{i=1}^n u(x_i, 5, 100, 4)

    where :math:`u` is the boundary-penalty term

    .. math::

        u(x_i, a, k, m) =
        \begin{cases}
        k(x_i - a)^m & \textrm{if} \hspace{5pt} x_i > a \\
        0 & \textrm{if} \hspace{5pt} -a \leq x_i \leq a \\
        k(-x_i - a)^m & \textrm{if} \hspace{5pt} x_i < -a \\
        \end{cases}

    with :math:`x_i \in [-50, 50]`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1`,
    :math:`i = 1, ..., n`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-50.0, 50.0)] * self.N
        self.custom_bounds = ([-4.0, 4.0], [-4.0, 4.0])
        self.global_optimum = [[1.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        a, k, m = 5.0, 100.0, 4.0
        absx = abs(x)
        # Boundary penalty u(x, a, k, m), vectorized over all coordinates.
        penalty = where(absx > a, k * (absx - a) ** m, 0.0)
        first = 10 * sin(3.0 * pi * x[0]) ** 2.0
        middle = sum((x[:-1] - 1.0) ** 2.0
                     * (1.0 + sin(3 * pi * x[1:]) ** 2.0))
        last = (x[-1] - 1) ** 2.0 * (1 + sin(2 * pi * x[-1]) ** 2.0)
        return sum(penalty) + 0.1 * (first + middle + last)
class PenHolder(Benchmark):
    r"""
    PenHolder objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{PenHolder}}(x) = -e^{\left|{e^{-\left|{- \frac{\sqrt{x_{1}^{2}
        + x_{2}^{2}}}{\pi} + 1}\right|} \cos\left(x_{1}\right)
        \cos\left(x_{2}\right)}\right|^{-1}}

    with :math:`x_i \in [-11, 11]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x_i) = -0.9635348327265058` for
    :math:`x_i = \pm 9.646167671043401` for :math:`i = 1, 2`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-11.0, 11.0)] * self.N
        self.global_optimum = [[-9.646167708023526, 9.646167671043401]]
        self.fglob = -0.9635348327265058

    def fun(self, x, *args):
        self.nfev += 1
        radius = sqrt(x[0] ** 2 + x[1] ** 2)
        inner = abs(1. - radius / pi)
        envelope = cos(x[0]) * cos(x[1]) * exp(inner)
        return -exp(-abs(envelope) ** -1)
class PermFunction01(Benchmark):
    r"""
    PermFunction 1 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{PermFunction01}}(x) = \sum_{k=1}^n \left\{ \sum_{j=1}^n (j^k
        + \beta) \left[ \left(\frac{x_j}{j}\right)^k - 1 \right] \right\}^2

    with :math:`x_i \in [-n, n + 1]`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = i`,
    :math:`i = 1, ..., n`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO: line 560
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-self.N, self.N + 1)] * self.N
        self.global_optimum = [list(range(1, self.N + 1))]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        beta = 0.5
        idx = arange(1, self.N + 1)
        # Column vector of outer indices k, row vector of inner indices j;
        # broadcasting builds the full (k, j) term matrix at once.
        k = atleast_2d(idx).T
        j = atleast_2d(idx)
        inner = (j ** k + beta) * ((x / j) ** k - 1)
        return sum(sum(inner, axis=1) ** 2)
class PermFunction02(Benchmark):
    r"""
    PermFunction 2 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{PermFunction02}}(x) = \sum_{k=1}^n \left\{ \sum_{j=1}^n (j
        + \beta) \left[ \left(x_j^k - {\frac{1}{j}}^{k} \right )
        \right] \right\}^2

    with :math:`x_i \in [-n, n+1]`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = \frac{1}{i}`,
    :math:`i = 1, ..., n`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO: line 582
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-self.N, self.N + 1)] * self.N
        self.custom_bounds = ([0, 1.5], [0, 1.0])
        self.global_optimum = [1. / arange(1, self.N + 1)]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        beta = 10
        idx = arange(1, self.N + 1)
        # Column vector of outer indices k, row vector of inner indices j.
        k = atleast_2d(idx).T
        j = atleast_2d(idx)
        inner = (j + beta) * (x ** k - (1. / j) ** k)
        return sum(sum(inner, axis=1) ** 2)
class Pinter(Benchmark):
    r"""
    Pinter objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Pinter}}(x) = \sum_{i=1}^n ix_i^2 + \sum_{i=1}^n 20i
        \sin^2 A + \sum_{i=1}^n i \log_{10} (1 + iB^2)

    where

    .. math::

        \begin{cases}
        A = x_{i-1} \sin x_i + \sin x_{i+1} \\
        B = x_{i-1}^2 - 2x_i + 3x_{i + 1} - \cos x_i + 1\\
        \end{cases}

    with the cyclic convention :math:`x_0 = x_n`, :math:`x_{n + 1} = x_1`,
    and :math:`x_i \in [-10, 10]`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0`,
    :math:`i = 1, ..., n`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        i = arange(1, self.N + 1)
        # Build a cyclically padded copy so that x0 = x_n and x_{n+1} = x_1.
        padded = zeros(self.N + 2)
        padded[1:-1] = x
        padded[0] = x[-1]
        padded[-1] = x[0]
        left, mid, right = padded[:-2], padded[1:-1], padded[2:]
        A = left * sin(mid) + sin(right)
        B = left ** 2 - 2 * mid + 3 * right - cos(mid) + 1
        return (sum(i * x ** 2)
                + sum(20 * i * sin(A) ** 2)
                + sum(i * log10(1 + i * B ** 2)))
class Plateau(Benchmark):
    r"""
    Plateau objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Plateau}}(x) = 30 + \sum_{i=1}^n \lfloor \lvert x_i
        \rvert\rfloor

    with :math:`x_i \in [-5.12, 5.12]`.

    *Global optimum*: :math:`f(x) = 30` for :math:`x_i = 0`,
    :math:`i = 1, ..., n`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.12, 5.12)] * self.N
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 30.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        return 30.0 + floor(abs(x)).sum()
class Powell(Benchmark):
    r"""
    Powell objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Powell}}(x) = (x_3+10x_1)^2 + 5(x_2-x_4)^2 + (x_1-2x_2)^4
        + 10(x_3-x_4)^4

    with :math:`x_i \in [-4, 5]` for :math:`i = 1, ..., 4`.

    NOTE(review): the implementation below permutes the variable indices
    relative to the formula above; the minimum (all zeros, f = 0) is the
    same either way — confirm against the upstream source.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0`,
    :math:`i = 1, ..., 4`.

    ..[1] Powell, M. An iterative method for finding stationary values of a
    function of several variables Computer Journal, 1962, 5, 147-151
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-4.0, 5.0)] * self.N
        self.global_optimum = [[0, 0, 0, 0]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1
        t1 = (x[0] + 10 * x[1]) ** 2
        t2 = 5 * (x[2] - x[3]) ** 2
        t3 = (x[1] - 2 * x[2]) ** 4
        t4 = 10 * (x[0] - x[3]) ** 4
        return t1 + t2 + t3 + t4
class PowerSum(Benchmark):
    r"""
    Power sum objective function.

    A multimodal minimization problem:

    .. math::

        f_{\text{PowerSum}}(x) = \sum_{k=1}^n\left[\left(\sum_{i=1}^n x_i^k
        \right) - b_k \right]^2

    where :math:`b = [8, 18, 44, 114]` and :math:`x_i \in [0, 4]` for
    :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 2, 2, 3]`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, 4.0)] * self.N
        self.global_optimum = [[1.0, 2.0, 2.0, 3.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        b = [8.0, 18.0, 44.0, 114.0]
        # Column vector of exponents k = 1..n; broadcasting x ** k gives
        # one row of powers per k, summed along axis 1.
        k = atleast_2d(arange(1, self.N + 1)).T
        residuals = sum(x ** k, axis=1) - b
        return sum(residuals ** 2)
class Price01(Benchmark):
    r"""
    Price 1 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Price01}}(x) = (\lvert x_1 \rvert - 5)^2
        + (\lvert x_2 \rvert - 5)^2

    with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x_i) = 0.0` at the four points
    :math:`x = [\pm 5, \pm 5]`.

    .. [1] Price, W. A controlled random search procedure for global
    optimisation Computer Journal, 1977, 20, 367-370
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-500.0, 500.0)] * self.N
        self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])
        self.global_optimum = [[5.0, 5.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        dx = abs(x[0]) - 5.0
        dy = abs(x[1]) - 5.0
        return dx ** 2.0 + dy ** 2.0
class Price02(Benchmark):
    r"""
    Price 2 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Price02}}(x) = 1 + \sin^2(x_1) + \sin^2(x_2)
        - 0.1e^{(-x_1^2 - x_2^2)}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0.9` for :math:`x = [0, 0]`.

    .. [1] Price, W. A controlled random search procedure for global
    optimisation Computer Journal, 1977, 20, 367-370
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0.0, 0.0]]
        self.fglob = 0.9

    def fun(self, x, *args):
        self.nfev += 1
        sine_part = sum(sin(x) ** 2)
        gauss = exp(-x[0] ** 2.0 - x[1] ** 2.0)
        return 1.0 + sine_part - 0.1 * gauss
class Price03(Benchmark):
    r"""
    Price 3 objective function.

    This class defines the Price 3 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Price03}}(x) = 100(x_2 - x_1^2)^2 + \left[6.4(x_2 - 0.5)^2
        - x_1 - 0.6 \right]^2

    with :math:`x_i \in [-50, 50]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 1]`.

    .. [1] Price, W. A controlled random search procedure for global
    optimisation Computer Journal, 1977, 20, 367-370

    TODO Jamil #96 has an erroneous factor of 6 in front of the square brackets
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # NOTE(review): bounds used here are [-5, 5] although the docstring
        # quotes [-50, 50]; kept as-is to preserve benchmark behaviour.
        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
        self.custom_bounds = ([0, 2], [0, 2])
        self.global_optimum = [[1.0, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Rosenbrock-like valley term plus Price's quadratic correction term.
        return (100 * (x[1] - x[0] ** 2) ** 2
                + (6.4 * (x[1] - 0.5) ** 2 - x[0] - 0.6) ** 2)
class Price04(Benchmark):
    r"""
    Price 4 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Price04}}(x) = (2 x_1^3 x_2 - x_2^3)^2
        + (6 x_1 - x_2^2 + x_2)^2

    with :math:`x_i \in [-50, 50]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`,
    :math:`x = [2, 4]` and :math:`x = [1.464, -2.506]`.

    .. [1] Price, W. A controlled random search procedure for global
    optimisation Computer Journal, 1977, 20, 367-370
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-50.0, 50.0)] * self.N
        self.custom_bounds = ([0, 2], [0, 2])
        self.global_optimum = [[2.0, 4.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        p = 2.0 * x[1] * x[0] ** 3.0 - x[1] ** 3.0
        q = 6.0 * x[0] - x[1] ** 2.0 + x[1]
        return p ** 2.0 + q ** 2.0
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v1.py.
This test mainly focuses on comparing slim MobilenetV1 and Keras MobilenetV1 for
object detection. To verify the consistency of the two models, we compare:
1. Output shape of each layer given different inputs
2. Number of global variables
We also visualize the model structure via Tensorboard, and compare the model
layout and the parameters of each Op to make sure the two implementations are
consistent.
"""
import itertools
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models.keras_models import mobilenet_v1
from object_detection.models.keras_models import test_utils
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
# ReLU layers whose outputs are compared between the two implementations:
# the stem conv plus each depthwise/pointwise pair of the 13 blocks.
_KERAS_LAYERS_TO_CHECK = [
    'conv1_relu',
    'conv_dw_1_relu', 'conv_pw_1_relu',
    'conv_dw_2_relu', 'conv_pw_2_relu',
    'conv_dw_3_relu', 'conv_pw_3_relu',
    'conv_dw_4_relu', 'conv_pw_4_relu',
    'conv_dw_5_relu', 'conv_pw_5_relu',
    'conv_dw_6_relu', 'conv_pw_6_relu',
    'conv_dw_7_relu', 'conv_pw_7_relu',
    'conv_dw_8_relu', 'conv_pw_8_relu',
    'conv_dw_9_relu', 'conv_pw_9_relu',
    'conv_dw_10_relu', 'conv_pw_10_relu',
    'conv_dw_11_relu', 'conv_pw_11_relu',
    'conv_dw_12_relu', 'conv_pw_12_relu',
    'conv_dw_13_relu', 'conv_pw_13_relu',
]
# RGB input images.
_NUM_CHANNELS = 3
# Batch size used for all shape checks.
_BATCH_SIZE = 2
class MobilenetV1Test(test_case.TestCase):
  """Consistency tests between the Keras and slim MobilenetV1 models.

  Checks intermediate feature-map shapes (static and dynamic input sizes),
  hyperparameter overrides, and the global-variable count.
  """

  def _build_conv_hyperparams(self):
    """Returns KerasLayerHyperparams with distinctive batch-norm settings."""
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      activation: RELU_6
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      batch_norm {
        train: true,
        scale: false,
        center: true,
        decay: 0.2,
        epsilon: 0.1,
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)

  def _create_application_with_layer_outputs(
      self, layer_names, batchnorm_training,
      conv_hyperparams=None,
      use_explicit_padding=False,
      alpha=1.0,
      min_depth=None):
    """Constructs Keras MobilenetV1 that extracts intermediate layer outputs."""
    if not layer_names:
      layer_names = _KERAS_LAYERS_TO_CHECK
    full_model = mobilenet_v1.mobilenet_v1(
        batchnorm_training=batchnorm_training,
        conv_hyperparams=conv_hyperparams,
        weights=None,
        use_explicit_padding=use_explicit_padding,
        alpha=alpha,
        min_depth=min_depth,
        include_top=False)
    layer_outputs = [full_model.get_layer(name=layer).output
                     for layer in layer_names]
    return tf.keras.Model(
        inputs=full_model.inputs,
        outputs=layer_outputs)

  def _check_returns_correct_shape(
      self, image_height, image_width, depth_multiplier,
      expected_feature_map_shape, use_explicit_padding=False, min_depth=8,
      layer_names=None):
    """Runs the model on random input and checks per-layer output shapes."""
    def graph_fn(image_tensor):
      model = self._create_application_with_layer_outputs(
          layer_names=layer_names,
          batchnorm_training=False,
          use_explicit_padding=use_explicit_padding,
          min_depth=min_depth,
          alpha=depth_multiplier)
      return model(image_tensor)

    image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width,
                                  _NUM_CHANNELS).astype(np.float32)
    feature_maps = self.execute(graph_fn, [image_tensor])
    # BUGFIX: itertools.izip does not exist on Python 3; the builtin zip
    # iterates identically on both Python 2 and 3.
    for feature_map, expected_shape in zip(
        feature_maps, expected_feature_map_shape):
      self.assertAllEqual(feature_map.shape, expected_shape)

  def _check_returns_correct_shapes_with_dynamic_inputs(
      self, image_height, image_width, depth_multiplier,
      expected_feature_map_shape, use_explicit_padding=False, min_depth=8,
      layer_names=None):
    """Same as _check_returns_correct_shape but with runtime-sized inputs."""
    def graph_fn(image_height, image_width):
      image_tensor = tf.random_uniform([_BATCH_SIZE, image_height, image_width,
                                        _NUM_CHANNELS], dtype=tf.float32)
      # CONSISTENCY FIX: min_depth was previously accepted but silently
      # dropped here, unlike the static-shape checker above.
      model = self._create_application_with_layer_outputs(
          layer_names=layer_names,
          batchnorm_training=False,
          use_explicit_padding=use_explicit_padding,
          min_depth=min_depth,
          alpha=depth_multiplier)
      return model(image_tensor)

    feature_maps = self.execute_cpu(graph_fn, [
        np.array(image_height, dtype=np.int32),
        np.array(image_width, dtype=np.int32)
    ])
    # BUGFIX: itertools.izip -> builtin zip (Python 3 compatibility).
    for feature_map, expected_shape in zip(
        feature_maps, expected_feature_map_shape):
      self.assertAllEqual(feature_map.shape, expected_shape)

  def _get_variables(self, depth_multiplier, layer_names=None):
    """Builds the model in a fresh graph and returns its global variables."""
    g = tf.Graph()
    with g.as_default():
      preprocessed_inputs = tf.placeholder(
          tf.float32, (4, None, None, _NUM_CHANNELS))
      model = self._create_application_with_layer_outputs(
          layer_names=layer_names,
          batchnorm_training=False, use_explicit_padding=False,
          alpha=depth_multiplier)
      model(preprocessed_inputs)
      return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

  def test_returns_correct_shapes_128(self):
    image_height = 128
    image_width = 128
    depth_multiplier = 1.0
    expected_feature_map_shape = (
        test_utils.moblenet_v1_expected_feature_map_shape_128)
    self._check_returns_correct_shape(
        image_height, image_width, depth_multiplier, expected_feature_map_shape)

  def test_returns_correct_shapes_128_explicit_padding(
      self):
    image_height = 128
    image_width = 128
    depth_multiplier = 1.0
    expected_feature_map_shape = (
        test_utils.moblenet_v1_expected_feature_map_shape_128_explicit_padding)
    self._check_returns_correct_shape(
        image_height, image_width, depth_multiplier, expected_feature_map_shape,
        use_explicit_padding=True)

  def test_returns_correct_shapes_with_dynamic_inputs(
      self):
    image_height = 128
    image_width = 128
    depth_multiplier = 1.0
    expected_feature_map_shape = (
        test_utils.mobilenet_v1_expected_feature_map_shape_with_dynamic_inputs)
    self._check_returns_correct_shapes_with_dynamic_inputs(
        image_height, image_width, depth_multiplier, expected_feature_map_shape)

  def test_returns_correct_shapes_299(self):
    image_height = 299
    image_width = 299
    depth_multiplier = 1.0
    expected_feature_map_shape = (
        test_utils.moblenet_v1_expected_feature_map_shape_299)
    self._check_returns_correct_shape(
        image_height, image_width, depth_multiplier, expected_feature_map_shape)

  def test_returns_correct_shapes_enforcing_min_depth(
      self):
    image_height = 299
    image_width = 299
    depth_multiplier = 0.5**12
    expected_feature_map_shape = (
        test_utils.moblenet_v1_expected_feature_map_shape_enforcing_min_depth)
    self._check_returns_correct_shape(
        image_height, image_width, depth_multiplier, expected_feature_map_shape)

  def test_hyperparam_override(self):
    hyperparams = self._build_conv_hyperparams()
    model = mobilenet_v1.mobilenet_v1(
        batchnorm_training=True,
        conv_hyperparams=hyperparams,
        weights=None,
        use_explicit_padding=False,
        alpha=1.0,
        min_depth=32,
        include_top=False)
    hyperparams.params()
    bn_layer = model.get_layer(name='conv_pw_5_bn')
    self.assertAllClose(bn_layer.momentum, 0.2)
    self.assertAllClose(bn_layer.epsilon, 0.1)

  def test_variable_count(self):
    depth_multiplier = 1
    variables = self._get_variables(depth_multiplier)
    # 135 is the number of variables from slim MobilenetV1 model.
    self.assertEqual(len(variables), 135)
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| |
# -*- coding: utf-8 -*-
'''
Bottom Sheets
=============
`Material Design spec Bottom Sheets page <http://www.google.com/design/spec/components/bottom-sheets.html>`_
In this module there's the :class:`MDBottomSheet` class which will let you implement your own Material Design Bottom Sheets, and there are two classes called :class:`MDListBottomSheet` and :class:`MDGridBottomSheet` implementing the ones mentioned in the spec.
Examples
--------
.. note::
These widgets are designed to be called from Python code only.
For :class:`MDListBottomSheet`:
.. code-block:: python
bs = MDListBottomSheet()
bs.add_item("Here's an item with text only", lambda x: x)
bs.add_item("Here's an item with an icon", lambda x: x, icon='md-cast')
bs.add_item("Here's another!", lambda x: x, icon='md-nfc')
bs.open()
For :class:`MDGridBottomSheet`:
.. code-block:: python
bs = MDGridBottomSheet()
bs.add_item("Facebook", lambda x: x, icon_src='./assets/facebook-box.png')
bs.add_item("YouTube", lambda x: x, icon_src='./assets/youtube-play.png')
bs.add_item("Twitter", lambda x: x, icon_src='./assets/twitter.png')
bs.add_item("Da Cloud", lambda x: x, icon_src='./assets/cloud-upload.png')
bs.add_item("Camera", lambda x: x, icon_src='./assets/camera.png')
bs.open()
API
---
'''
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.modalview import ModalView
from kivy.uix.scrollview import ScrollView
from kivymd.backgroundcolorbehavior import BackgroundColorBehavior
from kivymd.label import MDLabel
from kivymd.list import MDList, OneLineListItem, ILeftBody, \
OneLineIconListItem
from kivymd.theming import ThemableBehavior
Builder.load_string('''
<MDBottomSheet>
background: 'atlas://data/images/defaulttheme/action_group_disabled'
background_color: 0,0,0,.8
sv: sv
upper_padding: upper_padding
gl_content: gl_content
ScrollView:
id: sv
do_scroll_x: False
BoxLayout:
size_hint_y: None
orientation: 'vertical'
padding: 0,1,0,0
height: upper_padding.height + gl_content.height + 1 # +1 to allow overscroll
BsPadding:
id: upper_padding
size_hint_y: None
height: root.height - min(root.width * 9 / 16, gl_content.height)
on_release: root.dismiss()
BottomSheetContent:
id: gl_content
size_hint_y: None
background_color: root.theme_cls.bg_normal
cols: 1
''')
class BsPadding(ButtonBehavior, FloatLayout):
    """Tappable transparent area above the sheet; the kv rule dismisses the
    sheet on release."""
    pass
class BottomSheetContent(BackgroundColorBehavior, GridLayout):
    """Grid layout holding the sheet's user content, with a themable
    background color."""
    pass
class MDBottomSheet(ThemableBehavior, ModalView):
    """Base class for Material Design bottom sheets.

    Widgets added via :meth:`add_widget` land in ``gl_content``.  The sheet
    is dismissed by tapping the padding above it (kv rule) or by
    overscrolling past the threshold computed in :meth:`set_dismiss_zone`.
    """
    # Inner ScrollView (bound from the kv rule).
    sv = ObjectProperty()
    # Transparent tappable area above the sheet (bound from the kv rule).
    upper_padding = ObjectProperty()
    # Layout that actually holds user content (bound from the kv rule).
    gl_content = ObjectProperty()
    # Overscroll distance (in scroll units) that triggers dismissal; starts
    # arbitrarily high and is recomputed once the sheet opens.
    dismiss_zone_scroll = 1000  # Arbitrary high number
    def open(self, *largs):
        super(MDBottomSheet, self).open(*largs)
        # Defer until after the first layout pass so heights are valid.
        Clock.schedule_once(self.set_dismiss_zone, 0)
    def set_dismiss_zone(self, *largs):
        # Scroll to right below overscroll threshold:
        self.sv.scroll_y = 1 - self.sv.convert_distance_to_scroll(0, 1)[1]
        # This is a line where m (slope) is 1/6 and b (y-intercept) is 80:
        self.dismiss_zone_scroll = self.sv.convert_distance_to_scroll(
                0, (self.height - self.upper_padding.height) * (1 / 6.0) + 80)[
            1]
        # Uncomment next line if the limit should just be half of
        # visible content on open (capped by specs to 16 units to width/9:
        # self.dismiss_zone_scroll = (self.sv.convert_distance_to_scroll(
        #     0, self.height - self.upper_padding.height)[1] * 0.50)
        # Check if user has overscrolled enough to dismiss bottom sheet:
        self.sv.bind(on_scroll_stop=self.check_if_scrolled_to_death)
    def check_if_scrolled_to_death(self, *largs):
        # scroll_y > 1 means overscroll at the top of the content.
        if self.sv.scroll_y >= 1 + self.dismiss_zone_scroll:
            self.dismiss()
    def add_widget(self, widget, index=0):
        # The kv rule installs the internal ScrollView through this method;
        # everything else is user content and is routed to gl_content.
        # NOTE(review): exact type() comparison means ScrollView *subclasses*
        # are treated as content — confirm whether that is intentional before
        # switching to isinstance().
        if type(widget) == ScrollView:
            super(MDBottomSheet, self).add_widget(widget, index)
        else:
            self.gl_content.add_widget(widget, index)
Builder.load_string('''
#:import md_icons kivymd.icon_definitions.md_icons
<ListBSIconLeft>
font_style: 'Icon'
text: u"{}".format(md_icons[root.icon])
halign: 'center'
theme_text_color: 'Primary'
valign: 'middle'
''')
class ListBSIconLeft(ILeftBody, MDLabel):
    """Left-side icon label for MDListBottomSheet rows."""
    # Icon name looked up in kivymd's md_icons mapping (see kv rule above).
    icon = StringProperty()
class MDListBottomSheet(MDBottomSheet):
    """Bottom sheet presenting a vertical list; rows are added with
    :meth:`add_item`."""
    # MDList instance that holds all rows.
    mlist = ObjectProperty()
    def __init__(self, **kwargs):
        super(MDListBottomSheet, self).__init__(**kwargs)
        self.mlist = MDList()
        self.gl_content.add_widget(self.mlist)
        # Defer so the list has been laid out before sizing the content.
        Clock.schedule_once(self.resize_content_layout, 0)
    def resize_content_layout(self, *largs):
        # Content height tracks the list height.
        self.gl_content.height = self.mlist.height
    def add_item(self, text, callback, icon=None):
        """Appends a row with *text*; *callback* fires on press; *icon* is an
        optional md_icons name shown at the left."""
        if icon:
            item = OneLineIconListItem(text=text, on_press=callback)
            item.add_widget(ListBSIconLeft(icon=icon))
        else:
            item = OneLineListItem(text=text, on_press=callback)
        # NOTE(review): the original comment warned this binding "breaks the
        # function command" — the extra on_release handler dismisses the
        # sheet in addition to the on_press callback; verify interaction.
        item.bind(on_release=lambda x: self.dismiss())
        self.mlist.add_widget(item)
Builder.load_string('''
<GridBSItem>
orientation: 'vertical'
padding: 0, dp(24), 0, 0
size_hint_y: None
size: dp(64), dp(96)
BoxLayout:
padding: dp(8), 0, dp(8), dp(8)
size_hint_y: None
height: dp(48)
Image:
source: root.source
MDLabel:
font_style: 'Caption'
theme_text_color: 'Secondary'
text: root.caption
halign: 'center'
''')
class GridBSItem(ButtonBehavior, BoxLayout):
    """One tappable cell of MDGridBottomSheet: icon image plus caption."""
    # Path of the icon image (see kv rule above).
    source = StringProperty()
    # Caption text rendered under the icon.
    caption = StringProperty()
class MDGridBottomSheet(MDBottomSheet):
    """Bottom sheet presenting items in a three-column icon grid."""
    def __init__(self, **kwargs):
        super(MDGridBottomSheet, self).__init__(**kwargs)
        self.gl_content.padding = (dp(16), 0, dp(16), dp(24))
        # Start with just the bottom padding; add_item() grows the height
        # by one row (dp(96)) every time a new row starts.
        self.gl_content.height = dp(24)
        self.gl_content.cols = 3
    def add_item(self, text, callback, icon_src):
        """Appends a cell with caption *text* and image *icon_src*;
        *callback* fires on release."""
        item = GridBSItem(
                caption=text,
                on_release=callback,
                source=icon_src
        )
        # Dismiss the sheet after the item's own on_release callback.
        item.bind(on_release=lambda x: self.dismiss())
        # A new row begins every third child; grow content height by a row.
        if len(self.gl_content.children) % 3 == 0:
            self.gl_content.height += dp(96)
        self.gl_content.add_widget(item)
| |
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ganeti Remote API master script.
"""
# pylint: disable=C0103,W0142
# C0103: Invalid name ganeti-watcher
import logging
import optparse
import sys
from ganeti import constants
from ganeti import http
from ganeti import daemon
from ganeti import ssconf
import ganeti.rpc.errors as rpcerr
from ganeti import serializer
from ganeti import pathutils
from ganeti.rapi import connector
from ganeti.rapi import baserlib
from ganeti.rapi.auth import basic_auth
from ganeti.rapi.auth import pam
import ganeti.http.auth # pylint: disable=W0611
import ganeti.http.server
class RemoteApiRequestContext(object):
  """Per-request state for the Remote API.

  All fields start as C{None} and are populated lazily by
  L{RemoteApiHandler._GetRequestContext}.

  """
  def __init__(self):
    # Resource handler instance, its bound HTTP-method function, the
    # handler's access-permission definition, and the parsed request body.
    self.handler = None
    self.handler_fn = None
    self.handler_access = None
    self.body_data = None
class RemoteApiHandler(http.auth.HttpServerRequestAuthentication,
http.server.HttpServerHandler):
"""REST Request Handler Class.
"""
AUTH_REALM = "Ganeti Remote API"
  def __init__(self, authenticator, reqauth, _client_cls=None):
    """Initializes this class.

    @type authenticator: an implementation of {RapiAuthenticator} interface
    @param authenticator: a class containing an implementation of
                          ValidateRequest function
    @type reqauth: bool
    @param reqauth: Whether to require authentication
    @param _client_cls: client class override, passed through to resource
        handlers (intended for tests)

    """
    # pylint: disable=W0233
    # it seems pylint doesn't see the second parent class there
    http.server.HttpServerHandler.__init__(self)
    http.auth.HttpServerRequestAuthentication.__init__(self)
    self._client_cls = _client_cls
    # Maps request paths to resource handler classes.
    self._resmap = connector.Mapper()
    self._authenticator = authenticator
    self._reqauth = reqauth
@staticmethod
def FormatErrorMessage(values):
"""Formats the body of an error message.
@type values: dict
@param values: dictionary with keys C{code}, C{message} and C{explain}.
@rtype: tuple; (string, string)
@return: Content-type and response body
"""
return (http.HTTP_APP_JSON, serializer.DumpJson(values))
def _GetRequestContext(self, req):
"""Returns the context for a request.
The context is cached in the req.private variable.
"""
if req.private is None:
(HandlerClass, items, args) = \
self._resmap.getController(req.request_path)
ctx = RemoteApiRequestContext()
ctx.handler = HandlerClass(items, args, req, _client_cls=self._client_cls)
method = req.request_method.upper()
try:
ctx.handler_fn = getattr(ctx.handler, method)
except AttributeError:
raise http.HttpNotImplemented("Method %s is unsupported for path %s" %
(method, req.request_path))
ctx.handler_access = baserlib.GetHandlerAccess(ctx.handler, method)
# Require permissions definition (usually in the base class)
if ctx.handler_access is None:
raise AssertionError("Permissions definition missing")
# This is only made available in HandleRequest
ctx.body_data = None
req.private = ctx
# Check for expected attributes
assert req.private.handler
assert req.private.handler_fn
assert req.private.handler_access is not None
return req.private
def AuthenticationRequired(self, req):
"""Determine whether authentication is required.
"""
return self._reqauth
def Authenticate(self, req):
"""Checks whether a user can access a resource.
@return: username of an authenticated user or None otherwise
"""
ctx = self._GetRequestContext(req)
auth_user = self._authenticator.ValidateRequest(
req, ctx.handler_access, self.GetAuthRealm(req))
if auth_user is None:
return False
ctx.handler.auth_user = auth_user
return True
def HandleRequest(self, req):
"""Handles a request.
"""
ctx = self._GetRequestContext(req)
# Deserialize request parameters
if req.request_body:
# RFC2616, 7.2.1: Any HTTP/1.1 message containing an entity-body SHOULD
# include a Content-Type header field defining the media type of that
# body. [...] If the media type remains unknown, the recipient SHOULD
# treat it as type "application/octet-stream".
req_content_type = req.request_headers.get(http.HTTP_CONTENT_TYPE,
http.HTTP_APP_OCTET_STREAM)
if req_content_type.lower() != http.HTTP_APP_JSON.lower():
raise http.HttpUnsupportedMediaType()
try:
ctx.body_data = serializer.LoadJson(req.request_body)
except Exception:
raise http.HttpBadRequest(message="Unable to parse JSON data")
else:
ctx.body_data = None
try:
result = ctx.handler_fn()
except rpcerr.TimeoutError:
raise http.HttpGatewayTimeout()
except rpcerr.ProtocolError, err:
raise http.HttpBadGateway(str(err))
req.resp_headers[http.HTTP_CONTENT_TYPE] = http.HTTP_APP_JSON
return serializer.DumpJson(result)
def CheckRapi(options, args):
    """Initial checks whether to run or exit with a failure.

    Exits when positional arguments are given (RAPI takes none) and
    verifies this node is the cluster master.  SSL parameters are
    resolved here, while the daemon still runs with full privileges.
    """
    if args:  # rapi doesn't take any arguments
        print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" %
                              sys.argv[0])
        sys.exit(constants.EXIT_FAILURE)

    ssconf.CheckMaster(options.debug)

    # Read SSL certificate (this is a little hackish to read the cert as root)
    if options.ssl:
        options.ssl_params = http.HttpSslParams(ssl_key_path=options.ssl_key,
                                                ssl_cert_path=options.ssl_cert)
    else:
        options.ssl_params = None
def PrepRapi(options, _):
    """Prep remote API function, executed with the PID file held.

    Builds the authenticator (PAM-based when --pam-authentication was
    given, HTTP basic auth otherwise), wires it into a request handler
    and starts the HTTP server.

    @return: (mainloop, server) tuple, consumed by ExecRapi
    """
    mainloop = daemon.Mainloop()

    if options.pamauth:
        # PAM authorization implies mandatory authentication
        options.reqauth = True
        auth = pam.PamAuthenticator()
    else:
        auth = basic_auth.BasicAuthenticator()

    rapi_server = \
        http.server.HttpServer(mainloop, options.bind_address, options.port,
                               RemoteApiHandler(auth, options.reqauth),
                               ssl_params=options.ssl_params,
                               ssl_verify_peer=False)
    rapi_server.Start()

    return (mainloop, rapi_server)
def ExecRapi(options, args, prep_data):  # pylint: disable=W0613
    """Main remote API function, executed with the PID file held.

    Runs the main loop until it terminates and always stops the HTTP
    server afterwards.  Bug fix: the previous version logged
    "RAPI Daemon Failed" unconditionally in the C{finally} block, i.e.
    also on every clean shutdown; the failure is now only logged (with
    traceback) when the main loop actually raises, and the exception is
    re-raised for the daemon framework.

    @param prep_data: (mainloop, server) tuple as returned by PrepRapi
    """
    (mainloop, server) = prep_data
    try:
        mainloop.Run()
    except Exception:
        # Genuine failure path: record it, then propagate
        logging.exception("RAPI Daemon Failed")
        raise
    finally:
        server.Stop()
def Main():
    """Main function.

    Builds the option parser (including the RAPI-specific authentication
    flags) and hands control to the generic daemon framework with the
    check/prep/exec callbacks defined above.
    """
    parser = optparse.OptionParser(description="Ganeti Remote API",
                                   usage=("%prog [-f] [-d] [-p port] [-b ADDRESS]"
                                          " [-i INTERFACE]"),
                                   version="%%prog (ganeti) %s" %
                                   constants.RELEASE_VERSION)
    parser.add_option("--require-authentication", dest="reqauth",
                      default=False, action="store_true",
                      help=("Disable anonymous HTTP requests and require"
                            " authentication"))
    parser.add_option("--pam-authentication", dest="pamauth",
                      default=False, action="store_true",
                      help=("Enable RAPI authentication and authorization via"
                            " PAM"))

    # NOTE: the RAPI certificate file doubles as the default SSL key file
    daemon.GenericMain(constants.RAPI, parser, CheckRapi, PrepRapi, ExecRapi,
                       default_ssl_cert=pathutils.RAPI_CERT_FILE,
                       default_ssl_key=pathutils.RAPI_CERT_FILE)
| |
'''Class SigFig for representing digits of a measurement that
convey meaningful information (i.e. are significant).
'''
from __future__ import absolute_import
from decimal import Decimal
from collections import defaultdict
from hlab.lexing import Lexer, LexicalError
from hlab.bases import AutoRepr
valid_digits = tuple(range(10))
class SigFig(AutoRepr):
    """A number tracked together with its count of significant figures.

    Internally stored in a scientific-notation-like form: a sign bit
    (0 positive, 1 negative), a tuple of decimal digits with the most
    significant digit first, and the power of ten of that first digit.

    Fixes relative to the previous revision:
      * __pow__ wrapped its Decimal result in round_to_sigfigs without
        first converting back to SigFig, which raised AttributeError
        (Decimal has no such method); the result is now converted.
      * the unused exception binding in __eq__ was dropped.
    """

    def __init__(self, arg):
        """Build from a string, int/long, Decimal, another SigFig, or a
        raw (sign, digits, power) triple."""
        if isinstance(arg, unicode):
            arg = str(arg)
        elif isinstance(arg, Decimal):
            # Decimal's exponent belongs to the *last* digit; convert it
            # to the power of ten of the first digit
            sign, digits, exp = arg.as_tuple()
            arg = (sign, digits, exp - 1 + len(digits))
        if isinstance(arg, str):
            arg = parse_string(arg)
        elif isinstance(arg, (int,long)):
            arg = parse_string(str(arg))
        elif isinstance(arg, SigFig):
            arg = (arg.sign, arg.digits, arg.power)
        sign, digits, power = arg
        digits = tuple(digits)
        # NOTE: assert-based validation; stripped under python -O
        assert sign in (0,1)
        assert all(isinstance(digit, (int,long)) and digit in valid_digits
                   for digit in digits)
        assert isinstance(power, (int,long))
        self.sign = sign      # 0 for positive, 1 for negative
        self.digits = digits  # most significant digit first
        self.power = power    # power of ten of digits[0]

    def repr_args(self):
        # AutoRepr hook: repr() shows the formatted value
        return [str(self)]

    #legacy method
    def as_scientific(self):
        return self

    def as_decimal(self):
        """Convert to Decimal (significance information is lost)."""
        return Decimal((self.sign, self.digits, self.least_significant_place))

    @property
    def sigfigs(self):
        """Number of significant figures."""
        if self.digits[0] != 0:
            return len(self.digits)
        # Zero values carry a leading zero digit; one digit is discounted
        # but at least one sig fig is always reported -- TODO confirm intent
        return max(1, len(self.digits) - 1)

    @property
    def most_significant_place(self):
        """Power of ten of the most significant digit."""
        return self.power

    @property
    def least_significant_place(self):
        """Power of ten of the least significant digit."""
        return 1 + self.power - len(self.digits)

    def round_to_sigfigs(self, n):
        """Return a copy rounded to n significant figures (n >= 1)."""
        if n <= 0:
            raise ValueError("rounding %s to invalid number of sig figs %d" % (self, n))
        return self.round_at_index(n)

    def round_to_place(self, n):
        """Return a copy rounded so the least significant place is 10**n."""
        return self.round_at_index(self.power - n + 1)

    def round_at_index(self, i):
        """Round so that only the first i digits remain.

        Uses round-half-to-even: a digit of exactly 5 rounds up only when
        the preceding digit is odd.
        """
        if i < 0:
            # Rounding above the most significant digit leaves nothing
            return self.__class__('0')
        if i >= len(self.digits):
            # Asked for more digits than stored: pad with zeros instead
            return self.shift_sigfigs(-(i - len(self.digits)))
        digits = list(self.digits)
        power = self.power
        if i==0:
            # Rounding at the first digit: prepend a scratch zero so a
            # carry has somewhere to land
            digits.insert(0, 0)
            power += 1
            i += 1
        round_dig = digits[i]
        if (round_dig > 5) or (round_dig == 5 and digits[i-1]%2):
            digits[i-1] += 1
            # carry overflow into more significant digits
            for j in xrange(len(digits) - 1, 0, -1):
                if digits[j] == 10:
                    digits[j] = 0
                    digits[j-1] += 1
        digits = digits[:i:]
        if digits[0] == 10:
            # Carry ran off the top (e.g. 9.9 -> 10): grow by one place
            digits[0] = 0
            digits.insert(0, 1)
            power += 1
        if digits[0] == 0:
            # Drop the scratch leading zero when no carry reached it
            del digits[0]
            power -= 1
        if not digits:
            return SigFig('0')
        return self.__class__((self.sign, digits, power))

    def shift_sigfigs(self, sigfigs=1):
        """Drop (positive count) or zero-pad (negative count) trailing digits."""
        if sigfigs == 0:
            return self
        if sigfigs > 0:
            digits = self.digits[:-sigfigs:]
            if not digits:
                return self.__class__(0)
        else:
            digits = self.digits + (0,) * -sigfigs
        return self.__class__((self.sign, digits, self.power))

    def __str__(self):
        base, exp = self.get_format_args()
        if exp is None:
            return base
        return '%se%s' % (base, exp)

    def get_format_args(self, min_exp_power=3):
        """Return (mantissa-string, exponent-or-None) for formatting.

        Falls back to exponential form whenever plain decimal notation
        could not express the significance unambiguously (e.g. trailing
        significant zeros) or the magnitude of the power is large.
        """
        if self.digits[0] == 0:
            return self._get_zero_format_args()
        sigfigs = self.sigfigs
        power = self.power
        if min_exp_power!=None and abs(power) > max(sigfigs, min_exp_power):
            return self.get_exp_format_args()
        digs = map(str, self.digits)
        #deal with trailing zeros
        if sigfigs > power:
            insignificant_zeros = 0
            #prevent a bare trailing decimal point, i.e. "10."
            if power > 0 and len(digs) == power+1 and digs[-1] == '0':
                return self.get_exp_format_args()
        else:
            #check for significant trailing zeros
            if self.digits[-1::] == (0,):
                return self.get_exp_format_args()
            #add insignificant trailing zeros
            insignificant_zeros = (1 + power - sigfigs)
            digs += ['0'] * insignificant_zeros
        #special case to prevent decimal points in zero
        if digs == ['0']:
            return '0', None
        #place decimal point
        if power >= 0:
            if not insignificant_zeros and power+1 < len(digs):
                digs.insert(1+power, '.')
        else:
            digs = ['0.'] + ['0'] * (-1 - power) + digs
        return ['%s%s' % ('-' if self.sign else '', ''.join(digs)),
                None]

    def _get_zero_format_args(self):
        # Formatting for zero values (stored leading digit is 0)
        assert set(self.digits) == set([0])
        if self.power > 0:
            return self.get_exp_format_args()
        assert self.digits == (0,)
        if self.power == 0:
            return '0', None
        return '0.' + '0' * -self.power, None

    def get_exp_format_args(self):
        """Exponential form: return (mantissa-string, power)."""
        return ['%s%d%s' % ('-' if self.sign else '',
                            self.digits[0],
                            '.' + ''.join(map(str, self.digits[1::])) if
                            len(self.digits) > 1 else ''),
                self.power]

    def __pos__(self):
        return self

    def __neg__(self):
        # Flip the sign bit; digits and power are shared
        return self.__class__((1 if self.sign==0 else 0,
                               self.digits, self.power))

    def __nonzero__(self):
        # Python 2 truthiness hook
        return self.as_decimal() != 0

    def perform_binary_operation(self, other, func, rule):
        """Apply func to the Decimal values and re-round per sig-fig rules.

        rule 'mul': the result keeps the smaller operand's sig-fig count.
        rule 'add': the result keeps the larger (less precise) least
        significant place.
        """
        if not isinstance(other, (SigFig, Decimal, int, long)):
            return NotImplemented
        selfd = self.as_decimal()
        otherd = other.as_decimal() if isinstance(other, SigFig) else other
        valued = func(selfd, otherd)
        value = self.__class__(valued)
        sigfigs = (min(self.sigfigs, other.sigfigs)
                   if isinstance(other, SigFig) else
                   self.sigfigs)
        if rule=='mul':
            if valued == 0:
                # A zero product records its significance as zero digits
                value.digits = (0,) * sigfigs
            else:
                value = value.round_to_sigfigs(min(self.sigfigs, other.sigfigs)
                                               if isinstance(other, SigFig) else
                                               self.sigfigs)
        elif rule=='add':
            lsp = (max(self.least_significant_place, other.least_significant_place)
                   if isinstance(other, SigFig) else
                   self.least_significant_place)
            value = value.round_to_place(lsp)
            # if value.digits == (0,):
            #     value.digits = (0,) * max(1, sigfigs)
        else:
            raise ValueError("bad sigfig rule %r" % (rule,))
        return value

    def __mul__(self, other):
        return self.perform_binary_operation(other, lambda a,b: a*b, 'mul')
    def __rmul__(self, other):
        return self.perform_binary_operation(other, lambda a,b: b*a, 'mul')
    def __div__(self, other):
        return self.perform_binary_operation(other, lambda a,b: a/b, 'mul')
    def __rdiv__(self, other):
        return self.perform_binary_operation(other, lambda a,b: b/a, 'mul')
    def __mod__(self, other):
        return self.perform_binary_operation(other, lambda a,b: a%b, 'mul')
    def __rmod__(self, other):
        return self.perform_binary_operation(other, lambda a,b: b%a, 'mul')
    # Python 3-style division names share the classic implementations
    __truediv__ = __div__
    __rtruediv__ = __rdiv__
    def __add__(self, other):
        return self.perform_binary_operation(other, lambda a,b: a+b, 'add')
    def __radd__(self, other):
        return self.perform_binary_operation(other, lambda a,b: b+a, 'add')
    def __sub__(self, other):
        return self.perform_binary_operation(other, lambda a,b: a-b, 'add')
    def __rsub__(self, other):
        return self.perform_binary_operation(other, lambda a,b: b-a, 'add')

    # Comparisons compare numeric values; significance is ignored
    def __gt__(self, other):
        return (self-other).as_decimal() > 0
    def __ge__(self, other):
        return (self-other).as_decimal() >= 0
    def __eq__(self, other):
        if not isinstance(other, (int,long,Decimal,SigFig)):
            return NotImplemented
        try:
            return not (abs((self-other).as_decimal()) > 0)
        except TypeError:
            return False
    def __ne__(self, other):
        return not (self == other)
    def __le__(self, other):
        return (self-other).as_decimal() <= 0
    def __lt__(self, other):
        return (self-other).as_decimal() < 0

    def __pow__(self, op):
        """Integer powers only; the result keeps this value's sig figs."""
        if not isinstance(op, (int,long)):
            raise TypeError("pow not implemented for non-integer type %r" % (op,))
        if op < 0:
            return 1 / self**-op
        if op == 0:
            # x**0 computed as x/x so the sig-fig count is preserved
            return self / self
        # Bug fix: convert the Decimal power back to SigFig before
        # rounding; Decimal has no round_to_sigfigs method
        return self.__class__(self.as_decimal() ** op).round_to_sigfigs(self.sigfigs)

    def sqrt(self):
        """Square root, keeping this value's number of significant figures."""
        return self.__class__(self.as_decimal().sqrt()).round_to_sigfigs(self.sigfigs)
def parse_string(bytes):
    """Parse a numeric literal into a (sign, digits, power) triple.

    Accepts an optional sign, digits with an optional decimal point and
    an optional e/E exponent.  The result is normalized to scientific
    form: exactly one digit before the implied decimal point, with
    `power` the power of ten of that first digit.  Trailing zeros of an
    undotted integer are treated as insignificant and dropped.

    Raises LexicalError if the string is not a valid literal.
    """
    lex = Lexer(bytes.strip())
    # Pull the literal apart: sign, integer part, '.', fraction, 'e', exponent
    [pm, digs_pre_dot, dot, digs_post_dot, exp, exp_power
     ] = lex.pulls(r'[+-]', r'\d+', r'\.', r'\d+', r'[eE]', r'[+-]?\d+')
    if not lex.eof or (exp and not exp_power):
        raise LexicalError("bad sigfig literal %r" % (bytes,))
    sign = 1 if pm == '-' else 0
    power = int(exp_power) if exp_power else 0
    # NOTE: relies on Python 2 map() returning mutable lists
    digs_pre_dot = map(int, digs_pre_dot)
    digs_post_dot = map(int, digs_post_dot)
    #remove insignificant trailing zeros (only when there is a nonzero digit)
    if not dot:
        if set(digs_pre_dot) > set([0]):
            while digs_pre_dot and digs_pre_dot[-1] == 0:
                digs_pre_dot.pop(-1)
                power += 1
    # Literals like ".5" have an empty integer part; give them a zero
    if not digs_pre_dot:
        digs_pre_dot.append(0)
    #make scientific by shifting digits and power accordingly
    while len(digs_pre_dot) > 1:
        digs_post_dot.insert(0, digs_pre_dot.pop(-1))
        power += 1
    assert len(digs_pre_dot) == 1
    digits = digs_pre_dot + digs_post_dot
    #remove insignificant leading zeros
    while len(digits) > 1 and digits[0] == 0:
        digits.pop(0)
        power -= 1
    digits = tuple(digits)
    return sign, digits, power
| |
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import collections
import hashlib
import os
import platform
import subprocess
import sys
import settings
# All test build trees are placed under <project>/build/tests by default
OUTPUT_DIR = os.path.join(settings.PROJECT_DIR, 'build', 'tests')

# One test configuration: its name, extra build-script arguments, extra
# test-runner arguments, and a skip reason (False when not skipped).
# The defaults apply to the last three fields: build_args=[], test_args=[], skip=False.
Options = collections.namedtuple('Options', ['name', 'build_args', 'test_args', 'skip'])
Options.__new__.__defaults__ = ([], [], False)
def skip_if(condition, desc):
    """Return *desc* as the skip reason when *condition* holds, else False."""
    if condition:
        return desc
    return False
# Build-argument fragments shared by the test configurations below
OPTIONS_COMMON = ['--lto=off']
OPTIONS_PROFILE_MIN = ['--profile=minimal']  # NOTE(review): apparently unreferenced in this script
OPTIONS_PROFILE_ES51 = ['--profile=es5.1']
OPTIONS_PROFILE_ESNEXT = ['--profile=es.next']
OPTIONS_STACK_LIMIT = ['--stack-limit=96']
OPTIONS_GC_MARK_LIMIT = ['--gc-mark-limit=16']
OPTIONS_MEM_STRESS = ['--mem-stress-test=on']
OPTIONS_DEBUG = ['--debug']
OPTIONS_SNAPSHOT = ['--snapshot-save=on', '--snapshot-exec=on', '--jerry-cmdline-snapshot=on']
OPTIONS_UNITTESTS = ['--unittests=on', '--jerry-cmdline=off', '--error-messages=on',
                     '--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on',
                     '--line-info=on', '--mem-stats=on']
OPTIONS_DOCTESTS = ['--doctests=on', '--jerry-cmdline=off', '--error-messages=on',
                    '--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on']

# Test options for unittests
JERRY_UNITTESTS_OPTIONS = [
    Options('unittests-es.next',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ESNEXT),
    Options('unittests-es.next-debug',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ESNEXT + OPTIONS_DEBUG),
    Options('doctests-es.next',
            OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ESNEXT),
    Options('doctests-es.next-debug',
            OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ESNEXT + OPTIONS_DEBUG),
    Options('unittests-es5.1',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51),
    Options('unittests-es5.1-debug',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
    Options('doctests-es5.1',
            OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51),
    Options('doctests-es5.1-debug',
            OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
    Options('unittests-es5.1-debug-init-fini',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG
            + ['--cmake-param=-DFEATURE_INIT_FINI=ON'],
            skip=skip_if((sys.platform == 'win32'), 'FEATURE_INIT_FINI build flag isn\'t supported on Windows,' +
                         ' because Microsoft Visual C/C++ Compiler doesn\'t support' +
                         ' library constructors and destructors.')),
]

# Test options for jerry-tests
JERRY_TESTS_OPTIONS = [
    Options('jerry_tests-es.next-debug',
            OPTIONS_COMMON + OPTIONS_PROFILE_ESNEXT + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
            + OPTIONS_MEM_STRESS),
    Options('jerry_tests-es5.1',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT),
    Options('jerry_tests-es5.1-snapshot',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT,
            ['--snapshot']),
    Options('jerry_tests-es5.1-debug',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
            + OPTIONS_MEM_STRESS),
    Options('jerry_tests-es5.1-debug-snapshot',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT
            + OPTIONS_GC_MARK_LIMIT, ['--snapshot']),
    Options('jerry_tests-es5.1-debug-cpointer_32bit',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
            + ['--cpointer-32bit=on', '--mem-heap=1024']),
    Options('jerry_tests-es5.1-debug-external_context',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
            + ['--external-context=on']),
]

# Test options for test262
TEST262_TEST_SUITE_OPTIONS = [
    Options('test262_tests', OPTIONS_PROFILE_ES51),
    Options('test262_tests-debug', OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG)
]

# Test options for test262-es2015
TEST262_ES2015_TEST_SUITE_OPTIONS = [
    Options('test262_tests_es2015', OPTIONS_PROFILE_ESNEXT + ['--line-info=on', '--error-messages=on']),
]

# Test options for test262-esnext
TEST262_ESNEXT_TEST_SUITE_OPTIONS = [
    Options('test262_tests_esnext', OPTIONS_PROFILE_ESNEXT
            + ['--line-info=on', '--error-messages=on', '--mem-heap=20480']),
]

# Test options for jerry-debugger
DEBUGGER_TEST_OPTIONS = [
    Options('jerry_debugger_tests',
            OPTIONS_DEBUG + ['--jerry-debugger=on'])
]

# Test options for buildoption-test
JERRY_BUILDOPTIONS = [
    Options('buildoption_test-lto',
            ['--lto=on']),
    Options('buildoption_test-error_messages',
            ['--error-messages=on']),
    Options('buildoption_test-logging',
            ['--logging=on']),
    Options('buildoption_test-all_in_one',
            ['--all-in-one=on']),
    Options('buildoption_test-valgrind',
            ['--valgrind=on']),
    Options('buildoption_test-mem_stats',
            ['--mem-stats=on']),
    Options('buildoption_test-show_opcodes',
            ['--show-opcodes=on']),
    Options('buildoption_test-show_regexp_opcodes',
            ['--show-regexp-opcodes=on']),
    Options('buildoption_test-cpointer_32bit',
            ['--compile-flag=-m32', '--cpointer-32bit=on', '--system-allocator=on'],
            skip=skip_if(
                platform.system() != 'Linux' or (platform.machine() != 'i386' and platform.machine() != 'x86_64'),
                '-m32 is only supported on x86[-64]-linux')
            ),
    Options('buildoption_test-jerry_math',
            ['--jerry-math=on']),
    Options('buildoption_test-no_lcache_prophashmap',
            # NOTE(review): "PROPRETY" presumably matches the engine's macro spelling -- verify before "fixing"
            ['--compile-flag=-DJERRY_LCACHE=0', '--compile-flag=-DJERRY_PROPRETY_HASHMAP=0']),
    Options('buildoption_test-external_context',
            ['--external-context=on']),
    Options('buildoption_test-shared_libs',
            ['--shared-libs=on'],
            skip=skip_if((sys.platform == 'win32'), 'Not yet supported, link failure on Windows')),
    Options('buildoption_test-cmdline_test',
            ['--jerry-cmdline-test=on'],
            skip=skip_if((sys.platform == 'win32'), 'rand() can\'t be overriden on Windows (benchmarking.c)')),
    Options('buildoption_test-cmdline_snapshot',
            ['--jerry-cmdline-snapshot=on']),
    Options('buildoption_test-recursion_limit',
            OPTIONS_STACK_LIMIT),
    Options('buildoption_test-gc-mark_limit',
            OPTIONS_GC_MARK_LIMIT),
    Options('buildoption_test-single-source',
            ['--cmake-param=-DENABLE_ALL_IN_ONE_SOURCE=ON']),
    Options('buildoption_test-jerry-debugger',
            ['--jerry-debugger=on']),
    Options('buildoption_test-module-off',
            ['--compile-flag=-DJERRY_MODULE_SYSTEM=0', '--lto=off']),
    Options('buildoption_test-builtin-proxy-off',
            ['--compile-flag=-DJERRY_BUILTIN_PROXY=0']),
]
def get_arguments():
    """Parse and validate command-line arguments.

    Prints help and exits when invoked with no arguments, and rejects
    --test262-test-list unless one of the test262 suites is selected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--toolchain', metavar='FILE',
                        help='Add toolchain file')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Only print out failing tests')
    parser.add_argument('--buildoptions', metavar='LIST',
                        help='Add a comma separated list of extra build options to each test')
    parser.add_argument('--skip-list', metavar='LIST',
                        help='Add a comma separated list of patterns of the excluded JS-tests')
    parser.add_argument('--outdir', metavar='DIR', default=OUTPUT_DIR,
                        help='Specify output directory (default: %(default)s)')
    parser.add_argument('--check-signed-off', metavar='TYPE', nargs='?',
                        choices=['strict', 'tolerant', 'gh-actions'], const='strict',
                        help='Run signed-off check (%(choices)s; default type if not given: %(const)s)')
    parser.add_argument('--check-cppcheck', action='store_true',
                        help='Run cppcheck')
    parser.add_argument('--check-doxygen', action='store_true',
                        help='Run doxygen')
    parser.add_argument('--check-pylint', action='store_true',
                        help='Run pylint')
    parser.add_argument('--check-vera', action='store_true',
                        help='Run vera check')
    parser.add_argument('--check-license', action='store_true',
                        help='Run license check')
    parser.add_argument('--check-magic-strings', action='store_true',
                        help='Run "magic string source code generator should be executed" check')
    parser.add_argument('--jerry-debugger', action='store_true',
                        help='Run jerry-debugger tests')
    parser.add_argument('--jerry-tests', action='store_true',
                        help='Run jerry-tests')
    parser.add_argument('--test262', action='store_true',
                        help='Run test262 - ES5.1')
    # The three-state test262 selectors default to False (not requested)
    # and to 'default' when given without a value
    parser.add_argument('--test262-es2015', default=False, const='default',
                        nargs='?', choices=['default', 'all', 'update'],
                        help='Run test262 - ES2015. default: all tests except excludelist, ' +
                        'all: all tests, update: all tests and update excludelist')
    parser.add_argument('--test262-esnext', default=False, const='default',
                        nargs='?', choices=['default', 'all', 'update'],
                        help='Run test262 - ESnext. default: all tests except excludelist, ' +
                        'all: all tests, update: all tests and update excludelist')
    parser.add_argument('--test262-test-list', metavar='LIST',
                        help='Add a comma separated list of tests or directories to run in test262 test suite')
    parser.add_argument('--unittests', action='store_true',
                        help='Run unittests (including doctests)')
    parser.add_argument('--buildoption-test', action='store_true',
                        help='Run buildoption-test')
    parser.add_argument('--all', '--precommit', action='store_true',
                        help='Run all tests')

    # No arguments at all: show usage instead of silently doing nothing
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    script_args = parser.parse_args()
    if script_args.test262_test_list and not \
       (script_args.test262 or script_args.test262_es2015 or script_args.test262_esnext):
        print("--test262-test-list is only allowed with --test262 or --test262-es2015 or --test262-esnext\n")
        parser.print_help()
        sys.exit(1)

    return script_args
# Cache of build results keyed by the sorted tuple of build arguments;
# maps to (returncode, build_dir_path) so identical configurations are
# only built once per invocation (see create_binary).
BINARY_CACHE = {}

# ANSI SGR escape sequences used to colorize report output
TERM_NORMAL = '\033[0m'
TERM_YELLOW = '\033[1;33m'
TERM_BLUE = '\033[1;34m'
TERM_RED = '\033[1;31m'
def report_command(cmd_type, cmd, env=None):
    """Print a command, with any extra environment, to stderr in blue."""
    out = sys.stderr.write
    out('%s%s%s\n' % (TERM_BLUE, cmd_type, TERM_NORMAL))
    if env is not None:
        for var, val in sorted(env.items()):
            out('%s%s=%r \\%s\n' % (TERM_BLUE, var, val, TERM_NORMAL))
    # One argument per line, continued with trailing backslashes
    joiner = ' \\%s\n\t%s' % (TERM_NORMAL, TERM_BLUE)
    out('%s%s%s\n' % (TERM_BLUE, joiner.join(cmd), TERM_NORMAL))
def report_skip(job):
    """Announce on stderr that *job* is skipped, with its reason if any."""
    message = '%sSkipping: %s' % (TERM_YELLOW, job.name)
    if job.skip:
        message += ' (%s)' % job.skip
    sys.stderr.write(message + '%s\n' % TERM_NORMAL)
def get_platform_cmd_prefix():
    """Return the shell wrapper needed to run scripts on this platform."""
    return ['cmd', '/S', '/C'] if sys.platform == 'win32' else []
def create_binary(job, options):
    """Build the engine for *job* and return (returncode, build_dir_path).

    Results are cached in BINARY_CACHE by the set of build arguments, so
    repeated configurations are not rebuilt.
    """
    build_args = job.build_args[:]
    # Fold in user-supplied extra build options, avoiding duplicates
    if options.buildoptions:
        for option in options.buildoptions.split(','):
            if option not in build_args:
                build_args.append(option)

    build_cmd = get_platform_cmd_prefix()
    build_cmd.append(settings.BUILD_SCRIPT)
    build_cmd.extend(build_args)

    build_dir_path = os.path.join(options.outdir, job.name)
    build_cmd.append('--builddir=%s' % build_dir_path)

    install_dir_path = os.path.join(build_dir_path, 'local')
    build_cmd.append('--install=%s' % install_dir_path)

    if options.toolchain:
        build_cmd.append('--toolchain=%s' % options.toolchain)

    # Reported even when the cache hit below skips the actual build
    report_command('Build command:', build_cmd)

    # The cache key deliberately ignores the job name: two jobs with the
    # same build arguments share one build tree (the cached path wins).
    binary_key = tuple(sorted(build_args))
    if binary_key in BINARY_CACHE:
        ret, build_dir_path = BINARY_CACHE[binary_key]
        sys.stderr.write('(skipping: already built at %s with returncode %d)\n' % (build_dir_path, ret))
        return ret, build_dir_path

    try:
        subprocess.check_output(build_cmd)
        ret = 0
    except subprocess.CalledProcessError as err:
        # Surface the captured build output on failure
        print(err.output)
        ret = err.returncode

    BINARY_CACHE[binary_key] = (ret, build_dir_path)
    return ret, build_dir_path
def get_binary_path(build_dir_path):
    """Return the path of the jerry executable inside an install tree."""
    binary_name = 'jerry.exe' if sys.platform == 'win32' else 'jerry'
    return os.path.join(build_dir_path, 'local', 'bin', binary_name)
def hash_binary(bin_path):
    """Return the hex SHA-1 digest of the file at *bin_path*.

    Reads in 64 KiB chunks so arbitrarily large binaries are hashed
    without loading them wholly into memory.
    """
    digest = hashlib.sha1()
    with open(bin_path, 'rb') as bin_file:
        for chunk in iter(lambda: bin_file.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def iterate_test_runner_jobs(jobs, options):
    """Yield (job, build_ret, test_cmd) triples for each runner job.

    Builds every job's binary, deduplicating work in two ways: jobs that
    resolve to an already-tested build directory, and binaries whose
    contents hash identically to an already-tested one, are skipped.
    test_cmd is None when the build failed.
    """
    tested_paths = set()
    tested_hashes = {}

    for job in jobs:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            yield job, ret_build, None
            # Bug fix: previously execution fell through here and tried
            # to hash a binary that was never produced; move on instead.
            continue

        if build_dir_path in tested_paths:
            sys.stderr.write('(skipping: already tested with %s)\n' % build_dir_path)
            continue
        tested_paths.add(build_dir_path)

        bin_path = get_binary_path(build_dir_path)
        bin_hash = hash_binary(bin_path)
        if bin_hash in tested_hashes:
            sys.stderr.write('(skipping: already tested with equivalent %s)\n'
                             % tested_hashes[bin_hash])
            continue
        tested_hashes[bin_hash] = build_dir_path

        test_cmd = get_platform_cmd_prefix()
        test_cmd.extend([settings.TEST_RUNNER_SCRIPT, '--engine', bin_path])

        yield job, ret_build, test_cmd
def run_check(runnable, env=None):
    """Run *runnable*, optionally with extra environment variables layered
    over the current process environment, and return its exit status."""
    report_command('Test command:', runnable, env=env)

    if env is not None:
        # Extend rather than replace the inherited environment
        env = dict(os.environ, **env)

    # subprocess.call == Popen(...).wait()
    return subprocess.call(runnable, env=env)
def run_jerry_debugger_tests(options):
    """Build the debugger-enabled engine and run every .cmd test case
    over both debugger channels; return a combined non-zero status on
    any build or test failure."""
    ret_build = ret_test = 0
    for job in DEBUGGER_TEST_OPTIONS:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
            break

        for channel in ["websocket", "rawpacket"]:
            for test_file in os.listdir(settings.DEBUGGER_TESTS_DIR):
                # Each .cmd file names one debugger test case
                if test_file.endswith(".cmd"):
                    test_case, _ = os.path.splitext(test_file)
                    test_case_path = os.path.join(settings.DEBUGGER_TESTS_DIR, test_case)
                    test_cmd = [
                        settings.DEBUGGER_TEST_RUNNER_SCRIPT,
                        get_binary_path(build_dir_path),
                        channel,
                        settings.DEBUGGER_CLIENT_SCRIPT,
                        os.path.relpath(test_case_path, settings.PROJECT_DIR)
                    ]

                    if job.test_args:
                        test_cmd.extend(job.test_args)

                    # Accumulate failures but keep running remaining cases
                    ret_test |= run_check(test_cmd)

    return ret_build | ret_test
def run_jerry_tests(options):
    """Run the jerry-tests suite for every configuration in
    JERRY_TESTS_OPTIONS; return a combined non-zero status on failure."""
    ret_build = ret_test = 0
    for job, ret_build, test_cmd in iterate_test_runner_jobs(JERRY_TESTS_OPTIONS, options):
        if ret_build:
            break

        test_cmd.append('--test-dir')
        test_cmd.append(settings.JERRY_TESTS_DIR)

        if options.quiet:
            test_cmd.append("-q")

        skip_list = []

        # Tests specific to the *other* profile are always skipped
        if '--profile=es.next' in job.build_args:
            skip_list.append(os.path.join('es5.1', ''))
        else:
            skip_list.append(os.path.join('es.next', ''))

        if options.skip_list:
            skip_list.append(options.skip_list)

        if skip_list:
            test_cmd.append("--skip-list=" + ",".join(skip_list))

        if job.test_args:
            test_cmd.extend(job.test_args)

        # The suite assumes a fixed timezone
        ret_test |= run_check(test_cmd, env=dict(TZ='UTC'))

    return ret_build | ret_test
def run_test262_test_suite(options):
    """Run the selected test262 suite variants (ES5.1 / ES2015 / ESnext);
    return a combined non-zero status on any build or test failure."""
    ret_build = ret_test = 0

    jobs = []
    if options.test262:
        jobs.extend(TEST262_TEST_SUITE_OPTIONS)
    if options.test262_es2015:
        jobs.extend(TEST262_ES2015_TEST_SUITE_OPTIONS)
    if options.test262_esnext:
        jobs.extend(TEST262_ESNEXT_TEST_SUITE_OPTIONS)

    for job in jobs:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
            break

        test_cmd = get_platform_cmd_prefix() + [
            settings.TEST262_RUNNER_SCRIPT,
            # Engine path and its extra flag are passed as a single string
            '--engine', get_binary_path(build_dir_path) + " --test262-object",
            '--test-dir', settings.TEST262_TEST_SUITE_DIR
        ]

        # Variant selection; the ES2015/ESnext values also carry the
        # excludelist mode ('default', 'all' or 'update')
        if job.name.endswith('es2015'):
            test_cmd.append('--es2015')
            test_cmd.append(options.test262_es2015)
        elif job.name.endswith('esnext'):
            test_cmd.append('--esnext')
            test_cmd.append(options.test262_esnext)
        else:
            test_cmd.append('--es51')

        if job.test_args:
            test_cmd.extend(job.test_args)

        if options.test262_test_list:
            test_cmd.append('--test262-test-list')
            test_cmd.append(options.test262_test_list)

        # test262 assumes the Pacific timezone
        ret_test |= run_check(test_cmd, env=dict(TZ='America/Los_Angeles'))

    return ret_build | ret_test
def run_unittests(options):
    """Build and run the unit/doc test executables for each configuration
    in JERRY_UNITTESTS_OPTIONS; return a combined non-zero status on
    failure."""
    ret_build = ret_test = 0
    for job in JERRY_UNITTESTS_OPTIONS:
        if job.skip:
            report_skip(job)
            continue
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
            break

        if sys.platform == 'win32':
            # Multi-config generators place binaries in a per-config subdir
            if "--debug" in job.build_args:
                build_config = "Debug"
            else:
                build_config = "MinSizeRel"
        else:
            build_config = ""

        ret_test |= run_check(
            get_platform_cmd_prefix() +
            [settings.UNITTEST_RUNNER_SCRIPT] +
            [os.path.join(build_dir_path, 'tests', build_config)] +
            (["-q"] if options.quiet else [])
        )

    return ret_build | ret_test
def run_buildoption_test(options):
    """Build every configuration in JERRY_BUILDOPTIONS, stopping at the
    first failure.

    @return: 0 on success, otherwise the failing build's exit status
    """
    # Bug fix: previously `ret` was left unbound (NameError on return)
    # when every job was skipped or the table was empty.
    ret = 0
    for job in JERRY_BUILDOPTIONS:
        if job.skip:
            report_skip(job)
            continue

        ret, _ = create_binary(job, options)
        if ret:
            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
            break

    return ret
Check = collections.namedtuple('Check', ['enabled', 'runner', 'arg'])
def main(options):
    """Run every check/test group enabled via *options* (or all of them
    when --all was given); exit with the first failing group's status."""
    checks = [
        # The signed-off check type maps to an extra runner flag; 'strict'
        # (and the not-selected False) add none
        Check(options.check_signed_off, run_check, [settings.SIGNED_OFF_SCRIPT]
              + {'tolerant': ['--tolerant'], 'gh-actions': ['--gh-actions']}.get(options.check_signed_off, [])),
        Check(options.check_cppcheck, run_check, [settings.CPPCHECK_SCRIPT]),
        Check(options.check_doxygen, run_check, [settings.DOXYGEN_SCRIPT]),
        Check(options.check_pylint, run_check, [settings.PYLINT_SCRIPT]),
        Check(options.check_vera, run_check, [settings.VERA_SCRIPT]),
        Check(options.check_license, run_check, [settings.LICENSE_SCRIPT]),
        Check(options.check_magic_strings, run_check, [settings.MAGIC_STRINGS_SCRIPT]),
        Check(options.jerry_debugger, run_jerry_debugger_tests, options),
        Check(options.jerry_tests, run_jerry_tests, options),
        Check(options.test262 or options.test262_es2015 or options.test262_esnext, run_test262_test_suite, options),
        Check(options.unittests, run_unittests, options),
        Check(options.buildoption_test, run_buildoption_test, options),
    ]

    for check in checks:
        if check.enabled or options.all:
            ret = check.runner(check.arg)
            if ret:
                sys.exit(ret)
# Entry point: parse CLI options and run the selected checks
if __name__ == "__main__":
    main(get_arguments())
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from django.core.urlresolvers import reverse
from exam import fixture
from social_auth.models import UserSocialAuth
from sentry.models import UserOption, LostPasswordHash, User
from sentry.testutils import TestCase
class RegisterTest(TestCase):
    """Exercises the sentry-register view."""

    @fixture
    def path(self):
        return reverse('sentry-register')

    def test_redirects_if_registration_disabled(self):
        # With the feature off the view redirects instead of rendering
        with self.feature('auth:register', False):
            resp = self.client.get(self.path)
            assert resp.status_code == 302

    def test_renders_correct_template(self):
        with self.feature('auth:register'):
            resp = self.client.get(self.path)
            assert resp.status_code == 200
            self.assertTemplateUsed('sentry/register.html')

    def test_with_required_params(self):
        # Successful registration redirects; the username doubles as the
        # account's e-mail address
        with self.feature('auth:register'):
            resp = self.client.post(self.path, {
                'username': 'test-a-really-long-email-address@example.com',
                'password': 'foobar',
            })
            assert resp.status_code == 302
            user = User.objects.get(username='test-a-really-long-email-address@example.com')
            assert user.email == 'test-a-really-long-email-address@example.com'
            assert user.check_password('foobar')
class AppearanceSettingsTest(TestCase):
    """Exercises the account appearance settings view."""

    @fixture
    def path(self):
        return reverse('sentry-account-settings-appearance')

    def test_requires_authentication(self):
        self.assertRequiresAuthentication(self.path)

    def test_does_use_template(self):
        self.login_as(self.user)
        resp = self.client.get(self.path)
        assert resp.status_code == 200
        self.assertTemplateUsed(resp, 'sentry/account/appearance.html')

    def test_does_save_settings(self):
        self.login_as(self.user)
        resp = self.client.post(self.path, {
            'language': 'en',
            'stacktrace_order': '2',
        })
        # Successful save redirects; values land in UserOption storage
        assert resp.status_code == 302

        options = UserOption.objects.get_all_values(user=self.user, project=None)
        assert options.get('language') == 'en'
        assert options.get('stacktrace_order') == '2'
class SettingsTest(TestCase):
    """Exercise the account settings view (profile details and password)."""

    @fixture
    def path(self):
        return reverse('sentry-account-settings')

    def params(self, without=()):
        """Return a valid settings POST payload, minus the keys in ``without``."""
        params = {
            'username': 'foobar',
            'email': 'foo@example.com',
            'first_name': 'Foo bar',
        }
        # dict.items() instead of the Python-2-only dict.iteritems():
        # identical behavior here, but portable to Python 3.
        return dict((k, v) for k, v in params.items() if k not in without)

    def test_requires_authentication(self):
        self.assertRequiresAuthentication(self.path)

    def test_renders_with_required_context(self):
        self.login_as(self.user)
        resp = self.client.get(self.path)
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/settings.html')
        assert 'form' in resp.context

    def test_requires_email(self):
        # Omitting the email re-renders the form with a field error.
        self.login_as(self.user)
        resp = self.client.post(self.path, self.params(without=['email']))
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/settings.html')
        assert 'form' in resp.context
        assert 'email' in resp.context['form'].errors

    def test_requires_first_name(self):
        # Omitting the first name re-renders the form with a field error.
        self.login_as(self.user)
        resp = self.client.post(self.path, self.params(without=['first_name']))
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/settings.html')
        assert 'form' in resp.context
        assert 'first_name' in resp.context['form'].errors

    def test_minimum_valid_params(self):
        self.login_as(self.user)
        params = self.params()
        resp = self.client.post(self.path, params)
        assert resp.status_code == 302
        user = User.objects.get(id=self.user.id)
        assert user.first_name == params['first_name']
        assert user.email == params['email']

    def test_can_change_password(self):
        self.login_as(self.user)
        params = self.params()
        params['new_password'] = 'foobar'
        resp = self.client.post(self.path, params)
        assert resp.status_code == 302
        user = User.objects.get(id=self.user.id)
        assert user.check_password('foobar')
class NotificationSettingsTest(TestCase):
    """Exercise the notification settings page."""

    @fixture
    def path(self):
        return reverse('sentry-account-settings-notifications')

    def params(self, without=()):
        """Return a valid notifications POST payload, minus keys in ``without``."""
        params = {
            'alert_email': 'foo@example.com',
        }
        # dict.items() instead of the Python-2-only dict.iteritems():
        # identical behavior here, but portable to Python 3.
        return dict((k, v) for k, v in params.items() if k not in without)

    def test_requires_authentication(self):
        self.assertRequiresAuthentication(self.path)

    def test_renders_with_required_context(self):
        self.login_as(self.user)
        resp = self.client.get(self.path)
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/notifications.html')
        assert 'form' in resp.context

    def test_valid_params(self):
        # A valid POST persists the alert email and redirects.
        self.login_as(self.user)
        params = self.params()
        resp = self.client.post(self.path, params)
        assert resp.status_code == 302
        options = UserOption.objects.get_all_values(user=self.user, project=None)
        assert options.get('alert_email') == 'foo@example.com'
class ListIdentitiesTest(TestCase):
    """Exercise the linked social-auth identities page."""

    @fixture
    def path(self):
        return reverse('sentry-account-settings-identities')

    def test_requires_authentication(self):
        self.assertRequiresAuthentication(self.path)

    def test_renders_with_required_context(self):
        self.login_as(self.user)
        UserSocialAuth.objects.create(user=self.user, provider='github')
        response = self.client.get(self.path)
        assert response.status_code == 200
        self.assertTemplateUsed('sentry/account/identities.html')
        assert 'identity_list' in response.context
        assert 'AUTH_PROVIDERS' in response.context
class RecoverPasswordTest(TestCase):
    """Exercise the password recovery ("forgot password") form."""

    @fixture
    def path(self):
        return reverse('sentry-account-recover')

    def test_renders_with_required_context(self):
        response = self.client.get(self.path)
        assert response.status_code == 200
        self.assertTemplateUsed(response, 'sentry/account/recover/index.html')
        assert 'form' in response.context

    def test_invalid_username(self):
        """An unknown username re-renders the form with an error attached."""
        response = self.client.post(self.path, {
            'user': 'nonexistent'
        })
        assert response.status_code == 200
        self.assertTemplateUsed(response, 'sentry/account/recover/index.html')
        assert 'form' in response.context
        assert 'user' in response.context['form'].errors

    @mock.patch('sentry.models.LostPasswordHash.send_recover_mail')
    def test_valid_username(self, mocked_send_recover_mail):
        """A known username sends the recovery mail and shows the sent page."""
        response = self.client.post(self.path, {
            'user': self.user.username
        })
        assert response.status_code == 200
        self.assertTemplateUsed(response, 'sentry/account/recover/sent.html')
        assert 'email' in response.context
        mocked_send_recover_mail.assert_called_once_with()
class RecoverPasswordConfirmTest(TestCase):
    """Exercise the password recovery confirmation (token) view."""

    def setUp(self):
        super(RecoverPasswordConfirmTest, self).setUp()
        self.password_hash = LostPasswordHash.objects.create(user=self.user)

    @fixture
    def path(self):
        return reverse('sentry-account-recover-confirm',
                       args=[self.user.id, self.password_hash.hash])

    def test_valid_token(self):
        response = self.client.get(self.path)
        assert response.status_code == 200
        self.assertTemplateUsed(response, 'sentry/account/recover/confirm.html')

    def test_invalid_token(self):
        """A bogus user id/hash combination renders the failure page."""
        bad_path = reverse('sentry-account-recover-confirm',
                           args=[1, 'adfadsf'])
        response = self.client.get(bad_path)
        assert response.status_code == 200
        self.assertTemplateUsed(response, 'sentry/account/recover/failure.html')

    def test_change_password(self):
        """A matching password/confirmation pair resets the password."""
        response = self.client.post(self.path, {
            'password': 'bar',
            'confirm_password': 'bar'
        })
        assert response.status_code == 302
        user = User.objects.get(id=self.user.id)
        assert user.check_password('bar')
| |
# Copyright (c) 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import mock
from six.moves.urllib.parse import urlparse
from swift.common.swob import Request, Response, HTTPUnauthorized
from swift.common.middleware import staticweb
# Canned container metadata, keyed by container name, consumed by
# mock_get_container_info() below.  Each entry mimics the relevant parts
# of what the real get_container_info() would return for these containers.
meta_map = {
    'c1': {'status': 401},
    'c2': {},
    'c3': {'meta': {'web-index': 'index.html',
                    'web-listings': 't'}},
    'c3b': {'meta': {'web-index': 'index.html',
                     'web-listings': 't'}},
    'c4': {'meta': {'web-index': 'index.html',
                    'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css': 'listing.css',
                    'web-directory-type': 'text/dir'}},
    'c5': {'meta': {'web-index': 'index.html',
                    'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css': 'listing.css'}},
    'c6': {'meta': {'web-listings': 't',
                    'web-error': 'error.html'}},
    'c6b': {'meta': {'web-listings': 't',
                     'web-listings-label': 'foo'}},
    'c7': {'meta': {'web-listings': 'f',
                    'web-error': 'error.html'}},
    'c8': {'meta': {'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css':
                    'http://localhost/stylesheets/listing.css'}},
    'c9': {'meta': {'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css':
                    '/absolute/listing.css'}},
    'c10': {'meta': {'web-listings': 't'}},
    'c11': {'meta': {'web-index': 'index.html'}},
    'c11a': {'meta': {'web-index': 'index.html',
                      'web-directory-type': 'text/directory'}},
    'c12': {'meta': {'web-index': 'index.html',
                     'web-error': 'error.html'}},
    'c13': {'meta': {'web-listings': 'f',
                     'web-listings-css': 'listing.css'}},
}


def mock_get_container_info(env, app, swift_source='SW'):
    """Stand-in for staticweb's get_container_info().

    Extracts the container name (the 4th path segment) from the request
    path and returns its canned info from ``meta_map``, with ``status``
    and ``read_acl`` defaults filled in.

    Returns a shallow copy: the original implementation returned the
    shared ``meta_map`` dict and mutated it via setdefault(), so filled-in
    defaults (or any mutation by the middleware under test) leaked state
    between tests.  The copy keeps ``meta_map`` pristine.
    """
    container = env['PATH_INFO'].rstrip('/').split('/')[3]
    # Shallow copy so setdefault() below cannot touch the shared entry
    # (the nested 'meta' dict is still shared, but only read by callers).
    container_info = dict(meta_map[container])
    container_info.setdefault('status', 200)
    container_info.setdefault('read_acl', '.r:*')
    return container_info
class FakeApp(object):
    """Fake backend WSGI app emulating a Swift proxy for the staticweb tests.

    Each container cN referenced in meta_map is wired up with canned
    object and listing responses (index files, error pages, listings,
    subdir markers, ...) so the middleware can be exercised without a
    real cluster.
    """
    def __init__(self, status_headers_body_iter=None):
        # status_headers_body_iter is unused here; presumably kept for
        # signature compatibility with other fake apps in the test suite.
        self.calls = 0
        self.get_c4_called = False
    def __call__(self, env, start_response):
        """Dispatch on PATH_INFO and return the canned backend response."""
        self.calls += 1
        # Honour an auth callback installed by upstream middleware, the
        # same way the real proxy invokes env['swift.authorize'].
        if 'swift.authorize' in env:
            resp = env['swift.authorize'](Request(env))
            if resp:
                return resp(env, start_response)
        if env['PATH_INFO'] == '/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1':
            return Response(
                status='412 Precondition Failed')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a':
            return Response(status='401 Unauthorized')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c1':
            return Response(status='401 Unauthorized')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c2':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c2/one.txt':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/index.html':
            return Response(status='200 Ok', body='''
<html>
<body>
<h1>Test main index.html file.</h1>
<p>Visit <a href="subdir">subdir</a>.</p>
<p>Don't visit <a href="subdir2/">subdir2</a> because it doesn't really
exist.</p>
<p>Visit <a href="subdir3">subdir3</a>.</p>
<p>Visit <a href="subdir3/subsubdir">subdir3/subsubdir</a>.</p>
</body>
</html>
''')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3b':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3b/index.html':
            # A 204 index with an empty app_iter: an "empty" index file.
            resp = Response(status='204 No Content')
            resp.app_iter = iter([])
            return resp(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir/index.html':
            return Response(status='200 Ok', body='index file')(env,
                                                                start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirx/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirx/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdiry/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdiry/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirz':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirz/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/unknown':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/unknown/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4':
            # Record that the c4 container itself was fetched.
            self.get_c4_called = True
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/one.txt':
            return Response(
                status='200 Ok',
                headers={'x-object-meta-test': 'value'},
                body='1')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/two.txt':
            return Response(status='503 Service Unavailable')(env,
                                                              start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/subdir/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/unknown':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/unknown/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/404error.html':
            # c4's custom 404 error page.
            return Response(status='200 Ok', body='''
<html>
<body style="background: #000000; color: #ffaaaa">
<p>Chrome's 404 fancy-page sucks.</p>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/index.html':
            return Response(status='503 Service Unavailable')(env,
                                                              start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/503error.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/unknown':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/unknown/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/404error.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6b':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6/subdir':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6/401error.html':
            # c6's custom 401 error page.
            return Response(status='200 Ok', body='''
<html>
<body style="background: #000000; color: #ffaaaa">
<p>Hey, you're not authorized to see this!</p>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c7', '/v1/a/c7/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c7/404error.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c7/401error.html':
            # c7's custom 401 error page.
            return Response(status='200 Ok', body='''
<html>
<body style="background: #000000; color: #ffaaaa">
<p>Hey, you're not authorized to see this!</p>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c8', '/v1/a/c8/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c8/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c9', '/v1/a/c9/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c9/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c10', '/v1/a/c10/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c10/\xe2\x98\x83/':
            # UTF-8 encoded snowman (u'\u2603') directory paths for the
            # non-ASCII listing tests.
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c10/\xe2\x98\x83/\xe2\x98\x83/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c11', '/v1/a/c11/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir/':
            return Response(status='200 Ok', headers={
                'Content-Type': 'application/directory'})(
                    env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir/index.html':
            return Response(status='200 Ok', body='''
<html>
<body>
<h2>c11 subdir index</h2>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir2/':
            return Response(status='200 Ok', headers={'Content-Type':
                            'application/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir2/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c11a', '/v1/a/c11a/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir/':
            return Response(status='200 Ok', headers={'Content-Type':
                            'text/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir2/':
            return Response(status='200 Ok', headers={'Content-Type':
                            'application/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir2/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir3/':
            return Response(status='200 Ok', headers={'Content-Type':
                            'not_a/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir3/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c12/index.html':
            return Response(status='200 Ok', body='index file')(env,
                                                                start_response)
        elif env['PATH_INFO'] == '/v1/a/c12/200error.html':
            return Response(status='200 Ok', body='error file')(env,
                                                                start_response)
        else:
            # Any path not wired up above indicates a broken test.
            raise Exception('Unknown path %r' % env['PATH_INFO'])
    def listing(self, env, start_response):
        """Return a canned container listing (JSON or plain-text flavour),
        keyed off PATH_INFO and QUERY_STRING."""
        headers = {'x-container-read': '.r:*'}
        if ((env['PATH_INFO'] in (
                '/v1/a/c3', '/v1/a/c4', '/v1/a/c8', '/v1/a/c9'))
                and (env['QUERY_STRING'] ==
                     'delimiter=/&format=json&prefix=subdir/')):
            # subdir/ prefix listing with two objects and a sub-subdir.
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"subdir/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"},
{"name":"subdir/2.txt",
"hash":"c85c1dcd19cf5cbac84e6043c31bb63e", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.734140"},
{"subdir":"subdir3/subsubdir/"}]
'''.strip()
        elif env['PATH_INFO'] == '/v1/a/c3' and env['QUERY_STRING'] == \
                'delimiter=/&format=json&prefix=subdiry/':
            # subdiry/ exists in no listing: empty JSON result.
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '[]'
        elif env['PATH_INFO'] == '/v1/a/c3' and env['QUERY_STRING'] == \
                'limit=1&format=json&delimiter=/&limit=1&prefix=subdirz/':
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"subdirz/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"}]
'''.strip()
        elif env['PATH_INFO'] == '/v1/a/c6' and env['QUERY_STRING'] == \
                'limit=1&format=json&delimiter=/&limit=1&prefix=subdir/':
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'X-Container-Web-Listings': 't',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"subdir/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"}]
'''.strip()
        elif env['PATH_INFO'] == '/v1/a/c10' and (
                env['QUERY_STRING'] ==
                'delimiter=/&format=json&prefix=%E2%98%83/' or
                env['QUERY_STRING'] ==
                'delimiter=/&format=json&prefix=%E2%98%83/%E2%98%83/'):
            # Percent-encoded snowman prefixes for the unicode listing tests.
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'X-Container-Web-Listings': 't',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"\u2603/\u2603/one.txt",
"hash":"73f1dd69bacbf0847cc9cffa3c6b23a1", "bytes":22,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"},
{"subdir":"\u2603/\u2603/"}]
'''.strip()
        elif 'prefix=' in env['QUERY_STRING']:
            # Any other prefix query: nothing matched.
            return Response(status='204 No Content')(env, start_response)
        elif 'format=json' in env['QUERY_STRING']:
            # Full container listing, JSON flavour.
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"401error.html",
"hash":"893f8d80692a4d3875b45be8f152ad18", "bytes":110,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.713710"},
{"name":"404error.html",
"hash":"62dcec9c34ed2b347d94e6ca707aff8c", "bytes":130,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.720850"},
{"name":"index.html",
"hash":"8b469f2ca117668a5131fe9ee0815421", "bytes":347,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.683590"},
{"name":"listing.css",
"hash":"7eab5d169f3fcd06a08c130fa10c5236", "bytes":17,
"content_type":"text/css",
"last_modified":"2011-03-24T04:27:52.721610"},
{"name":"one.txt", "hash":"73f1dd69bacbf0847cc9cffa3c6b23a1",
"bytes":22, "content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.722270"},
{"name":"subdir/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"},
{"name":"subdir/2.txt",
"hash":"c85c1dcd19cf5cbac84e6043c31bb63e", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.734140"},
{"name":"subdir/\u2603.txt",
"hash":"7337d028c093130898d937c319cc9865", "bytes":72981,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.735460"},
{"name":"subdir2", "hash":"d41d8cd98f00b204e9800998ecf8427e",
"bytes":0, "content_type":"text/directory",
"last_modified":"2011-03-24T04:27:52.676690"},
{"name":"subdir3/subsubdir/index.html",
"hash":"04eea67110f883b1a5c97eb44ccad08c", "bytes":72,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.751260"},
{"name":"two.txt", "hash":"10abb84c63a5cff379fdfd6385918833",
"bytes":22, "content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.825110"},
{"name":"\u2603/\u2603/one.txt",
"hash":"73f1dd69bacbf0847cc9cffa3c6b23a1", "bytes":22,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.935560"}]
'''.strip()
        else:
            # Full container listing, plain-text flavour.
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'Content-Type': 'text/plain; charset=utf-8'})
            body = '\n'.join(['401error.html', '404error.html', 'index.html',
                              'listing.css', 'one.txt', 'subdir/1.txt',
                              'subdir/2.txt', u'subdir/\u2603.txt', 'subdir2',
                              'subdir3/subsubdir/index.html', 'two.txt',
                              u'\u2603/\u2603/one.txt'])
        return Response(status='200 Ok', headers=headers,
                        body=body)(env, start_response)
class FakeAuthFilter(object):
    """Minimal auth middleware stub that can selectively deny requests.

    A stripped path with exactly three segments (version/account/container)
    is treated as a listing request; anything deeper targets an object.
    """
    def __init__(self, app, deny_objects=False, deny_listing=False):
        self.app = app
        self.deny_objects = deny_objects
        self.deny_listing = deny_listing
    def authorize(self, req):
        """Return a 401 response for denied request types, else None."""
        depth = len(req.path.strip('/').split('/'))
        denied = ((self.deny_objects and depth > 3)
                  or (self.deny_listing and depth == 3))
        if denied:
            return HTTPUnauthorized()
        return None
    def __call__(self, env, start_response):
        # Install the callback the staticweb middleware looks for, then
        # pass the request straight through to the wrapped app.
        env['swift.authorize'] = self.authorize
        return self.app(env, start_response)
class TestStaticWeb(unittest.TestCase):
    """Run requests through staticweb wrapped around FakeApp, using the
    canned container metadata from meta_map."""
    def setUp(self):
        # Build the stack (FakeAuthFilter -> staticweb -> FakeApp) and
        # monkey-patch staticweb's get_container_info with the mock above.
        self.app = FakeApp()
        self.test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app))
        self._orig_get_container_info = staticweb.get_container_info
        staticweb.get_container_info = mock_get_container_info
    def tearDown(self):
        # Restore the real get_container_info for other test modules.
        staticweb.get_container_info = self._orig_get_container_info
    def test_app_set(self):
        app = FakeApp()
        sw = staticweb.filter_factory({})(app)
        self.assertEqual(sw.app, app)
    def test_conf_set(self):
        conf = {'blah': 1}
        sw = staticweb.filter_factory(conf)(FakeApp())
        self.assertEqual(sw.conf, conf)
    def test_root(self):
        resp = Request.blank('/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_version(self):
        resp = Request.blank('/v1').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 412)
    def test_account(self):
        resp = Request.blank('/v1/a').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 401)
    def test_container1(self):
        resp = Request.blank('/v1/a/c1').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 401)
    def test_container1_web_mode_explicitly_off(self):
        resp = Request.blank('/v1/a/c1',
                             headers={'x-web-mode': 'false'}).get_response(
                                 self.test_staticweb)
        self.assertEqual(resp.status_int, 401)
    def test_container1_web_mode_explicitly_on(self):
        resp = Request.blank('/v1/a/c1',
                             headers={'x-web-mode': 'true'}).get_response(
                                 self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container2(self):
        # c2 has no web-* metadata, so the raw listing passes through.
        resp = Request.blank('/v1/a/c2').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'text/plain')
        self.assertEqual(len(resp.body.split('\n')),
                         int(resp.headers['x-container-object-count']))
    def test_container2_web_mode_explicitly_off(self):
        resp = Request.blank(
            '/v1/a/c2',
            headers={'x-web-mode': 'false'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'text/plain')
        self.assertEqual(len(resp.body.split('\n')),
                         int(resp.headers['x-container-object-count']))
    def test_container2_web_mode_explicitly_on(self):
        resp = Request.blank(
            '/v1/a/c2',
            headers={'x-web-mode': 'true'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container2onetxt(self):
        resp = Request.blank(
            '/v1/a/c2/one.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container2json(self):
        resp = Request.blank(
            '/v1/a/c2?format=json').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(len(json.loads(resp.body)),
                         int(resp.headers['x-container-object-count']))
    def test_container2json_web_mode_explicitly_off(self):
        resp = Request.blank(
            '/v1/a/c2?format=json',
            headers={'x-web-mode': 'false'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(len(json.loads(resp.body)),
                         int(resp.headers['x-container-object-count']))
    def test_container2json_web_mode_explicitly_on(self):
        resp = Request.blank(
            '/v1/a/c2?format=json',
            headers={'x-web-mode': 'true'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container3(self):
        # Container without trailing slash redirects to the slashed form.
        resp = Request.blank('/v1/a/c3').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        self.assertEqual(resp.headers['location'],
                         'http://localhost/v1/a/c3/')
    def test_container3indexhtml(self):
        resp = Request.blank('/v1/a/c3/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Test main index.html file.' in resp.body)
    def test_container3subsubdir(self):
        resp = Request.blank(
            '/v1/a/c3/subdir3/subsubdir').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
    def test_container3subsubdircontents(self):
        resp = Request.blank(
            '/v1/a/c3/subdir3/subsubdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body, 'index file')
    def test_container3subdir(self):
        # No listings CSS configured for c3: inline <style>, no <link>.
        resp = Request.blank(
            '/v1/a/c3/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c3/subdir/' in resp.body)
        self.assertTrue('</style>' in resp.body)
        self.assertNotIn('<link', resp.body)
        self.assertNotIn('listing.css', resp.body)
    def test_container3subdirx(self):
        resp = Request.blank(
            '/v1/a/c3/subdirx/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container3subdiry(self):
        resp = Request.blank(
            '/v1/a/c3/subdiry/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container3subdirz(self):
        resp = Request.blank(
            '/v1/a/c3/subdirz').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
    def test_container3unknown(self):
        # c3 has no web-error metadata, so no custom 404 body appears.
        resp = Request.blank(
            '/v1/a/c3/unknown').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertNotIn("Chrome's 404 fancy-page sucks.", resp.body)
    def test_container3bindexhtml(self):
        resp = Request.blank('/v1/a/c3b/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.body, '')
    def test_container4indexhtml(self):
        resp = Request.blank('/v1/a/c4/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c4/' in resp.body)
        self.assertTrue('href="listing.css"' in resp.body)
    def test_container4indexhtmlauthed(self):
        resp = Request.blank('/v1/a/c4').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        # Authenticated requests bypass web mode unless x-web-mode forces it.
        resp = Request.blank(
            '/v1/a/c4',
            environ={'REMOTE_USER': 'authed'}).get_response(
                self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        resp = Request.blank(
            '/v1/a/c4', headers={'x-web-mode': 't'},
            environ={'REMOTE_USER': 'authed'}).get_response(
                self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
    def test_container4unknown(self):
        # c4's web-error metadata causes the custom 404 page to be served.
        resp = Request.blank(
            '/v1/a/c4/unknown').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue("Chrome's 404 fancy-page sucks." in resp.body)
    def test_container4subdir(self):
        # c4 has web-listings-css: the listing links the stylesheet.
        resp = Request.blank(
            '/v1/a/c4/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c4/subdir/' in resp.body)
        self.assertNotIn('</style>', resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue('href="../listing.css"' in resp.body)
        self.assertEqual(resp.headers['content-type'],
                         'text/html; charset=UTF-8')
    def test_container4onetxt(self):
        resp = Request.blank(
            '/v1/a/c4/one.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
    def test_container4twotxt(self):
        resp = Request.blank(
            '/v1/a/c4/two.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 503)
    def test_container5indexhtml(self):
        resp = Request.blank('/v1/a/c5/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 503)
    def test_container5unknown(self):
        # c5's 404error.html itself 404s, so no custom error body appears.
        resp = Request.blank(
            '/v1/a/c5/unknown').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertNotIn("Chrome's 404 fancy-page sucks.", resp.body)
    def test_container6subdir(self):
        resp = Request.blank(
            '/v1/a/c6/subdir').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
    def test_container6listing(self):
        # container6 has web-listings = t, web-error=error.html
        resp = Request.blank('/v1/a/c6/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        # expect custom 401 if request is not auth'd for listing but is auth'd
        # to GET objects
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True)
        resp = Request.blank('/v1/a/c6/').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertIn("Hey, you're not authorized to see this!", resp.body)
        # expect default 401 if request is not auth'd for listing or object GET
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True,
            deny_objects=True)
        resp = Request.blank('/v1/a/c6/').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertNotIn("Hey, you're not authorized to see this!", resp.body)
    def test_container6blisting(self):
        # web-listings-label replaces the path in the listing heading.
        label = 'Listing of {0}/'.format(
            meta_map['c6b']['meta']['web-listings-label'])
        resp = Request.blank('/v1/a/c6b/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(label, resp.body)
    def test_container7listing(self):
        # container7 has web-listings = f, web-error=error.html
        resp = Request.blank('/v1/a/c7/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertIn("Web Listing Disabled", resp.body)
        # expect 301 if auth'd but no trailing '/'
        resp = Request.blank('/v1/a/c7').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        # expect default 401 if request is not auth'd and no trailing '/'
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True,
            deny_objects=True)
        resp = Request.blank('/v1/a/c7').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertNotIn("Hey, you're not authorized to see this!", resp.body)
        # expect custom 401 if request is not auth'd for listing
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True)
        resp = Request.blank('/v1/a/c7/').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertIn("Hey, you're not authorized to see this!", resp.body)
        # expect default 401 if request is not auth'd for listing or object GET
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True,
            deny_objects=True)
        resp = Request.blank('/v1/a/c7/').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertNotIn("Hey, you're not authorized to see this!", resp.body)
    def test_container8listingcss(self):
        # c8 uses an absolute-URL web-listings-css value.
        resp = Request.blank(
            '/v1/a/c8/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c8/' in resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue(
            'href="http://localhost/stylesheets/listing.css"' in resp.body)
    def test_container8subdirlistingcss(self):
        resp = Request.blank(
            '/v1/a/c8/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c8/subdir/' in resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue(
            'href="http://localhost/stylesheets/listing.css"' in resp.body)
    def test_container9listingcss(self):
        # c9 uses a root-relative web-listings-css value.
        resp = Request.blank(
            '/v1/a/c9/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c9/' in resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue('href="/absolute/listing.css"' in resp.body)
    def test_container9subdirlistingcss(self):
        resp = Request.blank(
            '/v1/a/c9/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c9/subdir/' in resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue('href="/absolute/listing.css"' in resp.body)
    def test_container10unicodesubdirlisting(self):
        # Listings for UTF-8 (snowman) directory names.
        resp = Request.blank(
            '/v1/a/c10/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c10/' in resp.body)
        resp = Request.blank(
            '/v1/a/c10/\xe2\x98\x83/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c10/\xe2\x98\x83/' in resp.body)
        resp = Request.blank(
            '/v1/a/c10/\xe2\x98\x83/\xe2\x98\x83/'
        ).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue(
            'Listing of /v1/a/c10/\xe2\x98\x83/\xe2\x98\x83/' in resp.body)
    def test_container11subdirmarkerobjectindex(self):
        # application/directory marker object with an index file present.
        resp = Request.blank('/v1/a/c11/subdir/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('<h2>c11 subdir index</h2>' in resp.body)
    def test_container11subdirmarkermatchdirtype(self):
        # Marker matches c11a's web-directory-type but the index is missing.
        resp = Request.blank('/v1/a/c11a/subdir/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue('Index File Not Found' in resp.body)
    def test_container11subdirmarkeraltdirtype(self):
        # Marker content type differs from web-directory-type: passed through.
        resp = Request.blank('/v1/a/c11a/subdir2/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
    def test_container11subdirmarkerinvaliddirtype(self):
        resp = Request.blank('/v1/a/c11a/subdir3/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
def test_container12unredirectedrequest(self):
resp = Request.blank('/v1/a/c12/').get_response(
self.test_staticweb)
self.assertEqual(resp.status_int, 200)
self.assertTrue('index file' in resp.body)
def test_container_404_has_css(self):
resp = Request.blank('/v1/a/c13/').get_response(
self.test_staticweb)
self.assertEqual(resp.status_int, 404)
self.assertTrue('listing.css' in resp.body)
def test_container_404_has_no_css(self):
resp = Request.blank('/v1/a/c7/').get_response(
self.test_staticweb)
self.assertEqual(resp.status_int, 404)
self.assertNotIn('listing.css', resp.body)
self.assertTrue('<style' in resp.body)
def test_subrequest_once_if_possible(self):
resp = Request.blank(
'/v1/a/c4/one.txt').get_response(self.test_staticweb)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-object-meta-test'], 'value')
self.assertEqual(resp.body, '1')
self.assertEqual(self.app.calls, 1)
    def test_no_auth_middleware(self):
        """Trailing-slash redirects depend on an auth middleware being
        present before staticweb in the pipeline."""
        # With an auth filter in front (self.test_staticweb), a container
        # request lacking a trailing '/' gets redirected.
        resp = Request.blank('/v1/a/c3').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        # Test without an authentication middleware before staticweb.
        # This is no longer handled by staticweb middleware, thus not
        # returning a 301 redirect.
        self.test_staticweb = staticweb.filter_factory({})(self.app)
        resp = Request.blank('/v1/a/c3').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
    def test_subrequest_not_override_auth(self):
        """staticweb's internal subrequests must never set
        'swift.authorize_override' in the WSGI environment themselves."""
        app_call = \
            'swift.common.middleware.staticweb._StaticWebContext._app_call'
        orig_app_call = staticweb._StaticWebContext._app_call
        # bind self.fail so the hook (which shadows 'self') can still use it
        _fail = self.fail
        def hook_app_call(self, env):
            # intercept every internal app call; fail the test if staticweb
            # injected authorization info on its own
            if 'swift.authorize_override' in env:
                _fail('staticweb must not create authorize info by itself')
            return orig_app_call(self, env)
        with mock.patch(app_call, hook_app_call):
            # testing for _listing container
            resp = Request.blank('/v1/a/c4/').get_response(self.test_staticweb)
            self.assertEqual(resp.status_int, 200)  # sanity
            # testing for _listing object subdir
            resp = Request.blank(
                '/v1/a/c4/unknown').get_response(self.test_staticweb)
            self.assertEqual(resp.status_int, 404)
            # testing for _error_response
            resp = Request.blank('/v1/a/c5/').get_response(self.test_staticweb)
            self.assertEqual(resp.status_int, 503)  # sanity
class TestStaticWebUrlBase(unittest.TestCase):
    """Tests for staticweb's ``url_base`` option, which overrides the
    scheme and/or netloc used in redirect Location headers."""

    def setUp(self):
        self.app = FakeApp()
        # patch container-info lookup so no real backend is required
        self._orig_get_container_info = staticweb.get_container_info
        staticweb.get_container_info = mock_get_container_info

    def tearDown(self):
        staticweb.get_container_info = self._orig_get_container_info

    def test_container3subdirz_scheme(self):
        """url_base with only a scheme overrides the redirect scheme."""
        path = '/v1/a/c3/subdirz'
        scheme = 'https'
        # build url_base from the local so the asserted value and the
        # configured value cannot drift apart
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({
                'url_base': '%s://' % (scheme,)})(self.app))
        resp = Request.blank(path).get_response(test_staticweb)
        self.assertEqual(resp.status_int, 301)
        parsed = urlparse(resp.location)
        self.assertEqual(parsed.scheme, scheme)
        # We omit comparing netloc here, because swob is free to add port.
        self.assertEqual(parsed.path, path + '/')

    def test_container3subdirz_host(self):
        """url_base with only a netloc overrides the redirect host."""
        path = '/v1/a/c3/subdirz'
        netloc = 'example.com'
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({
                'url_base': '//%s' % (netloc,)})(self.app))
        resp = Request.blank(path).get_response(test_staticweb)
        self.assertEqual(resp.status_int, 301)
        parsed = urlparse(resp.location)
        # We compare scheme with the default. This may change, but unlikely.
        self.assertEqual(parsed.scheme, 'http')
        self.assertEqual(parsed.netloc, netloc)
        self.assertEqual(parsed.path, path + '/')

    def test_container3subdirz_both(self):
        """url_base with scheme and netloc overrides both."""
        path = '/v1/a/c3/subdirz'
        scheme = 'http'
        netloc = 'example.com'
        # previously the url_base was hard-coded and the locals unused
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({
                'url_base': '%s://%s' % (scheme, netloc)})(self.app))
        resp = Request.blank(path).get_response(test_staticweb)
        self.assertEqual(resp.status_int, 301)
        parsed = urlparse(resp.location)
        self.assertEqual(parsed.scheme, scheme)
        self.assertEqual(parsed.netloc, netloc)
        self.assertEqual(parsed.path, path + '/')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
"""This module contains the class that defines the interaction between
different modules that govern agent's behavior.
"""
from tqdm import tqdm
import pandas as pd
import numpy as np
import torch
from torch import zeros, ones, tensor
from pyro import clear_param_store, get_param_store
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.enum import get_importance_trace
from pyro.infer.predictive import Predictive
from pyro.optim import Adam
__all__ = [
'Inferrer'
]
class Inferrer(object):
    """Ties together an agent model, presented stimuli and measured
    responses, and provides stochastic variational inference (SVI) over
    the agent's free parameters.

    Subclasses must implement :meth:`model` and :meth:`guide`.
    """

    def __init__(self, agent, stimulus, responses, mask=None, fixed_params=None):
        self.agent = agent  # agent used for computing response probabilities
        self.stimulus = stimulus  # stimulus and action outcomes presented to each participant
        self.responses = responses  # measured behavioral data across all subjects
        # number of blocks, trials per block, and subjects/runs
        self.nb, self.nt, self.runs = self.responses.shape

        # set a mask for excluding certain responses (e.g. NaN responses)
        # from the computation of the log-model evidence and the posterior
        # beliefs over parameter values
        if mask is not None:
            self.notnans = mask
        else:
            self.notnans = ones(self.nb, self.nt, self.runs, dtype=torch.bool)

        if fixed_params is not None:
            # some parameters are clamped to given values; only the
            # remaining ones are inferred
            n_fixed = len(fixed_params['labels'])
            self.npar = agent.npar - n_fixed
            self.locs = {}
            self.locs['fixed'] = fixed_params['labels']
            self.locs['free'] = list(set(range(agent.npar)) - set(fixed_params['labels']))
            self.values = fixed_params['values']
            self.fixed_values = True
        else:
            self.npar = agent.npar
            self.fixed_values = False

    def model(self):
        """Full generative model of behavior."""
        raise NotImplementedError

    def guide(self):
        """Approximate posterior over model parameters."""
        raise NotImplementedError

    def infer_posterior(self,
                        iter_steps=10000,
                        num_particles=100,
                        optim_kwargs=None):
        """Perform SVI over free model parameters.

        :param iter_steps: number of gradient steps.
        :param num_particles: number of (vectorized) ELBO particles.
        :param optim_kwargs: keyword arguments for the Adam optimizer;
            defaults to ``{'lr': .01}``.
        """
        # None-sentinel instead of a mutable dict default argument, which
        # would be shared across calls
        if optim_kwargs is None:
            optim_kwargs = {'lr': .01}
        clear_param_store()
        svi = SVI(model=self.model,
                  guide=self.guide,
                  optim=Adam(optim_kwargs),
                  loss=Trace_ELBO(num_particles=num_particles,
                                  vectorize_particles=True))
        loss = []
        pbar = tqdm(range(iter_steps), position=0)
        for step in pbar:
            loss.append(svi.step())
            # running mean over the last 20 steps for a smoother readout
            pbar.set_description("Mean ELBO %6.2f" % tensor(loss[-20:]).mean())
            if np.isnan(loss[-1]):
                # stop early on numerical failure
                break
        self.loss = loss

    def sample_posterior(self, labels, num_samples=10000):
        """Generate samples from the posterior distribution.

        :param labels: parameter names, one per free parameter.
        :param num_samples: number of posterior samples to draw.
        :return: tuple ``(trans_pars_df, pars_df, mu_df, tau_df)`` of
            pandas data frames.
        """
        nsub = self.runs
        npar = self.npar
        assert npar == len(labels)
        # Restrict return_sites to the sites actually used below. The
        # previous implementation ran Predictive twice -- once without
        # return_sites -- and discarded the first (expensive) result.
        sites = ['tau', 'mu', 'locs']
        predict = Predictive(self.model, guide=self.guide,
                             num_samples=num_samples, return_sites=sites,
                             parallel=True)
        samples = predict()
        subject_label = torch.arange(1, nsub + 1).repeat(num_samples, 1).reshape(-1)
        trans_pars_df = pd.DataFrame(
            data=samples['locs'].detach().reshape(-1, npar).numpy(),
            columns=labels)
        trans_pars_df['subject'] = subject_label.numpy()
        locs = samples['locs'].detach()
        if self.fixed_values:
            # re-insert the clamped parameters at their original positions
            x = zeros(locs.shape[:-1] + (self.agent.npar,))
            x[..., self.locs['fixed']] = self.values
            x[..., self.locs['free']] = locs
        else:
            x = locs
        self.agent.set_parameters(x)
        pars = []
        for lab in labels:
            pars.append(getattr(self.agent, lab).reshape(-1).numpy())
        pars_df = pd.DataFrame(data=np.stack(pars, -1), columns=labels)
        pars_df['subject'] = subject_label.numpy()
        # group-level mean and precision samples
        mu = np.take(samples['mu'].detach().numpy(), 0, axis=-2)
        tau = np.take(samples['tau'].detach().numpy(), 0, axis=-2)
        mu_df = pd.DataFrame(data=mu, columns=labels)
        tau_df = pd.DataFrame(data=tau, columns=labels)
        return (trans_pars_df, pars_df, mu_df, tau_df)

    def _get_quantiles(self, quantiles):
        """
        Returns posterior quantiles for each latent variable. Example::

            print(agent.get_quantiles([0.05, 0.5, 0.95]))

        :param quantiles: A list of requested quantiles between 0 and 1.
        :type quantiles: torch.tensor or list
        :return: A dict mapping sample site name to a list of quantile values.
        :rtype: dict
        """
        raise NotImplementedError

    def formated_results(self, par_names, labels=None):
        """Return median, 5th and 95th percentile for each parameter and subject.

        NOTE: the method name keeps its historical spelling ('formated')
        so existing callers continue to work.
        """
        nsub = self.runs
        npar = self.npar
        if labels is None:
            labels = par_names
        quantiles = self._get_quantiles([.05, .5, .95])
        # reorder axes so the three quantiles lead: target layout is
        # (3, nsub, npar), matching the zeros() allocation below
        locs = quantiles['locs'].transpose(dim0=0, dim1=-1).transpose(dim0=1, dim1=-1)
        if self.fixed_values:
            x = zeros(3, nsub, npar)
            x[..., self.locs['fixed']] = self.values
            x[..., self.locs['free']] = locs.detach()
        else:
            x = locs.detach()
        self.agent.set_parameters(x, set_variables=False)
        par_values = {}
        for name in par_names:
            values = getattr(self.agent, name)
            if values.dim() < 3:
                values = values.unsqueeze(dim=-1)
            par_values[name] = values
        count = {}
        percentiles = {}
        for name in par_names:
            count.setdefault(name, 0)
            # a parameter may span several labelled columns (label prefix
            # match); count[] tracks which column to take next
            for lbl in labels:
                if lbl.startswith(name):
                    percentiles[lbl] = par_values[name][..., count[name]].numpy().reshape(-1)
                    count[name] += 1
        df_percentiles = pd.DataFrame(percentiles)
        subjects = torch.arange(1, nsub + 1).repeat(3, 1).reshape(-1)
        df_percentiles['subjects'] = subjects.numpy()
        # use the module-level numpy import instead of a function-scope one
        variables = np.tile(np.array(['5th', 'median', '95th']), [nsub, 1]).T.reshape(-1)
        df_percentiles['variables'] = variables
        return df_percentiles.melt(id_vars=['subjects', 'variables'], var_name='parameter')

    def get_log_evidence_per_subject(self, num_particles=100, max_plate_nesting=1):
        """Return subject-specific log model evidence, estimated as an
        importance-sampled ELBO averaged over ``num_particles`` particles."""
        model = self.model
        guide = self.guide
        notnans = self.notnans
        elbo = zeros(self.runs)
        for i in range(num_particles):
            model_trace, guide_trace = get_importance_trace(
                'flat', max_plate_nesting, model, guide)
            obs_log_probs = zeros(notnans.shape)
            for site in model_trace.nodes.values():
                if site['name'].startswith('obs'):
                    # scatter observation log-probs back to their
                    # (block, trial, subject) positions; masked-out
                    # responses stay at zero
                    obs_log_probs[notnans] = site['log_prob'].detach()
                elif site['name'] == 'locs':
                    elbo += site['log_prob'].detach()
            # sum over blocks and trials, keep the subject dimension
            elbo += torch.einsum('ijk->k', obs_log_probs)
            for site in guide_trace.nodes.values():
                if site['name'] == 'locs':
                    elbo -= site['log_prob'].detach()
        return elbo / num_particles
| |
"""Implementations of osid abstract base class search_orders."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class OsidSearchOrder:
    """``OsidSearchOrder`` specifies preferred ordering of search results.

    An ``OsidSearchOrder`` is available from a search session and
    supplied to an ``OsidSearch`` interface::

        OsidSearch os = session.getObjectSearch();
        os.limitResultSet(1, 25);
        OsidSearchOrder order = session.getObjectSearchOrder();
        order.orderByDisplayName();
        os.orderResults(order);
        OsidQuery query;
        query = session.getObjectQuery();
        query.addDescriptionMatch("*food*", wildcardStringMatchType, true);
        ObjectSearchResults results = session.getObjectsBySearch(query, os);
        ObjectList list = results.getObjectList();

    """
    __metaclass__ = abc.ABCMeta
class OsidIdentifiableSearchOrder:
    """``OsidIdentifiableSearchOrder`` specifies preferred ordering of search results.

    An ``OsidSearchOrder`` is available from a search session and
    supplied to an ``OsidSearch``.

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_id(self, style):
        """Specifies a preference for ordering the result set by the ``Id``.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass


class OsidExtensibleSearchOrder:
    """``OsidExtensibleSearchOrder`` specifies preferred ordering of search results.

    An ``OsidSearchOrder`` is available from a search session and
    supplied to an ``OsidSearch``.

    """
    __metaclass__ = abc.ABCMeta


class OsidBrowsableSearchOrder:
    """``OsidBrowsableSearchOrder`` specifies preferred ordering of search results.

    An ``OsidSearchOrder`` is available from a search session and
    supplied to an ``OsidSearch``.

    """
    __metaclass__ = abc.ABCMeta
class OsidTemporalSearchOrder:
    """An interface for specifying the ordering of search results by temporal attributes."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_effective(self, style):
        """Specifies a preference for ordering the result set by the effective status.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_start_date(self, style):
        """Specifies a preference for ordering the result set by the start date.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_end_date(self, style):
        """Specifies a preference for ordering the result set by the end date.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass


class OsidSubjugateableSearchOrder:
    """An interface for specifying the ordering of dependent object search results."""
    __metaclass__ = abc.ABCMeta


class OsidAggregateableSearchOrder:
    """An interface for specifying the ordering of assemblage search results."""
    __metaclass__ = abc.ABCMeta


class OsidContainableSearchOrder:
    """An interface for specifying the ordering of search results by the sequestered flag."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_sequestered(self, style):
        """Specifies a preference for ordering the result set by the sequestered flag.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
class OsidSourceableSearchOrder:
    """An interface for specifying the ordering of search results by provider."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_provider(self, style):
        """Specifies a preference for ordering the results by provider.

        The element of the provider to order is not specified but may be
        managed through the provider ordering interface.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def supports_provider_search_order(self):
        """Tests if a ``ProviderSearchOrder`` interface is available.

        :return: ``true`` if a provider search order interface is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_provider_search_order(self):
        """Gets the search order interface for a provider.

        :return: the provider search order interface
        :rtype: ``osid.resource.ResourceSearchOrder``
        :raise: ``Unimplemented`` -- ``supports_provider_search_order()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_provider_search_order()`` is ``true``.*

        """
        return  # osid.resource.ResourceSearchOrder

    provider_search_order = property(fget=get_provider_search_order)


class OsidFederateableSearchOrder:
    """An interface for specifying the ordering of search results."""
    __metaclass__ = abc.ABCMeta


class OsidOperableSearchOrder:
    """An interface for specifying the ordering of search results by operational status."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_active(self, style):
        """Specifies a preference for ordering the result set by the active status.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_enabled(self, style):
        """Specifies a preference for ordering the result set by the administratively enabled status.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_disabled(self, style):
        """Specifies a preference for ordering the result set by the administratively disabled status.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_operational(self, style):
        """Specifies a preference for ordering the results by the operational status.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
class OsidObjectSearchOrder:
    """``OsidObjectSearchOrder`` specifies preferred ordering of search results.

    An ``OsidSearchOrder`` is available from a search session and
    supplied to an ``OsidSearch``::

        OsidObjectSearch os = session.getObjectSearch();
        os.limitResultSet(1, 25);
        OsidObjectSearchOrder order = session.getObjectSearchOrder();
        order.orderByDisplayName();
        os.orderResults(order);
        OsidObjectQuery query;
        query = session.getObjectQuery();
        query.addDescriptionMatch("*food*", wildcardStringMatchType, true);
        ObjectSearchResults results = session.getObjectsBySearch(query, os);
        ObjectList list = results.getObjectList();

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_display_name(self, style):
        """Specifies a preference for ordering the result set by the display name.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_description(self, style):
        """Specifies a preference for ordering the result set by the description.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_genus_type(self, style):
        """Specifies a preference for ordering the result set by the genus type.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_state(self, process_id, style):
        """Orders by the state in a given ``Process``.

        :param process_id: a process ``Id``
        :type process_id: ``osid.id.Id``
        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``process_id`` or ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_cumulative_rating(self, book_id, style):
        """Orders by the cumulative rating in a given ``Book``.

        :param book_id: a book ``Id``
        :type book_id: ``osid.id.Id``
        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``book_id`` or ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_statistic(self, meter_id, style):
        """Orders by a statistic for a given ``Meter``.

        :param meter_id: a meter ``Id``
        :type meter_id: ``osid.id.Id``
        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``meter_id`` or ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_create_time(self, style):
        """Orders by the timestamp of the first journal entry.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_last_modified_time(self, style):
        """Orders by the timestamp of the last journal entry.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
class OsidRelationshipSearchOrder:
    """An interface for specifying the ordering of relationship search results."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_end_reason(self, style):
        """Specifies a preference for ordering the results by the end reason state.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def supports_end_reason_search_order(self):
        """Tests if a ``StateSearchOrder`` is available.

        :return: ``true`` if a state search order is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_end_reason_search_order(self):
        """Gets the search order for a state.

        :return: the state search order
        :rtype: ``osid.process.StateSearchOrder``
        :raise: ``Unimplemented`` -- ``supports_end_reason_search_order()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_end_reason_search_order()`` is ``true``.*

        """
        return  # osid.process.StateSearchOrder

    end_reason_search_order = property(fget=get_end_reason_search_order)


class OsidCatalogSearchOrder:
    """An interface for specifying the ordering of catalog search results."""
    __metaclass__ = abc.ABCMeta


class OsidRuleSearchOrder:
    """An interface for specifying the ordering of rule-governed search results."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_rule(self, style):
        """Specifies a preference for ordering the results by the associated rule.

        The element of the rule to order is not specified but may be
        managed through a ``RuleSearchOrder``.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def supports_rule_search_order(self):
        """Tests if a ``RuleSearchOrder`` is available.

        :return: ``true`` if a rule search order is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_rule_search_order(self):
        """Gets the search order for a rule.

        :return: the rule search order
        :rtype: ``osid.rules.RuleSearchOrder``
        :raise: ``Unimplemented`` -- ``supports_rule_search_order()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_rule_search_order()`` is ``true``.*

        """
        return  # osid.rules.RuleSearchOrder

    rule_search_order = property(fget=get_rule_search_order)
class OsidEnablerSearchOrder:
    """An interface for specifying the ordering of enabler search results."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_schedule(self, style):
        """Specifies a preference for ordering the results by the associated schedule.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def supports_schedule_search_order(self):
        """Tests if a ``ScheduleSearchOrder`` is available.

        :return: ``true`` if a schedule search order is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_schedule_search_order(self):
        """Gets the search order for a schedule.

        :return: the schedule search order
        :rtype: ``osid.calendaring.ScheduleSearchOrder``
        :raise: ``Unimplemented`` -- ``supports_schedule_search_order()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_schedule_search_order()`` is ``true``.*

        """
        return  # osid.calendaring.ScheduleSearchOrder

    schedule_search_order = property(fget=get_schedule_search_order)

    @abc.abstractmethod
    def order_by_event(self, style):
        """Specifies a preference for ordering the results by the associated event.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def supports_event_search_order(self):
        """Tests if an ``EventSearchOrder`` is available.

        :return: ``true`` if an event search order is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_event_search_order(self):
        """Gets the search order for an event.

        :return: the event search order
        :rtype: ``osid.calendaring.EventSearchOrder``
        :raise: ``Unimplemented`` -- ``supports_event_search_order()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_event_search_order()`` is ``true``.*

        """
        return  # osid.calendaring.EventSearchOrder

    event_search_order = property(fget=get_event_search_order)

    @abc.abstractmethod
    def order_by_cyclic_event(self, style):
        """Orders the results by cyclic event.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def supports_cyclic_event_search_order(self):
        """Tests if a cyclic event search order is available.

        :return: ``true`` if a cyclic event search order is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_cyclic_event_search_order(self):
        """Gets the cyclic event search order.

        NOTE(review): unlike the other getters in this class, this one is
        tagged *mandatory* and raises ``IllegalState`` rather than
        ``Unimplemented`` -- looks inconsistent; verify against the OSID
        specification before changing.

        :return: the cyclic event search order
        :rtype: ``osid.calendaring.cycle.CyclicEventSearchOrder``
        :raise: ``IllegalState`` -- ``supports_cyclic_event_search_order()`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.calendaring.cycle.CyclicEventSearchOrder

    cyclic_event_search_order = property(fget=get_cyclic_event_search_order)

    @abc.abstractmethod
    def order_by_demographic(self, style):
        """Specifies a preference for ordering the results by the associated demographic resource.

        :param style: search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def supports_demographic_search_order(self):
        """Tests if a ``ResourceSearchOrder`` is available.

        :return: ``true`` if a resource search order is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_demographic_search_order(self):
        """Gets the search order for a demographic resource.

        :return: the resource search order
        :rtype: ``osid.resource.ResourceSearchOrder``
        :raise: ``Unimplemented`` -- ``supports_demographic_search_order()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_demographic_search_order()`` is ``true``.*

        """
        return  # osid.resource.ResourceSearchOrder

    demographic_search_order = property(fget=get_demographic_search_order)
class OsidConstrainerSearchOrder:
    """An interface for specifying the ordering of search results."""
    __metaclass__ = abc.ABCMeta


class OsidProcessorSearchOrder:
    """An interface for specifying the ordering of search results."""
    __metaclass__ = abc.ABCMeta


class OsidGovernatorSearchOrder:
    """An interface for specifying the ordering of search results."""
    __metaclass__ = abc.ABCMeta


class OsidCompendiumSearchOrder:
    """An interface for specifying the ordering of report search results by dates and interpolation state."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def order_by_start_date(self, style):
        """Specifies a preference for ordering the result set by the start date.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_end_date(self, style):
        """Specifies a preference for ordering the result set by the end date.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_interpolated(self, style):
        """Specifies a preference for ordering the result set by interpolated results.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_by_extrapolated(self, style):
        """Specifies a preference for ordering the result set by extrapolated results.

        :param style: the search order style
        :type style: ``osid.SearchOrderStyle``
        :raise: ``NullArgument`` -- ``style`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass


class OsidCapsuleSearchOrder:
    """An interface for specifying the ordering of search results."""
    __metaclass__ = abc.ABCMeta
| |
"""Support for Xiaomi Mi Air Purifier and Xiaomi Mi Air Humidifier with humidifier entity."""
from enum import Enum
import logging
import math
from miio.airhumidifier import OperationMode as AirhumidifierOperationMode
from miio.airhumidifier_miot import OperationMode as AirhumidifierMiotOperationMode
from miio.airhumidifier_mjjsq import OperationMode as AirhumidifierMjjsqOperationMode
from homeassistant.components.humidifier import HumidifierEntity
from homeassistant.components.humidifier.const import (
DEFAULT_MAX_HUMIDITY,
DEFAULT_MIN_HUMIDITY,
DEVICE_CLASS_HUMIDIFIER,
SUPPORT_MODES,
)
from homeassistant.const import ATTR_MODE
from homeassistant.core import callback
from homeassistant.util.percentage import percentage_to_ranged_value
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_MODEL,
DOMAIN,
KEY_COORDINATOR,
KEY_DEVICE,
MODEL_AIRHUMIDIFIER_CA1,
MODEL_AIRHUMIDIFIER_CA4,
MODEL_AIRHUMIDIFIER_CB1,
MODELS_HUMIDIFIER_MIOT,
MODELS_HUMIDIFIER_MJJSQ,
)
from .device import XiaomiCoordinatedMiioEntity
_LOGGER = logging.getLogger(__name__)
# Air Humidifier
ATTR_TARGET_HUMIDITY = "target_humidity"
# Entity attribute name -> attribute name on the coordinator's status object.
AVAILABLE_ATTRIBUTES = {
    ATTR_MODE: "mode",
    ATTR_TARGET_HUMIDITY: "target_humidity",
}
# CA1/CB1 models expose every operation mode except Strong.
AVAILABLE_MODES_CA1_CB1 = [
    mode.name
    for mode in AirhumidifierOperationMode
    if mode is not AirhumidifierOperationMode.Strong
]
# CA4 (MiOT protocol) exposes all of its operation modes.
AVAILABLE_MODES_CA4 = [mode.name for mode in AirhumidifierMiotOperationMode]
# MJJSQ models expose every operation mode except WetAndProtect.
AVAILABLE_MODES_MJJSQ = [
    mode.name
    for mode in AirhumidifierMjjsqOperationMode
    if mode is not AirhumidifierMjjsqOperationMode.WetAndProtect
]
# All remaining models expose every operation mode except Auto.
AVAILABLE_MODES_OTHER = [
    mode.name
    for mode in AirhumidifierOperationMode
    if mode is not AirhumidifierOperationMode.Auto
]
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Humidifier from a config entry."""
    # Only device-based config entries expose a humidifier entity.
    if config_entry.data[CONF_FLOW_TYPE] != CONF_DEVICE:
        return

    entry_data = hass.data[DOMAIN][config_entry.entry_id]
    model = config_entry.data[CONF_MODEL]

    # Pick the entity implementation matching the device protocol.
    if model in MODELS_HUMIDIFIER_MIOT:
        entity_class = XiaomiAirHumidifierMiot
    elif model in MODELS_HUMIDIFIER_MJJSQ:
        entity_class = XiaomiAirHumidifierMjjsq
    else:
        entity_class = XiaomiAirHumidifier

    entity = entity_class(
        config_entry.title,
        entry_data[KEY_DEVICE],
        config_entry,
        config_entry.unique_id,
        entry_data[KEY_COORDINATOR],
    )
    async_add_entities([entity])
class XiaomiGenericHumidifier(XiaomiCoordinatedMiioEntity, HumidifierEntity):
    """Representation of a generic Xiaomi humidifier device."""

    _attr_device_class = DEVICE_CLASS_HUMIDIFIER
    _attr_supported_features = SUPPORT_MODES

    def __init__(self, name, device, entry, unique_id, coordinator):
        """Initialize the generic Xiaomi device."""
        super().__init__(name, device, entry, unique_id, coordinator=coordinator)
        # Runtime state is populated by subclasses / coordinator updates.
        self._state = None
        self._attributes = {}
        self._available_modes = []
        self._mode = None
        self._min_humidity = DEFAULT_MIN_HUMIDITY
        self._max_humidity = DEFAULT_MAX_HUMIDITY
        self._humidity_steps = 100
        self._target_humidity = None

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    @staticmethod
    def _extract_value_from_attribute(state, attribute):
        """Read an attribute from the device status, unwrapping Enum members."""
        value = getattr(state, attribute)
        return value.value if isinstance(value, Enum) else value

    @property
    def available_modes(self) -> list:
        """Get the list of available modes."""
        return self._available_modes

    @property
    def mode(self):
        """Get the current mode."""
        return self._mode

    @property
    def min_humidity(self):
        """Return the minimum target humidity."""
        return self._min_humidity

    @property
    def max_humidity(self):
        """Return the maximum target humidity."""
        return self._max_humidity

    async def async_turn_on(self, **kwargs) -> None:
        """Turn the device on."""
        if await self._try_command(
            "Turning the miio device on failed.", self._device.on
        ):
            self._state = True
            self.async_write_ha_state()

    async def async_turn_off(self, **kwargs) -> None:
        """Turn the device off."""
        if await self._try_command(
            "Turning the miio device off failed.", self._device.off
        ):
            self._state = False
            self.async_write_ha_state()

    def translate_humidity(self, humidity):
        """Translate the target humidity to the first valid step."""
        if not 0 < humidity <= 100:
            return None
        step = math.ceil(
            percentage_to_ranged_value((1, self._humidity_steps), humidity)
        )
        return step * 100 / self._humidity_steps
class XiaomiAirHumidifier(XiaomiGenericHumidifier, HumidifierEntity):
    """Representation of a Xiaomi Air Humidifier."""
    def __init__(self, name, device, entry, unique_id, coordinator):
        """Initialize the plug switch."""
        super().__init__(name, device, entry, unique_id, coordinator)
        # Per-model capabilities: selectable modes and target-humidity range/steps.
        if self._model in [MODEL_AIRHUMIDIFIER_CA1, MODEL_AIRHUMIDIFIER_CB1]:
            self._available_modes = AVAILABLE_MODES_CA1_CB1
            self._min_humidity = 30
            self._max_humidity = 80
            self._humidity_steps = 10
        elif self._model in [MODEL_AIRHUMIDIFIER_CA4]:
            self._available_modes = AVAILABLE_MODES_CA4
            self._min_humidity = 30
            self._max_humidity = 80
            self._humidity_steps = 100
        elif self._model in MODELS_HUMIDIFIER_MJJSQ:
            self._available_modes = AVAILABLE_MODES_MJJSQ
            self._min_humidity = 30
            self._max_humidity = 80
            self._humidity_steps = 100
        else:
            self._available_modes = AVAILABLE_MODES_OTHER
            self._min_humidity = 30
            self._max_humidity = 80
            self._humidity_steps = 10
        # Seed entity state from the last coordinator refresh.
        self._state = self.coordinator.data.is_on
        self._attributes.update(
            {
                key: self._extract_value_from_attribute(self.coordinator.data, value)
                for key, value in AVAILABLE_ATTRIBUTES.items()
            }
        )
        self._target_humidity = self._attributes[ATTR_TARGET_HUMIDITY]
        self._mode = self._attributes[ATTR_MODE]
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    @callback
    def _handle_coordinator_update(self):
        """Fetch state from the device."""
        self._state = self.coordinator.data.is_on
        self._attributes.update(
            {
                key: self._extract_value_from_attribute(self.coordinator.data, value)
                for key, value in AVAILABLE_ATTRIBUTES.items()
            }
        )
        self._target_humidity = self._attributes[ATTR_TARGET_HUMIDITY]
        self._mode = self._attributes[ATTR_MODE]
        self.async_write_ha_state()
    @property
    def mode(self):
        """Return the current mode."""
        return AirhumidifierOperationMode(self._mode).name
    @property
    def target_humidity(self):
        """Return the target humidity."""
        # Only meaningful when the device is in Auto mode, or on models
        # where Auto is not selectable at all.
        return (
            self._target_humidity
            if self._mode == AirhumidifierOperationMode.Auto.value
            or AirhumidifierOperationMode.Auto.name not in self.available_modes
            else None
        )
    async def async_set_humidity(self, humidity: int) -> None:
        """Set the target humidity of the humidifier and set the mode to auto."""
        target_humidity = self.translate_humidity(humidity)
        if not target_humidity:
            return
        _LOGGER.debug("Setting the target humidity to: %s", target_humidity)
        if await self._try_command(
            "Setting target humidity of the miio device failed.",
            self._device.set_target_humidity,
            target_humidity,
        ):
            self._target_humidity = target_humidity
        # No mode switch needed when modes are unsupported, the device is
        # already in Auto, or this model cannot select Auto at all.
        if (
            self.supported_features & SUPPORT_MODES == 0
            or AirhumidifierOperationMode(self._attributes[ATTR_MODE])
            == AirhumidifierOperationMode.Auto
            or AirhumidifierOperationMode.Auto.name not in self.available_modes
        ):
            self.async_write_ha_state()
            return
        _LOGGER.debug("Setting the operation mode to: Auto")
        if await self._try_command(
            "Setting operation mode of the miio device to MODE_AUTO failed.",
            self._device.set_mode,
            AirhumidifierOperationMode.Auto,
        ):
            self._mode = AirhumidifierOperationMode.Auto.value
            self.async_write_ha_state()
    async def async_set_mode(self, mode: str) -> None:
        """Set the mode of the humidifier."""
        if self.supported_features & SUPPORT_MODES == 0 or not mode:
            return
        if mode not in self.available_modes:
            _LOGGER.warning("Mode %s is not a valid operation mode", mode)
            return
        _LOGGER.debug("Setting the operation mode to: %s", mode)
        if await self._try_command(
            "Setting operation mode of the miio device failed.",
            self._device.set_mode,
            AirhumidifierOperationMode[mode],
        ):
            # Assumes AirhumidifierOperationMode values are the lowercase
            # mode names -- TODO confirm against python-miio.
            self._mode = mode.lower()
            self.async_write_ha_state()
class XiaomiAirHumidifierMiot(XiaomiAirHumidifier):
    """Representation of a Xiaomi Air Humidifier (MiOT protocol)."""
    # MiOT operation mode -> friendly mode name shown to Home Assistant.
    MODE_MAPPING = {
        AirhumidifierMiotOperationMode.Auto: "Auto",
        AirhumidifierMiotOperationMode.Low: "Low",
        AirhumidifierMiotOperationMode.Mid: "Mid",
        AirhumidifierMiotOperationMode.High: "High",
    }
    # Friendly mode name -> MiOT operation mode.
    REVERSE_MODE_MAPPING = {v: k for k, v in MODE_MAPPING.items()}
    @property
    def mode(self):
        """Return the current mode."""
        return AirhumidifierMiotOperationMode(self._mode).name
    @property
    def target_humidity(self):
        """Return the target humidity."""
        # Only meaningful while the device is on and in Auto mode.
        if self._state:
            return (
                self._target_humidity
                if AirhumidifierMiotOperationMode(self._mode)
                == AirhumidifierMiotOperationMode.Auto
                else None
            )
        return None
    async def async_set_humidity(self, humidity: int) -> None:
        """Set the target humidity of the humidifier and set the mode to auto."""
        target_humidity = self.translate_humidity(humidity)
        if not target_humidity:
            return
        _LOGGER.debug("Setting the humidity to: %s", target_humidity)
        if await self._try_command(
            "Setting operation mode of the miio device failed.",
            self._device.set_target_humidity,
            target_humidity,
        ):
            self._target_humidity = target_humidity
        # Skip the mode switch when modes are unsupported or already Auto.
        if (
            self.supported_features & SUPPORT_MODES == 0
            or AirhumidifierMiotOperationMode(self._attributes[ATTR_MODE])
            == AirhumidifierMiotOperationMode.Auto
        ):
            self.async_write_ha_state()
            return
        _LOGGER.debug("Setting the operation mode to: Auto")
        if await self._try_command(
            "Setting operation mode of the miio device to MODE_AUTO failed.",
            self._device.set_mode,
            AirhumidifierMiotOperationMode.Auto,
        ):
            # Presumably 0 is the raw value of
            # AirhumidifierMiotOperationMode.Auto -- TODO confirm.
            self._mode = 0
            self.async_write_ha_state()
    async def async_set_mode(self, mode: str) -> None:
        """Set the mode of the fan."""
        if self.supported_features & SUPPORT_MODES == 0 or not mode:
            return
        if mode not in self.REVERSE_MODE_MAPPING:
            _LOGGER.warning("Mode %s is not a valid operation mode", mode)
            return
        _LOGGER.debug("Setting the operation mode to: %s", mode)
        # The mode can only be changed while the device is powered on.
        if self._state:
            if await self._try_command(
                "Setting operation mode of the miio device failed.",
                self._device.set_mode,
                self.REVERSE_MODE_MAPPING[mode],
            ):
                self._mode = self.REVERSE_MODE_MAPPING[mode].value
                self.async_write_ha_state()
class XiaomiAirHumidifierMjjsq(XiaomiAirHumidifier):
    """Representation of a Xiaomi Air MJJSQ Humidifier."""
    # Friendly mode name -> MJJSQ operation mode.
    MODE_MAPPING = {
        "Low": AirhumidifierMjjsqOperationMode.Low,
        "Medium": AirhumidifierMjjsqOperationMode.Medium,
        "High": AirhumidifierMjjsqOperationMode.High,
        "Humidity": AirhumidifierMjjsqOperationMode.Humidity,
    }
    @property
    def mode(self):
        """Return the current mode."""
        return AirhumidifierMjjsqOperationMode(self._mode).name
    @property
    def target_humidity(self):
        """Return the target humidity.

        Only meaningful while the device is on and in Humidity mode.
        """
        if self._state:
            if (
                AirhumidifierMjjsqOperationMode(self._mode)
                == AirhumidifierMjjsqOperationMode.Humidity
            ):
                return self._target_humidity
        # Explicit for clarity; previously fell through implicitly.
        return None
    async def async_set_humidity(self, humidity: int) -> None:
        """Set the target humidity of the humidifier and set the mode to Humidity."""
        target_humidity = self.translate_humidity(humidity)
        if not target_humidity:
            return
        _LOGGER.debug("Setting the humidity to: %s", target_humidity)
        if await self._try_command(
            "Setting operation mode of the miio device failed.",
            self._device.set_target_humidity,
            target_humidity,
        ):
            self._target_humidity = target_humidity
        # Skip the mode switch when modes are unsupported or already Humidity.
        if (
            self.supported_features & SUPPORT_MODES == 0
            or AirhumidifierMjjsqOperationMode(self._attributes[ATTR_MODE])
            == AirhumidifierMjjsqOperationMode.Humidity
        ):
            self.async_write_ha_state()
            return
        _LOGGER.debug("Setting the operation mode to: Humidity")
        if await self._try_command(
            "Setting operation mode of the miio device to MODE_HUMIDITY failed.",
            self._device.set_mode,
            AirhumidifierMjjsqOperationMode.Humidity,
        ):
            # Presumably 3 is the raw value of
            # AirhumidifierMjjsqOperationMode.Humidity -- TODO confirm.
            self._mode = 3
            self.async_write_ha_state()
    async def async_set_mode(self, mode: str) -> None:
        """Set the mode of the fan."""
        # Guard against unsupported/empty modes, for consistency with the
        # other humidifier implementations in this module.
        if self.supported_features & SUPPORT_MODES == 0 or not mode:
            return
        if mode not in self.MODE_MAPPING:
            _LOGGER.warning("Mode %s is not a valid operation mode", mode)
            return
        _LOGGER.debug("Setting the operation mode to: %s", mode)
        # The mode can only be changed while the device is powered on.
        if self._state:
            if await self._try_command(
                "Setting operation mode of the miio device failed.",
                self._device.set_mode,
                self.MODE_MAPPING[mode],
            ):
                self._mode = self.MODE_MAPPING[mode].value
                self.async_write_ha_state()
| |
#!/usr/bin/env python3
# Copyright (c) 2018 The Navcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NavCoinTestFramework
from test_framework.staticr_util import *
#import time
class GetStakeReport(NavCoinTestFramework):
    """Tests getstakereport accounting."""
    def __init__(self):
        super().__init__()
        # Fresh chain for deterministic stake amounts across three nodes.
        self.setup_clean_chain = True
        self.num_nodes = 3
    def setup_network(self, split=False):
        """Connect the three nodes in a triangle so blocks propagate everywhere."""
        self.nodes = self.setup_nodes()
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
    def run_test(self):
        """Exercise getstakereport across direct, cold-staking and orphaned stakes."""
        # Turn off staking until we need it
        self.nodes[0].staking(False)
        self.nodes[1].staking(False)
        self.nodes[2].staking(False)
        # Make it to the static rewards fork!
        activate_staticr(self.nodes[0])
        self.sync_all()
        # Use THE spending address
        spending_address_public_key = self.nodes[1].getnewaddress()
        spending_address_private_key = self.nodes[1].dumpprivkey(spending_address_public_key)
        # Create a staking address
        staking_address_public_key = self.nodes[2].getnewaddress()
        staking_address_private_key = self.nodes[2].dumpprivkey(staking_address_public_key)
        # Import the 2 keys into a third wallet
        self.nodes[0].importprivkey(spending_address_private_key)
        self.nodes[0].importprivkey(staking_address_private_key)
        # Create the cold address
        coldstaking_address_staking = self.nodes[1].getcoldstakingaddress(staking_address_public_key, spending_address_public_key)
        # Send funds to the spending address (leave me NAV for fees)
        self.nodes[0].sendtoaddress(spending_address_public_key, self.nodes[0].getbalance() - 1)
        self.nodes[0].generate(1)
        self.sync_all()
        # Stake a block
        self.stake_block(self.nodes[1])
        # Load the last 24h stake amount for the wallets/nodes
        merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']
        spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']
        staking_address_last_24h = self.nodes[2].getstakereport()['Last 24H']
        # print('spending', spending_address_last_24h)
        # print('staking', staking_address_last_24h)
        # print('merged', merged_address_last_24h)
        # Make sure we have staked 2 NAV to the spending address
        # So that means spending last 24h == 2
        # And staking last 24h == 0 We have not sent any coins yet
        # And merged will have the total of the spending + staking
        assert_equal('2.00', merged_address_last_24h)
        assert_equal('2.00', spending_address_last_24h)
        assert_equal('0.00', staking_address_last_24h)
        # Send funds to the cold staking address (leave some NAV for fees)
        self.nodes[1].sendtoaddress(coldstaking_address_staking, self.nodes[1].getbalance() - 1)
        self.nodes[1].generate(1)
        self.sync_all()
        # Stake a block
        self.stake_block(self.nodes[2])
        # Load the last 24h stake amount for the wallets/nodes
        merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']
        spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']
        staking_address_last_24h = self.nodes[2].getstakereport()['Last 24H']
        # print('spending', spending_address_last_24h)
        # print('staking', staking_address_last_24h)
        # print('merged', merged_address_last_24h)
        # Make sure we staked 4 NAV in spending address (2 NAV via COLD Stake)
        # So that means spending last 24h == 4
        # And staking last 24h == 2 We stake 2 NAV via COLD already
        # And merged will have the total of the spending + staking
        assert_equal('4.00', merged_address_last_24h)
        assert_equal('4.00', spending_address_last_24h)
        assert_equal('2.00', staking_address_last_24h)
        # Time travel 2 days in the future
        cur_time = int(time.time())
        self.nodes[0].setmocktime(cur_time + 172800)
        self.nodes[1].setmocktime(cur_time + 172800)
        self.nodes[2].setmocktime(cur_time + 172800)
        # Stake a block
        self.stake_block(self.nodes[2])
        # Load the last 24h stake amount for the wallets/nodes
        merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']
        spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']
        staking_address_last_24h = self.nodes[2].getstakereport()['Last 24H']
        # Check the amounts
        assert_equal('2.00', merged_address_last_24h)
        assert_equal('2.00', spending_address_last_24h)
        assert_equal('2.00', staking_address_last_24h)
        # Load the last 7 days stake amount for the wallets/nodes
        merged_address_last_7d = self.nodes[0].getstakereport()['Last 7 Days']
        spending_address_last_7d = self.nodes[1].getstakereport()['Last 7 Days']
        staking_address_last_7d = self.nodes[2].getstakereport()['Last 7 Days']
        # Check the amounts
        assert_equal('6.00', merged_address_last_7d)
        assert_equal('6.00', spending_address_last_7d)
        assert_equal('4.00', staking_address_last_7d)
        # Load the averages for stake amounts
        avg_last7d = self.nodes[0].getstakereport()['Last 7 Days Avg']
        avg_last30d = self.nodes[0].getstakereport()['Last 30 Days Avg']
        avg_last365d = self.nodes[0].getstakereport()['Last 365 Days Avg']
        # Check the amounts
        assert_equal('3.00', avg_last7d)
        assert_equal('3.00', avg_last30d)
        assert_equal('3.00', avg_last365d)
        # Time travel 8 days in the future
        cur_time = int(time.time())
        self.nodes[0].setmocktime(cur_time + 691200)
        self.nodes[1].setmocktime(cur_time + 691200)
        self.nodes[2].setmocktime(cur_time + 691200)
        # Load the last 24h stake amount for the wallets/nodes
        merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']
        spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']
        staking_address_last_24h = self.nodes[2].getstakereport()['Last 24H']
        # Check the amounts
        assert_equal('0.00', merged_address_last_24h)
        assert_equal('0.00', spending_address_last_24h)
        assert_equal('0.00', staking_address_last_24h)
        # Load the last 7 days stake amount for the wallets/nodes
        merged_address_last_7d = self.nodes[0].getstakereport()['Last 7 Days']
        spending_address_last_7d = self.nodes[1].getstakereport()['Last 7 Days']
        staking_address_last_7d = self.nodes[2].getstakereport()['Last 7 Days']
        # Check the amounts
        assert_equal('2.00', merged_address_last_7d)
        assert_equal('2.00', spending_address_last_7d)
        assert_equal('2.00', staking_address_last_7d)
        # Load the averages for stake amounts
        avg_last7d = self.nodes[0].getstakereport()['Last 7 Days Avg']
        avg_last30d = self.nodes[0].getstakereport()['Last 30 Days Avg']
        avg_last365d = self.nodes[0].getstakereport()['Last 365 Days Avg']
        # Check the amounts
        assert_equal('0.28571428', avg_last7d)
        assert_equal('0.75', avg_last30d)
        assert_equal('0.75', avg_last365d)
        # Time travel 31 days in the future
        cur_time = int(time.time())
        self.nodes[0].setmocktime(cur_time + 2678400)
        self.nodes[1].setmocktime(cur_time + 2678400)
        self.nodes[2].setmocktime(cur_time + 2678400)
        # Load the last 24h stake amount for the wallets/nodes
        merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']
        spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']
        staking_address_last_24h = self.nodes[2].getstakereport()['Last 24H']
        # Check the amounts
        assert_equal('0.00', merged_address_last_24h)
        assert_equal('0.00', spending_address_last_24h)
        assert_equal('0.00', staking_address_last_24h)
        # Load the last 7 days stake amount for the wallets/nodes
        merged_address_last_7d = self.nodes[0].getstakereport()['Last 7 Days']
        spending_address_last_7d = self.nodes[1].getstakereport()['Last 7 Days']
        staking_address_last_7d = self.nodes[2].getstakereport()['Last 7 Days']
        # Check the amounts
        assert_equal('0.00', merged_address_last_7d)
        assert_equal('0.00', spending_address_last_7d)
        assert_equal('0.00', staking_address_last_7d)
        # Load the averages for stake amounts
        avg_last7d = self.nodes[0].getstakereport()['Last 7 Days Avg']
        avg_last30d = self.nodes[0].getstakereport()['Last 30 Days Avg']
        avg_last365d = self.nodes[0].getstakereport()['Last 365 Days Avg']
        # Check the amounts
        assert_equal('0.00', avg_last7d)
        assert_equal('0.06666666', avg_last30d)
        assert_equal('0.19354838', avg_last365d)
        # Disconnect the nodes
        for node in self.nodes[0].getpeerinfo():
            self.nodes[0].disconnectnode(node['addr'])
        time.sleep(2) #disconnecting a node needs a little bit of time
        assert(self.nodes[0].getpeerinfo() == [])
        # Stake a block on node 0
        orphaned_block_hash = self.stake_block(self.nodes[0], False)
        # Generate some blocks on node 1
        self.nodes[1].generate(100)
        # Reconnect the nodes
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        # Wait for blocks to sync
        self.sync_all()
        # Make sure the block was orphaned
        assert(self.nodes[0].getblock(orphaned_block_hash)['confirmations'] == -1)
        # Check the staked amount
        # Should be 0 (Zero) as the last staked block is orphaned
        assert_equal('0.00', self.nodes[0].getstakereport()['Last 7 Days'])
    def stake_block(self, node, mature = True):
        """Stake one block on *node* and return its hash.

        If *mature* is True, extra blocks are generated afterwards so the
        staked reward matures before the report is checked.
        """
        # Get the current block count to check against while we wait for a stake
        blockcount = node.getblockcount()
        # Turn staking on
        node.staking(True)
        # wait for a new block to be mined
        while node.getblockcount() == blockcount:
            # print("waiting for a new block...")
            time.sleep(1)
        # We got one
        # print("found a new block...")
        # Turn staking off
        node.staking(False)
        # Get the staked block
        block_hash = node.getbestblockhash()
        # Only mature the blocks if we asked for it
        if (mature):
            # Make sure the blocks are mature before we check the report
            slow_gen(node, 5, 0.5)
            self.sync_all()
        # return the block hash to the function caller
        return block_hash
# Standard functional-test entry point.
if __name__ == '__main__':
    GetStakeReport().main()
| |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Written by: David Lanstein ( dlanstein gmail com )
import string
import sys
import os.path
import logging
logging.getLogger('suds').setLevel(logging.ERROR)
from suds.client import Client
try:
# suds 0.3.8 and prior
from suds.transport.cache import FileCache
except:
# suds 0.3.9+
from suds.cache import FileCache
import suds.sudsobject
from suds.sax.element import Element
class SforceBaseClient(object):
    # Suds client / connection state
    _sforce = None
    _sessionId = None
    _location = None
    _product = 'Python Toolkit'
    _version = (0, 1, 3)
    _objectNamespace = None
    _strictResultTyping = False
    # SOAP header defaults; populated by the corresponding setter calls.
    # (A duplicate `_assignmentRuleHeader = None` line has been removed.)
    _allowFieldTruncationHeader = None
    _assignmentRuleHeader = None
    _callOptions = None
    _emailHeader = None
    _localeOptions = None
    _loginScopeHeader = None
    _mruHeader = None
    _packageVersionHeader = None
    _queryOptions = None
    _sessionHeader = None
    _userTerritoryDeleteHeader = None
def __init__(self, wsdl, cacheDuration = 0, **kwargs):
'''
Connect to Salesforce
'wsdl' : Location of WSDL
'cacheDuration' : Duration of HTTP GET cache in seconds, or 0 for no cache
'proxy' : Dict of pair of 'protocol' and 'location'
e.g. {'http': 'my.insecure.proxy.example.com:80'}
'username' : Username for HTTP auth when using a proxy ONLY
'password' : Password for HTTP auth when using a proxy ONLY
'''
# Suds can only accept WSDL locations with a protocol prepended
if '://' not in wsdl:
# TODO windows users???
# check if file exists, else let bubble up to suds as is
# definitely don't want to assume http or https
if os.path.isfile(wsdl):
wsdl = 'file://' + os.path.abspath(wsdl)
if cacheDuration > 0:
cache = FileCache()
cache.setduration(seconds = cacheDuration)
else:
cache = None
self._sforce = Client(wsdl, cache = cache)
# Set HTTP headers
headers = {'User-Agent': 'Salesforce/' + self._product + '/' + '.'.join(str(x) for x in self._version)}
# This HTTP header will not work until Suds gunzips/inflates the content
# 'Accept-Encoding': 'gzip, deflate'
self._sforce.set_options(headers = headers)
if kwargs.has_key('proxy'):
# urllib2 cannot handle HTTPS proxies yet (see bottom of README)
if kwargs['proxy'].has_key('https'):
raise NotImplementedError('Connecting to a proxy over HTTPS not yet implemented due to a \
limitation in the underlying urllib2 proxy implementation. However, traffic from a proxy to \
Salesforce will use HTTPS.')
self._sforce.set_options(proxy = kwargs['proxy'])
if kwargs.has_key('username'):
self._sforce.set_options(username = kwargs['username'])
if kwargs.has_key('password'):
self._sforce.set_options(password = kwargs['password'])
# Toolkit-specific methods
def generateHeader(self, sObjectType):
'''
Generate a SOAP header as defined in:
http://www.salesforce.com/us/developer/docs/api/Content/soap_headers.htm
'''
try:
return self._sforce.factory.create(sObjectType)
except:
print 'There is not a SOAP header of type %s' % sObjectType
def generateObject(self, sObjectType):
'''
Generate a Salesforce object, such as a Lead or Contact
'''
obj = self._sforce.factory.create('ens:sObject')
obj.type = sObjectType
return obj
def _handleResultTyping(self, result):
'''
If any of the following calls return a single result, and self._strictResultTyping is true,
return the single result, rather than [(SaveResult) {...}]:
convertLead()
create()
delete()
emptyRecycleBin()
invalidateSessions()
merge()
process()
retrieve()
undelete()
update()
upsert()
describeSObjects()
sendEmail()
'''
if self._strictResultTyping == False and len(result) == 1:
return result[0]
else:
return result
    def _marshallSObjects(self, sObjects, tag = 'sObjects'):
        '''
        Marshall generic sObjects into a list of SAX elements
        This code is going away ASAP
        tag param is for nested objects (e.g. MergeRequest) where
        key: object must be in <key/>, not <sObjects/>
        '''
        # Normalize a single object into a one-element tuple.
        if not isinstance(sObjects, (tuple, list)):
            sObjects = (sObjects, )
        # Request-style objects live in the 'tns' namespace; data sObjects in 'ens'.
        if sObjects[0].type in ['LeadConvert', 'SingleEmailMessage', 'MassEmailMessage']:
            nsPrefix = 'tns:'
        else:
            nsPrefix = 'ens:'
        li = []
        for obj in sObjects:
            el = Element(tag)
            el.set('xsi:type', nsPrefix + obj.type)
            for k, v in obj:
                # 'type' is carried in the xsi:type attribute, not as a child element.
                if k == 'type':
                    continue
                # This is here to avoid 'duplicate values' error when setting a field in fieldsToNull
                # Even a tag like <FieldName/> will trigger it
                if v == None:
                    # not going to win any awards for variable-naming scheme here
                    tmp = Element(k)
                    tmp.set('xsi:nil', 'true')
                    el.append(tmp)
                elif isinstance(v, (list, tuple)):
                    # One child element per value.
                    for value in v:
                        el.append(Element(k).setText(value))
                elif isinstance(v, suds.sudsobject.Object):
                    # Nested sObject (e.g. MergeRequest): recurse with the field name as tag.
                    el.append(self._marshallSObjects(v, k))
                else:
                    el.append(Element(k).setText(v))
            li.append(el)
        return li
def _setEndpoint(self, location):
'''
Set the endpoint after when Salesforce returns the URL after successful login()
'''
# suds 0.3.7+ supports multiple wsdl services, but breaks setlocation :(
# see https://fedorahosted.org/suds/ticket/261
try:
self._sforce.set_options(location = location)
except:
self._sforce.wsdl.service.setlocation(location)
self._location = location
    def _setHeaders(self, call = None):
        '''
        Attach particular SOAP headers to the request depending on the method call made
        '''
        # All calls, including utility calls, set the session header
        headers = {'SessionHeader': self._sessionHeader}
        # Each optional header below is attached only when it has been
        # configured AND the call is one that accepts it.
        if call in ('convertLead',
                    'create',
                    'merge',
                    'process',
                    'undelete',
                    'update',
                    'upsert'):
            if self._allowFieldTruncationHeader is not None:
                headers['AllowFieldTruncationHeader'] = self._allowFieldTruncationHeader
        if call in ('create',
                    'merge',
                    'update',
                    'upsert'):
            if self._assignmentRuleHeader is not None:
                headers['AssignmentRuleHeader'] = self._assignmentRuleHeader
        # CallOptions will only ever be set by the SforcePartnerClient
        if self._callOptions is not None:
            if call in ('create',
                        'merge',
                        'queryAll',
                        'query',
                        'queryMore',
                        'retrieve',
                        'search',
                        'update',
                        'upsert',
                        'convertLead',
                        'login',
                        'delete',
                        'describeGlobal',
                        'describeLayout',
                        'describeTabs',
                        'describeSObject',
                        'describeSObjects',
                        'getDeleted',
                        'getUpdated',
                        'process',
                        'undelete',
                        'getServerTimestamp',
                        'getUserInfo',
                        'setPassword',
                        'resetPassword'):
                headers['CallOptions'] = self._callOptions
        if call in ('create',
                    'delete',
                    'resetPassword',
                    'update',
                    'upsert'):
            if self._emailHeader is not None:
                headers['EmailHeader'] = self._emailHeader
        if call in ('describeSObject',
                    'describeSObjects'):
            if self._localeOptions is not None:
                headers['LocaleOptions'] = self._localeOptions
        if call == 'login':
            if self._loginScopeHeader is not None:
                headers['LoginScopeHeader'] = self._loginScopeHeader
        if call in ('create',
                    'merge',
                    'query',
                    'retrieve',
                    'update',
                    'upsert'):
            if self._mruHeader is not None:
                headers['MruHeader'] = self._mruHeader
        if call in ('convertLead',
                    'create',
                    'delete',
                    'describeGlobal',
                    'describeLayout',
                    'describeSObject',
                    'describeSObjects',
                    'describeTabs',
                    'merge',
                    'process',
                    'query',
                    'retrieve',
                    'search',
                    'undelete',
                    'update',
                    'upsert'):
            if self._packageVersionHeader is not None:
                headers['PackageVersionHeader'] = self._packageVersionHeader
        if call in ('query',
                    'queryAll',
                    'queryMore',
                    'retrieve'):
            if self._queryOptions is not None:
                headers['QueryOptions'] = self._queryOptions
        if call == 'delete':
            if self._userTerritoryDeleteHeader is not None:
                headers['UserTerritoryDeleteHeader'] = self._userTerritoryDeleteHeader
        # Hand the assembled header set to suds for the next service call.
        self._sforce.set_options(soapheaders = headers)
    def setStrictResultTyping(self, strictResultTyping):
        '''
        Set whether single results from any of the following calls return the result wrapped in a list,
        or simply the single result object:
        convertLead()
        create()
        delete()
        emptyRecycleBin()
        invalidateSessions()
        merge()
        process()
        retrieve()
        undelete()
        update()
        upsert()
        describeSObjects()
        sendEmail()
        '''
        # Consumed by _handleResultTyping() when unwrapping singleton results.
        self._strictResultTyping = strictResultTyping
    def getSessionId(self):
        '''Return the Salesforce session ID (None before a successful login()).'''
        return self._sessionId
    def getLocation(self):
        '''Return the current SOAP endpoint URL (None before login()).'''
        return self._location
    def getConnection(self):
        '''Return the underlying suds Client instance.'''
        return self._sforce
    def getLastRequest(self):
        '''Return the last SOAP request sent, as a string.'''
        return str(self._sforce.last_sent())
    def getLastResponse(self):
        '''Return the last SOAP response received, as a string.'''
        return str(self._sforce.last_received())
    # Core calls
    # Each call attaches its call-specific SOAP headers via _setHeaders()
    # before invoking the corresponding suds service method.
    def convertLead(self, leadConverts):
        '''
        Converts a Lead into an Account, Contact, or (optionally) an Opportunity.
        '''
        self._setHeaders('convertLead')
        return self._handleResultTyping(self._sforce.service.convertLead(leadConverts))
    def create(self, sObjects):
        '''Creates one or more new records; returns SaveResult(s).'''
        self._setHeaders('create')
        return self._handleResultTyping(self._sforce.service.create(sObjects))
    def delete(self, ids):
        '''
        Deletes one or more objects
        '''
        self._setHeaders('delete')
        return self._handleResultTyping(self._sforce.service.delete(ids))
    def emptyRecycleBin(self, ids):
        '''
        Permanently deletes one or more objects
        '''
        self._setHeaders('emptyRecycleBin')
        return self._handleResultTyping(self._sforce.service.emptyRecycleBin(ids))
    def getDeleted(self, sObjectType, startDate, endDate):
        '''
        Retrieves the list of individual objects that have been deleted within the
        given timespan for the specified object.
        '''
        self._setHeaders('getDeleted')
        return self._sforce.service.getDeleted(sObjectType, startDate, endDate)
    def getUpdated(self, sObjectType, startDate, endDate):
        '''
        Retrieves the list of individual objects that have been updated (added or
        changed) within the given timespan for the specified object.
        '''
        self._setHeaders('getUpdated')
        return self._sforce.service.getUpdated(sObjectType, startDate, endDate)
    def invalidateSessions(self, sessionIds):
        '''
        Invalidate a Salesforce session
        This should be used with extreme caution, for the following (undocumented) reason:
        All API connections for a given user share a single session ID
        This will call logout() WHICH LOGS OUT THAT USER FROM EVERY CONCURRENT SESSION

        return invalidateSessionsResult
        '''
        self._setHeaders('invalidateSessions')
        return self._handleResultTyping(self._sforce.service.invalidateSessions(sessionIds))
    def login(self, username, password, token):
        '''
        Login to Salesforce.com and starts a client session.
        Unlike other toolkits, token is a separate parameter, because
        Salesforce doesn't explicitly tell you to append it when it gives
        you a login error. Folks that are new to the API may not know this.
        'username' : Username
        'password' : Password
        'token'    : Security token (appended to the password on the wire)
        return LoginResult

        Side effects: stores the session ID, installs a SessionHeader for
        subsequent calls, and repoints the client at the server URL returned
        by the login result.
        '''
        self._setHeaders('login')
        # Salesforce expects the security token concatenated onto the password.
        result = self._sforce.service.login(username, password + token)
        # set session header so every subsequent call is authenticated
        header = self.generateHeader('SessionHeader')
        header.sessionId = result['sessionId']
        self.setSessionHeader(header)
        self._sessionId = result['sessionId']
        # change URL to point from test.salesforce.com to something like cs2-api.salesforce.com
        self._setEndpoint(result['serverUrl'])
        # na0.salesforce.com (a.k.a. ssl.salesforce.com) requires ISO-8859-1 instead of UTF-8
        if 'ssl.salesforce.com' in result['serverUrl'] or 'na0.salesforce.com' in result['serverUrl']:
            # currently, UTF-8 is hard-coded in Suds, can't implement this yet
            pass
        return result
def logout(self):
'''
Logout from Salesforce.com
This should be used with extreme caution, for the following (undocumented) reason:
All API connections for a given user share a single session ID
Calling logout() LOGS OUT THAT USER FROM EVERY CONCURRENT SESSION
return LogoutResult
'''
self._setHeaders('logout')
return self._sforce.service.logout()
    def merge(self, mergeRequests):
        # Merges up to three records of the same object type into one;
        # result is normalized via _handleResultTyping.
        self._setHeaders('merge')
        return self._handleResultTyping(self._sforce.service.merge(mergeRequests))
    def process(self, processRequests):
        # Submits records for approval processing; result is normalized via
        # _handleResultTyping.
        self._setHeaders('process')
        return self._handleResultTyping(self._sforce.service.process(processRequests))
def query(self, queryString):
'''
Executes a query against the specified object and returns data that matches
the specified criteria.
'''
self._setHeaders('query')
return self._sforce.service.query(queryString)
    def queryAll(self, queryString):
        '''
        Retrieves data from specified objects, whether or not they have been deleted.
        '''
        self._setHeaders('queryAll')
        return self._sforce.service.queryAll(queryString)
    def queryMore(self, queryLocator):
        '''
        Retrieves the next batch of objects from a query.

        'queryLocator': the locator returned by a prior query()/queryAll()
        result whose 'done' flag was false.
        '''
        self._setHeaders('queryMore')
        return self._sforce.service.queryMore(queryLocator)
def retrieve(self, fieldList, sObjectType, ids):
'''
Retrieves one or more objects based on the specified object IDs.
'''
self._setHeaders('retrieve')
return self._handleResultTyping(self._sforce.service.retrieve(fieldList, sObjectType, ids))
    def search(self, searchString):
        '''
        Executes a text search in your organization's data.
        '''
        self._setHeaders('search')
        return self._sforce.service.search(searchString)
    def undelete(self, ids):
        '''
        Undeletes one or more objects
        '''
        self._setHeaders('undelete')
        return self._handleResultTyping(self._sforce.service.undelete(ids))
    def update(self, sObjects):
        '''
        Updates one or more existing records; result is normalized via
        _handleResultTyping.
        '''
        self._setHeaders('update')
        return self._handleResultTyping(self._sforce.service.update(sObjects))
    def upsert(self, externalIdFieldName, sObjects):
        '''
        Creates new records and updates existing records, matching on the
        given external ID field; result is normalized via _handleResultTyping.
        '''
        self._setHeaders('upsert')
        return self._handleResultTyping(self._sforce.service.upsert(externalIdFieldName, sObjects))
    # Describe calls
    def describeGlobal(self):
        '''
        Retrieves a list of available objects in your organization
        '''
        self._setHeaders('describeGlobal')
        return self._sforce.service.describeGlobal()
    def describeLayout(self, sObjectType, recordTypeIds = None):
        '''
        Use describeLayout to retrieve information about the layout (presentation
        of data to users) for a given object type. The describeLayout call returns
        metadata about a given page layout, including layouts for edit and
        display-only views and record type mappings. Note that field-level security
        and layout editability affects which fields appear in a layout.

        'recordTypeIds' is optional; None requests the default layout.
        '''
        self._setHeaders('describeLayout')
        return self._sforce.service.describeLayout(sObjectType, recordTypeIds)
    # NOTE(review): parameter is spelled 'sObjectsType' (likely a typo for
    # 'sObjectType'), but renaming it would break keyword-argument callers.
    def describeSObject(self, sObjectsType):
        '''
        Describes metadata (field list and object properties) for the specified
        object.
        '''
        self._setHeaders('describeSObject')
        return self._sforce.service.describeSObject(sObjectsType)
    def describeSObjects(self, sObjectTypes):
        '''
        An array-based version of describeSObject; describes metadata (field list
        and object properties) for the specified object or array of objects.
        '''
        self._setHeaders('describeSObjects')
        return self._handleResultTyping(self._sforce.service.describeSObjects(sObjectTypes))
    # describeSoftphoneLayout not implemented
    # From the docs: "Use this call to obtain information about the layout of a SoftPhone.
    # Use only in the context of Salesforce CRM Call Center; do not call directly from client programs."
    def describeTabs(self):
        '''
        The describeTabs call returns information about the standard apps and
        custom apps, if any, available for the user who sends the call, including
        the list of tabs defined for each app.
        '''
        self._setHeaders('describeTabs')
        return self._sforce.service.describeTabs()
    # Utility calls
    def getServerTimestamp(self):
        '''
        Retrieves the current system timestamp (GMT) from the Web service.
        '''
        self._setHeaders('getServerTimestamp')
        return self._sforce.service.getServerTimestamp()
    def getUserInfo(self):
        '''
        Retrieves personal information for the user associated with the
        current session.
        '''
        self._setHeaders('getUserInfo')
        return self._sforce.service.getUserInfo()
    def resetPassword(self, userId):
        '''
        Changes a user's password to a system-generated value.
        '''
        self._setHeaders('resetPassword')
        return self._sforce.service.resetPassword(userId)
    def sendEmail(self, emails):
        '''
        Sends one or more email messages; result is normalized via
        _handleResultTyping.
        '''
        self._setHeaders('sendEmail')
        return self._handleResultTyping(self._sforce.service.sendEmail(emails))
    def setPassword(self, userId, password):
        '''
        Sets the specified user's password to the specified value.
        '''
        self._setHeaders('setPassword')
        return self._sforce.service.setPassword(userId, password)
    # SOAP header-related calls
    # Each setter stashes a header object on the instance; presumably the
    # stored headers are attached to outgoing requests by _setHeaders()
    # (defined elsewhere) — TODO confirm.
    def setAllowFieldTruncationHeader(self, header):
        self._allowFieldTruncationHeader = header
    def setAssignmentRuleHeader(self, header):
        self._assignmentRuleHeader = header
    # setCallOptions() is only implemented in SforcePartnerClient
    # http://www.salesforce.com/us/developer/docs/api/Content/sforce_api_header_calloptions.htm
    def setEmailHeader(self, header):
        self._emailHeader = header
    def setLocaleOptions(self, header):
        self._localeOptions = header
    def setLoginScopeHeader(self, header):
        self._loginScopeHeader = header
    def setMruHeader(self, header):
        self._mruHeader = header
    def setPackageVersionHeader(self, header):
        self._packageVersionHeader = header
    def setQueryOptions(self, header):
        self._queryOptions = header
    def setSessionHeader(self, header):
        self._sessionHeader = header
    def setUserTerritoryDeleteHeader(self, header):
        self._userTerritoryDeleteHeader = header
| |
"""Tests for Vizio config flow."""
import pytest
import voluptuous as vol
from homeassistant import data_entry_flow
from homeassistant.components.media_player import DEVICE_CLASS_SPEAKER, DEVICE_CLASS_TV
from homeassistant.components.vizio.config_flow import _get_config_schema
from homeassistant.components.vizio.const import (
CONF_APPS,
CONF_APPS_TO_INCLUDE_OR_EXCLUDE,
CONF_INCLUDE,
CONF_VOLUME_STEP,
DEFAULT_NAME,
DEFAULT_VOLUME_STEP,
DOMAIN,
VIZIO_SCHEMA,
)
from homeassistant.config_entries import (
SOURCE_IGNORE,
SOURCE_IMPORT,
SOURCE_USER,
SOURCE_ZEROCONF,
)
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_DEVICE_CLASS,
CONF_HOST,
CONF_NAME,
CONF_PIN,
CONF_PORT,
)
from homeassistant.core import HomeAssistant
from .const import (
ACCESS_TOKEN,
CURRENT_APP,
HOST,
HOST2,
MOCK_IMPORT_VALID_TV_CONFIG,
MOCK_INCLUDE_APPS,
MOCK_INCLUDE_NO_APPS,
MOCK_PIN_CONFIG,
MOCK_SPEAKER_CONFIG,
MOCK_TV_CONFIG_NO_TOKEN,
MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG,
MOCK_TV_WITH_EXCLUDE_CONFIG,
MOCK_USER_VALID_TV_CONFIG,
MOCK_ZEROCONF_SERVICE_INFO,
NAME,
NAME2,
UNIQUE_ID,
VOLUME_STEP,
)
from tests.common import MockConfigEntry
async def test_user_flow_minimum_fields(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test user config flow with minimum fields."""
    # test form shows
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # Speaker config needs no pairing/token, so the entry is created directly.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_SPEAKER_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_SPEAKER
async def test_user_flow_all_fields(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test user config flow with all fields."""
    # test form shows
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_USER_VALID_TV_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
    assert result["data"][CONF_ACCESS_TOKEN] == ACCESS_TOKEN
    # Apps are an options-flow concern; they must not land in config data here.
    assert CONF_APPS not in result["data"]
async def test_speaker_options_flow(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test options config flow for speaker."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_SPEAKER_CONFIG
    )
    await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry = result["result"]
    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={CONF_VOLUME_STEP: VOLUME_STEP}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
    # Speakers have no app support, so the apps option must not appear.
    assert CONF_APPS not in result["data"]
async def test_tv_options_flow_no_apps(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test options config flow for TV without providing apps option."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry = result["result"]
    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    options = {CONF_VOLUME_STEP: VOLUME_STEP}
    options.update(MOCK_INCLUDE_NO_APPS)
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input=options
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
    # An empty apps filter should be dropped from the stored options entirely.
    assert CONF_APPS not in result["data"]
async def test_tv_options_flow_with_apps(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test options config flow for TV with providing apps option."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry = result["result"]
    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    options = {CONF_VOLUME_STEP: VOLUME_STEP}
    options.update(MOCK_INCLUDE_APPS)
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input=options
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
    # The provided include list must be persisted under CONF_APPS.
    assert CONF_APPS in result["data"]
    assert result["data"][CONF_APPS] == {CONF_INCLUDE: [CURRENT_APP]}
async def test_tv_options_flow_start_with_volume(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test options config flow for TV with providing apps option after providing volume step in initial config."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry = result["result"]
    # First options pass stores only the volume step...
    result = await hass.config_entries.options.async_init(
        entry.entry_id, data={CONF_VOLUME_STEP: VOLUME_STEP}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert entry.options
    assert entry.options == {CONF_VOLUME_STEP: VOLUME_STEP}
    assert CONF_APPS not in entry.options
    assert CONF_APPS_TO_INCLUDE_OR_EXCLUDE not in entry.options
    # ...a second pass can then add the apps filter on top of it.
    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    options = {CONF_VOLUME_STEP: VOLUME_STEP}
    options.update(MOCK_INCLUDE_APPS)
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input=options
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
    assert CONF_APPS in result["data"]
    assert result["data"][CONF_APPS] == {CONF_INCLUDE: [CURRENT_APP]}
async def test_user_host_already_configured(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test host is already configured during user setup."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        unique_id=UNIQUE_ID,
    )
    entry.add_to_hass(hass)
    # Same host, different name: the duplicate host must be rejected.
    fail_entry = MOCK_SPEAKER_CONFIG.copy()
    fail_entry[CONF_NAME] = "newtestname"
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=fail_entry
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {CONF_HOST: "existing_config_entry_found"}
async def test_user_serial_number_already_exists(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test serial_number is already configured with different host and name during user setup."""
    # Set up new entry
    MockConfigEntry(
        domain=DOMAIN, data=MOCK_SPEAKER_CONFIG, unique_id=UNIQUE_ID
    ).add_to_hass(hass)
    # Set up new entry with same unique_id but different host and name
    fail_entry = MOCK_SPEAKER_CONFIG.copy()
    fail_entry[CONF_HOST] = HOST2
    fail_entry[CONF_NAME] = NAME2
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=fail_entry
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {CONF_HOST: "existing_config_entry_found"}
async def test_user_error_on_could_not_connect(
    hass: HomeAssistant, vizio_no_unique_id: pytest.fixture
) -> None:
    """Test with could_not_connect during user setup due to no connectivity."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # Connectivity failure is surfaced on the host field.
    assert result["errors"] == {CONF_HOST: "cannot_connect"}
async def test_user_error_on_could_not_connect_invalid_token(
    hass: HomeAssistant, vizio_cant_connect: pytest.fixture
) -> None:
    """Test with could_not_connect during user setup due to invalid token."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # A bad token is a general failure, so the error lands on "base".
    assert result["errors"] == {"base": "cannot_connect"}
async def test_user_tv_pairing_no_apps(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_complete_pairing: pytest.fixture,
) -> None:
    """Test pairing config flow when access token not provided for tv during user entry and no apps configured."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_TV_CONFIG_NO_TOKEN
    )
    # Missing token for a TV sends the flow into the PIN pairing step.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pair_tv"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_PIN_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pairing_complete"
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
    assert CONF_APPS not in result["data"]
async def test_user_start_pairing_failure(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_start_pairing_failure: pytest.fixture,
) -> None:
    """Test failure to start pairing from user config flow."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_TV_CONFIG_NO_TOKEN
    )
    # Failure to start pairing keeps the user on the initial form with an error.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "cannot_connect"}
async def test_user_invalid_pin(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_invalid_pin_failure: pytest.fixture,
) -> None:
    """Test failure to complete pairing from user config flow."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_TV_CONFIG_NO_TOKEN
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pair_tv"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_PIN_CONFIG
    )
    # A bad PIN re-shows the pairing form with a field-level error.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pair_tv"
    assert result["errors"] == {CONF_PIN: "complete_pairing_failed"}
async def test_user_ignore(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test user config flow doesn't throw an error when there's an existing ignored source."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        source=SOURCE_IGNORE,
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_SPEAKER_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_import_flow_minimum_fields(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test import config flow with minimum fields."""
    # The YAML schema fills in defaults (name, volume step) for minimal input.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(
            {CONF_HOST: HOST, CONF_DEVICE_CLASS: DEVICE_CLASS_SPEAKER}
        ),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == DEFAULT_NAME
    assert result["data"][CONF_NAME] == DEFAULT_NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_SPEAKER
    assert result["data"][CONF_VOLUME_STEP] == DEFAULT_VOLUME_STEP
async def test_import_flow_all_fields(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test import config flow with all fields."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_IMPORT_VALID_TV_CONFIG),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
    assert result["data"][CONF_ACCESS_TOKEN] == ACCESS_TOKEN
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
async def test_import_entity_already_configured(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test entity is already configured during import setup."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
        options={CONF_VOLUME_STEP: VOLUME_STEP},
    )
    entry.add_to_hass(hass)
    # Re-importing identical config must abort rather than duplicate.
    fail_entry = vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG.copy())
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_IMPORT}, data=fail_entry
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured_device"
async def test_import_flow_update_options(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test import config flow with updated options."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
    )
    await hass.async_block_till_done()
    assert result["result"].options == {CONF_VOLUME_STEP: DEFAULT_VOLUME_STEP}
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry_id = result["result"].entry_id
    # Re-importing with a changed volume step should update the existing
    # entry's options in place and abort with "updated_entry".
    updated_config = MOCK_SPEAKER_CONFIG.copy()
    updated_config[CONF_VOLUME_STEP] = VOLUME_STEP + 1
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(updated_config),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "updated_entry"
    config_entry = hass.config_entries.async_get_entry(entry_id)
    assert config_entry.options[CONF_VOLUME_STEP] == VOLUME_STEP + 1
async def test_import_flow_update_name_and_apps(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test import config flow with updated name and apps."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_IMPORT_VALID_TV_CONFIG),
    )
    await hass.async_block_till_done()
    assert result["result"].data[CONF_NAME] == NAME
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry_id = result["result"].entry_id
    updated_config = MOCK_IMPORT_VALID_TV_CONFIG.copy()
    updated_config[CONF_NAME] = NAME2
    updated_config[CONF_APPS] = {CONF_INCLUDE: [CURRENT_APP]}
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(updated_config),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "updated_entry"
    # Name and apps should be reflected in both data and options.
    config_entry = hass.config_entries.async_get_entry(entry_id)
    assert config_entry.data[CONF_NAME] == NAME2
    assert config_entry.data[CONF_APPS] == {CONF_INCLUDE: [CURRENT_APP]}
    assert config_entry.options[CONF_APPS] == {CONF_INCLUDE: [CURRENT_APP]}
async def test_import_flow_update_remove_apps(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test import config flow with removed apps."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_TV_WITH_EXCLUDE_CONFIG),
    )
    await hass.async_block_till_done()
    assert result["result"].data[CONF_NAME] == NAME
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    config_entry = hass.config_entries.async_get_entry(result["result"].entry_id)
    assert CONF_APPS in config_entry.data
    assert CONF_APPS in config_entry.options
    # Dropping apps from YAML should scrub them from the stored entry too.
    updated_config = MOCK_TV_WITH_EXCLUDE_CONFIG.copy()
    updated_config.pop(CONF_APPS)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(updated_config),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "updated_entry"
    assert CONF_APPS not in config_entry.data
    assert CONF_APPS not in config_entry.options
async def test_import_needs_pairing(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_complete_pairing: pytest.fixture,
) -> None:
    """Test pairing config flow when access token not provided for tv during import."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_IMPORT}, data=MOCK_TV_CONFIG_NO_TOKEN
    )
    # Import without a token falls back to the interactive user form.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_TV_CONFIG_NO_TOKEN
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pair_tv"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_PIN_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pairing_complete_import"
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
async def test_import_with_apps_needs_pairing(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_complete_pairing: pytest.fixture,
) -> None:
    """Test pairing config flow when access token not provided for tv but apps are included during import."""
    import_config = MOCK_TV_CONFIG_NO_TOKEN.copy()
    import_config[CONF_APPS] = {CONF_INCLUDE: [CURRENT_APP]}
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_IMPORT}, data=import_config
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # Mock inputting info without apps to make sure apps get stored
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input=_get_config_schema(MOCK_TV_CONFIG_NO_TOKEN)(MOCK_TV_CONFIG_NO_TOKEN),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pair_tv"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_PIN_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pairing_complete_import"
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
    # Apps from the original import config survive the pairing detour.
    assert result["data"][CONF_APPS][CONF_INCLUDE] == [CURRENT_APP]
async def test_import_flow_additional_configs(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test import config flow with additional configs defined in CONF_APPS."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG),
    )
    await hass.async_block_till_done()
    assert result["result"].data[CONF_NAME] == NAME
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    config_entry = hass.config_entries.async_get_entry(result["result"].entry_id)
    # Additional app configs live in data only, not in options.
    assert CONF_APPS in config_entry.data
    assert CONF_APPS not in config_entry.options
async def test_import_error(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test that error is logged when import config has an error."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        unique_id=UNIQUE_ID,
    )
    entry.add_to_hass(hass)
    fail_entry = MOCK_SPEAKER_CONFIG.copy()
    fail_entry[CONF_HOST] = "0.0.0.0"
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(fail_entry),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # Ensure error gets logged
    vizio_log_list = [
        log
        for log in caplog.records
        if log.name == "homeassistant.components.vizio.config_flow"
    ]
    assert len(vizio_log_list) == 1
async def test_import_ignore(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test import config flow doesn't throw an error when there's an existing ignored source."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        source=SOURCE_IGNORE,
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_zeroconf_flow(
hass: HomeAssistant,
vizio_connect: pytest.fixture,
vizio_bypass_setup: pytest.fixture,
vizio_guess_device_type: pytest.fixture,
) -> None:
"""Test zeroconf config flow."""
discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
# Form should always show even if all required properties are discovered
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# Apply discovery updates to entry to mimic when user hits submit without changing
# defaults which were set from discovery parameters
user_input = result["data_schema"](discovery_info)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=user_input
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_SPEAKER
async def test_zeroconf_flow_already_configured(
hass: HomeAssistant,
vizio_connect: pytest.fixture,
vizio_bypass_setup: pytest.fixture,
vizio_guess_device_type: pytest.fixture,
) -> None:
"""Test entity is already configured during zeroconf setup."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_SPEAKER_CONFIG,
options={CONF_VOLUME_STEP: VOLUME_STEP},
unique_id=UNIQUE_ID,
)
entry.add_to_hass(hass)
# Try rediscovering same device
discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
# Flow should abort because device is already setup
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_zeroconf_flow_with_port_in_host(
hass: HomeAssistant,
vizio_connect: pytest.fixture,
vizio_bypass_setup: pytest.fixture,
vizio_guess_device_type: pytest.fixture,
) -> None:
"""Test entity is already configured during zeroconf setup when port is in host."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_SPEAKER_CONFIG,
options={CONF_VOLUME_STEP: VOLUME_STEP},
unique_id=UNIQUE_ID,
)
entry.add_to_hass(hass)
# Try rediscovering same device, this time with port already in host
discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
discovery_info[
CONF_HOST
] = f"{discovery_info[CONF_HOST]}:{discovery_info[CONF_PORT]}"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
# Flow should abort because device is already setup
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_zeroconf_dupe_fail(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test zeroconf config flow when device gets discovered multiple times."""
    # First discovery: the user form is always shown, even when every
    # required property was discovered.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data=MOCK_ZEROCONF_SERVICE_INFO.copy(),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # Second discovery of the same device: aborted as already in progress.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data=MOCK_ZEROCONF_SERVICE_INFO.copy(),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_in_progress"
async def test_zeroconf_ignore(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test zeroconf discovery doesn't throw an error when there's an existing ignored source."""
    # Note: this ignored entry is created without a unique_id.
    ignored = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        source=SOURCE_IGNORE,
    )
    ignored.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data=MOCK_ZEROCONF_SERVICE_INFO.copy(),
    )
    # Discovery proceeds to the user form rather than erroring out.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_zeroconf_no_unique_id(
    hass: HomeAssistant,
    vizio_guess_device_type: pytest.fixture,
    vizio_no_unique_id: pytest.fixture,
) -> None:
    """Test zeroconf discovery aborts when unique_id is None."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data=MOCK_ZEROCONF_SERVICE_INFO.copy(),
    )
    # No unique_id from the device is treated as a connection failure.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "cannot_connect"
async def test_zeroconf_abort_when_ignored(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test zeroconf discovery aborts when the same host has been ignored."""
    # Unlike test_zeroconf_ignore, this ignored entry carries a unique_id.
    ignored = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        source=SOURCE_IGNORE,
        unique_id=UNIQUE_ID,
    )
    ignored.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data=MOCK_ZEROCONF_SERVICE_INFO.copy(),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_zeroconf_flow_already_configured_hostname(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_hostname_check: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test entity is already configured during zeroconf setup when existing entry uses hostname."""
    # Existing entry stores a hostname instead of an IP for the host.
    hostname_config = {**MOCK_SPEAKER_CONFIG, CONF_HOST: "hostname"}
    existing = MockConfigEntry(
        domain=DOMAIN,
        data=hostname_config,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        unique_id=UNIQUE_ID,
    )
    existing.add_to_hass(hass)
    # Rediscover the same device via zeroconf.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data=MOCK_ZEROCONF_SERVICE_INFO.copy(),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_import_flow_already_configured_hostname(
    hass: HomeAssistant,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_hostname_check: pytest.fixture,
) -> None:
    """Test entity is already configured during import setup when existing entry uses hostname."""
    # Existing entry stores a hostname instead of an IP for the host.
    hostname_config = {**MOCK_SPEAKER_CONFIG, CONF_HOST: "hostname"}
    existing = MockConfigEntry(
        domain=DOMAIN,
        data=hostname_config,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
    )
    existing.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
    )
    # The import updates the existing entry's host and aborts.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "updated_entry"
    assert existing.data[CONF_HOST] == HOST
| |
"""Home Assistant auth provider."""
import base64
from collections import OrderedDict
import logging
from typing import Any, Dict, List, Optional, Set, cast # noqa: F401
import bcrypt
import voluptuous as vol
from homeassistant.const import CONF_ID
from homeassistant.core import callback, HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from . import AuthProvider, AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, LoginFlow
from ..models import Credentials, UserMeta
STORAGE_VERSION = 1
STORAGE_KEY = 'auth_provider.homeassistant'
def _disallow_id(conf: Dict[str, Any]) -> Dict[str, Any]:
    """Validator that rejects configs carrying an explicit ID."""
    if CONF_ID not in conf:
        return conf
    raise vol.Invalid(
        'ID is not allowed for the homeassistant auth provider.')
CONFIG_SCHEMA = vol.All(AUTH_PROVIDER_SCHEMA, _disallow_id)
class InvalidAuth(HomeAssistantError):
    """Raised when we encounter invalid authentication.

    Data.validate_login raises this for both an unknown username and a
    wrong password, so callers cannot distinguish the two cases.
    """
class InvalidUser(HomeAssistantError):
    """Raised when invalid user is specified.

    Will not be raised when validating authentication (see InvalidAuth);
    used by add/remove/change-password operations on unknown users.
    """
class Data:
    """Hold the user data.

    Users live in a private hass Store under 'auth_provider.homeassistant'
    as {'users': [{'username': ..., 'password': ...}, ...]}, where
    'password' is a base64-encoded bcrypt hash.
    """
    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize the user data store."""
        self.hass = hass
        self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY,
                                                 private=True)
        self._data = None  # type: Optional[Dict[str, Any]]
        # Legacy mode will allow usernames to start/end with whitespace
        # and will compare usernames case-insensitive.
        # Remove in 2020 or when we launch 1.0.
        self.is_legacy = False
    @callback
    def normalize_username(self, username: str) -> str:
        """Normalize a username based on the mode.

        Legacy mode returns the name untouched; otherwise it is stripped
        and casefolded so comparisons are case-insensitive.
        """
        if self.is_legacy:
            return username
        return username.strip().casefold()
    async def async_load(self) -> None:
        """Load stored data and detect whether legacy mode is required."""
        data = await self._store.async_load()
        if data is None:
            data = {
                'users': []
            }
        seen = set()  # type: Set[str]
        for user in data['users']:
            username = user['username']
            # Two usernames differing only in case would normalize to the
            # same key, so their presence forces legacy (exact-match) mode.
            folded = username.casefold()
            if folded in seen:
                self.is_legacy = True
                # BUGFIX: the two literals used to concatenate to
                # "case-insensitiveequivalent" (missing space).
                logging.getLogger(__name__).warning(
                    "Home Assistant auth provider is running in legacy mode "
                    "because we detected usernames that are case-insensitive "
                    "equivalent. Please change the username: '%s'.", username)
                break
            seen.add(folded)
            # Usernames with surrounding whitespace also force legacy mode,
            # since normalization would strip them.
            if username != username.strip():
                self.is_legacy = True
                logging.getLogger(__name__).warning(
                    "Home Assistant auth provider is running in legacy mode "
                    "because we detected usernames that start or end in a "
                    "space. Please change the username: '%s'.", username)
                break
        self._data = data
    @property
    def users(self) -> List[Dict[str, str]]:
        """Return users."""
        return self._data['users']  # type: ignore
    def validate_login(self, username: str, password: str) -> None:
        """Validate a username and password.

        Raises InvalidAuth if auth invalid.
        """
        username = self.normalize_username(username)
        dummy = b'$2b$12$CiuFGszHx9eNHxPuQcwBWez4CwDTOcLTX5CbOpV6gef2nYuXkY7BO'
        found = None
        # Compare all users to avoid timing attacks.
        for user in self.users:
            if self.normalize_username(user['username']) == username:
                found = user
        if found is None:
            # check a hash to make timing the same as if user was found
            bcrypt.checkpw(b'foo',
                           dummy)
            raise InvalidAuth
        user_hash = base64.b64decode(found['password'])
        # bcrypt.checkpw is timing-safe
        if not bcrypt.checkpw(password.encode(),
                              user_hash):
            raise InvalidAuth
    # pylint: disable=no-self-use
    def hash_password(self, password: str, for_storage: bool = False) -> bytes:
        """Encode a password; base64-encode it when destined for storage."""
        # Reformatted: the old backslash continuation before the `# type:`
        # comment was fragile.
        hashed = bcrypt.hashpw(
            password.encode(), bcrypt.gensalt(rounds=12))  # type: bytes
        if for_storage:
            hashed = base64.b64encode(hashed)
        return hashed
    def add_auth(self, username: str, password: str) -> None:
        """Add a new authenticated user/pass.

        Raises InvalidUser when the (normalized) username already exists.
        """
        username = self.normalize_username(username)
        if any(self.normalize_username(user['username']) == username
               for user in self.users):
            raise InvalidUser
        self.users.append({
            'username': username,
            'password': self.hash_password(password, True).decode(),
        })
    @callback
    def async_remove_auth(self, username: str) -> None:
        """Remove authentication.

        Raises InvalidUser when the username is unknown.
        """
        username = self.normalize_username(username)
        index = None
        for i, user in enumerate(self.users):
            if self.normalize_username(user['username']) == username:
                index = i
                break
        if index is None:
            raise InvalidUser
        self.users.pop(index)
    def change_password(self, username: str, new_password: str) -> None:
        """Update the password.

        Raises InvalidUser if user cannot be found.
        """
        username = self.normalize_username(username)
        for user in self.users:
            if self.normalize_username(user['username']) == username:
                user['password'] = self.hash_password(
                    new_password, True).decode()
                break
        else:
            raise InvalidUser
    async def async_save(self) -> None:
        """Save data."""
        await self._store.async_save(self._data)
@AUTH_PROVIDERS.register('homeassistant')
class HassAuthProvider(AuthProvider):
    """Auth provider based on a local storage of users in HASS config dir."""
    DEFAULT_TITLE = 'Home Assistant Local'
    # Lazily-created Data store; populated by async_initialize().
    data = None
    async def async_initialize(self) -> None:
        """Initialize the auth provider (idempotent)."""
        if self.data is not None:
            return
        self.data = Data(self.hass)
        await self.data.async_load()
    async def async_login_flow(
            self, context: Optional[Dict]) -> LoginFlow:
        """Return a flow to login."""
        return HassLoginFlow(self)
    async def async_validate_login(self, username: str, password: str) -> None:
        """Validate a username and password.

        Runs Data.validate_login in an executor since bcrypt.checkpw is
        blocking.
        """
        if self.data is None:
            await self.async_initialize()
            assert self.data is not None
        await self.hass.async_add_executor_job(
            self.data.validate_login, username, password)
    async def async_get_or_create_credentials(
            self, flow_result: Dict[str, str]) -> Credentials:
        """Get credentials based on the flow result."""
        if self.data is None:
            await self.async_initialize()
            assert self.data is not None
        norm_username = self.data.normalize_username
        username = norm_username(flow_result['username'])
        # Reuse an existing credential for this (normalized) username.
        for credential in await self.async_credentials():
            if norm_username(credential.data['username']) == username:
                return credential
        # Create new credentials.
        return self.async_create_credentials({
            'username': username
        })
    async def async_user_meta_for_credentials(
            self, credentials: Credentials) -> UserMeta:
        """Get extra info for this credential."""
        return UserMeta(name=credentials.data['username'], is_active=True)
    async def async_will_remove_credentials(
            self, credentials: Credentials) -> None:
        """When credentials get removed, also remove the auth."""
        if self.data is None:
            await self.async_initialize()
            assert self.data is not None
        try:
            self.data.async_remove_auth(credentials.data['username'])
            await self.data.async_save()
        except InvalidUser:
            # Can happen if somehow we didn't clean up a credential
            pass
class HassLoginFlow(LoginFlow):
    """Handler for the login flow."""
    async def async_step_init(
            self, user_input: Optional[Dict[str, str]] = None) \
            -> Dict[str, Any]:
        """Handle the step of the form.

        Shows the username/password form; on submit, validates the login
        and finishes with the password removed from the result dict.
        """
        errors = {}
        if user_input is not None:
            try:
                await cast(HassAuthProvider, self._auth_provider)\
                    .async_validate_login(user_input['username'],
                                          user_input['password'])
            except InvalidAuth:
                errors['base'] = 'invalid_auth'
            if not errors:
                # Never pass the plaintext password further down the flow.
                user_input.pop('password')
                return await self.async_finish(user_input)
        schema = OrderedDict()  # type: Dict[str, type]
        schema['username'] = str
        schema['password'] = str
        return self.async_show_form(
            step_id='init',
            data_schema=vol.Schema(schema),
            errors=errors,
        )
| |
import os
from django import http
from django.db.transaction import non_atomic_requests
from django.shortcuts import get_object_or_404, redirect
import caching.base as caching
import commonware.log
from mobility.decorators import mobile_template
from olympia import amo
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import HttpResponseSendFile, urlparams, render
from olympia.access import acl
from olympia.addons.decorators import (
addon_view_factory, owner_or_unlisted_reviewer)
from olympia.addons.models import Addon
from olympia.files.models import File
from olympia.versions.models import Version
# The version detail page redirects to the version within pagination, so we
# need to enforce the number of versions per page.
PER_PAGE = 30
addon_view = addon_view_factory(Addon.objects.valid)
log = commonware.log.getLogger('z.versions')
def _version_list_qs(addon, beta=False):
    """Listed versions of `addon` whose files carry the appropriate status."""
    # Pick the file status that should be visible for this listing.
    if beta:
        wanted_status = amo.STATUS_BETA
    elif addon.is_unreviewed():
        wanted_status = amo.STATUS_AWAITING_REVIEW
    else:
        wanted_status = amo.STATUS_PUBLIC
    # Keep the two filter() calls chained (not merged) to preserve the
    # original query semantics across the files relation.
    qs = addon.versions.filter(channel=amo.RELEASE_CHANNEL_LISTED)
    qs = qs.filter(files__status=wanted_status)
    return qs.distinct().order_by('-created')
@addon_view
@mobile_template('versions/{mobile/}version_list.html')
@non_atomic_requests
def version_list(request, addon, template, beta=False):
    """Render a paginated list of an add-on's listed versions."""
    qs = _version_list_qs(addon, beta=beta)
    versions = amo.utils.paginate(request, qs, PER_PAGE)
    # Materialize the current page, then batch-load related data on it.
    versions.object_list = list(versions.object_list)
    Version.transformer(versions.object_list)
    return render(request, template, {'addon': addon, 'beta': beta,
                                      'versions': versions})
@addon_view
@non_atomic_requests
def version_detail(request, addon, version_num):
    """Redirect a version-detail URL to its page in the version list."""
    # VERSION_BETA appears to be a regex for beta-style version strings;
    # its match result is used as the beta flag. TODO confirm semantics.
    beta = amo.VERSION_BETA.search(version_num)
    qs = _version_list_qs(addon, beta=beta)
    # Use cached_with since values_list won't be cached.
    def f():
        return _find_version_page(qs, addon, version_num, beta=beta)
    return caching.cached_with(qs, f, 'vd:%s:%s' % (addon.id, version_num))
def _find_version_page(qs, addon, version_num, beta=False):
    """Redirect to the version-list page that contains `version_num`.

    Looks up `version_num` in the already filtered/ordered queryset and
    redirects to the paginated list page containing it, with a fragment
    anchor for the version.  Raises Http404 when the version isn't listed.
    """
    if beta:
        url = reverse('addons.beta-versions', args=[addon.slug])
    else:
        url = reverse('addons.versions', args=[addon.slug])
    ids = list(qs.values_list('version', flat=True))
    if version_num in ids:
        # Explicit floor division: plain `/` only happened to work under
        # Python 2 and would yield a float page number on Python 3.
        page = 1 + ids.index(version_num) // PER_PAGE
        to = urlparams(url, 'version-%s' % version_num, page=page)
        return http.HttpResponseRedirect(to)
    else:
        raise http.Http404()
@addon_view
@non_atomic_requests
def update_info(request, addon, version_num):
    """Render the update-info page for a listed version as XHTML."""
    qs = addon.versions.filter(version=version_num,
                               files__status__in=amo.VALID_FILE_STATUSES,
                               channel=amo.RELEASE_CHANNEL_LISTED)
    if not qs:
        raise http.Http404()
    # Flag for the template when the client explicitly accepts XHTML.
    serve_xhtml = ('application/xhtml+xml' in
                   request.META.get('HTTP_ACCEPT', '').lower())
    return render(request, 'versions/update_info.html',
                  {'version': qs[0], 'serve_xhtml': serve_xhtml},
                  content_type='application/xhtml+xml')
@non_atomic_requests
def update_info_redirect(request, version_id):
    """Permanently redirect an update-info URL keyed by version pk."""
    version = get_object_or_404(Version.objects, pk=version_id)
    url = reverse('addons.versions.update_info',
                  args=(version.addon.id, version.version))
    return redirect(url, permanent=True)
# Should accept junk at the end for filename goodness.
@non_atomic_requests
def download_file(request, file_id, type=None, file_=None, addon=None):
    """Serve a file, redirecting public files to the CDN.

    Disabled add-ons/files and unlisted versions are served directly, but
    only to reviewers and owners; everyone else gets a 404.
    """
    def is_editor(channel):
        # Reviewer permission differs between listed and unlisted channels.
        return (acl.check_addons_reviewer(request)
                if channel == amo.RELEASE_CHANNEL_LISTED
                else acl.check_unlisted_addons_reviewer(request))
    if not file_:
        file_ = get_object_or_404(File.objects, pk=file_id)
    if not addon:
        addon = get_object_or_404(Addon.objects,
                                  pk=file_.version.addon_id)
    channel = file_.version.channel
    if addon.is_disabled or file_.status == amo.STATUS_DISABLED:
        if is_editor(channel) or acl.check_addon_ownership(
                request, addon, dev=True, viewer=True, ignore_disabled=True):
            return HttpResponseSendFile(
                request, file_.guarded_file_path,
                content_type='application/x-xpinstall')
        else:
            log.info(
                u'download file {file_id}: addon/file disabled and '
                u'user {user_id} is not an owner or reviewer.'.format(
                    file_id=file_id, user_id=request.user.pk))
            raise http.Http404()  # Not owner or admin.
    if channel == amo.RELEASE_CHANNEL_UNLISTED:
        if is_editor(channel) or acl.check_addon_ownership(
                request, addon, dev=True, viewer=True, ignore_disabled=True):
            return HttpResponseSendFile(
                request, file_.file_path,
                content_type='application/x-xpinstall')
        else:
            log.info(
                u'download file {file_id}: version is unlisted and '
                u'user {user_id} is not an owner or reviewer.'.format(
                    file_id=file_id, user_id=request.user.pk))
            raise http.Http404()  # Not owner or admin.
    # Public file: hand off to the CDN with the hash for verification.
    attachment = (type == 'attachment' or not request.APP.browser)
    loc = urlparams(file_.get_file_cdn_url(attachment=attachment),
                    filehash=file_.hash)
    response = http.HttpResponseRedirect(loc)
    response['X-Target-Digest'] = file_.hash
    return response
def guard():
    """Queryset of add-ons that have a current version."""
    qs = Addon.objects.filter(_current_version__isnull=False)
    return qs
@addon_view_factory(guard)
@non_atomic_requests
def download_latest(request, addon, beta=False, type='xpi', platform=None):
    """Download the latest (current or current-beta) file for an add-on."""
    # PLATFORM_ALL always qualifies; a specific platform is added on top.
    platforms = [amo.PLATFORM_ALL.id]
    if platform is not None and int(platform) in amo.PLATFORMS:
        platforms.append(int(platform))
    if beta:
        if not addon.show_beta:
            raise http.Http404()
        version = addon.current_beta_version.id
    else:
        version = addon._current_version_id
    files = File.objects.filter(platform__in=platforms,
                                version=version)
    try:
        # If there's a file matching our platform, it'll float to the end.
        file_ = sorted(files, key=lambda f: f.platform == platforms[-1])[-1]
    except IndexError:
        raise http.Http404()
    return download_file(request, file_.id, type=type, file_=file_,
                         addon=addon)
@non_atomic_requests
def download_source(request, version_id):
    """Serve the uploaded source archive for a version, with access checks."""
    version = get_object_or_404(Version.objects, pk=version_id)
    # General case: version is listed.
    if version.channel == amo.RELEASE_CHANNEL_LISTED:
        if not (version.source and
                (acl.check_addon_ownership(
                    request, version.addon,
                    viewer=True, ignore_disabled=True) or
                acl.action_allowed(request, 'Editors', 'BinarySource'))):
            raise http.Http404()
    else:
        if not owner_or_unlisted_reviewer(request, version.addon):
            raise http.Http404  # Not listed, not owner or unlisted reviewer.
    res = HttpResponseSendFile(request, version.source.path)
    path = version.source.path
    # NOTE: `unicode` keeps this module Python 2-only.
    if not isinstance(path, unicode):
        path = path.decode('utf8')
    # Strip double quotes so the filename can be safely quoted in the header.
    name = os.path.basename(path.replace(u'"', u''))
    disposition = u'attachment; filename="{0}"'.format(name).encode('utf8')
    res['Content-Disposition'] = disposition
    return res
| |
from __future__ import with_statement
from contextlib import contextmanager
from fudge import Fake, patched_context, with_fakes
import unittest
from nose.tools import eq_, raises, ok_
import random
import sys
import fabric
from fabric.tasks import WrappedCallableTask, execute, Task, get_task_details
from fabric.main import display_command
from fabric.api import run, env, settings, hosts, roles, hide, parallel, task, runs_once, serial
from fabric.network import from_dict
from fabric.exceptions import NetworkError
from utils import eq_, FabricTest, aborts, mock_streams
from server import server
def test_base_task_provides_undefined_name():
    # A bare Task reports the placeholder name "undefined".
    base_task = Task()
    eq_("undefined", base_task.name)
@raises(NotImplementedError)
def test_base_task_raises_exception_on_call_to_run():
    # The abstract base Task must refuse to be run directly.
    base_task = Task()
    base_task.run()
class TestWrappedCallableTask(unittest.TestCase):
    """Unit tests for WrappedCallableTask's wrapping behavior."""
    def test_passes_unused_args_to_parent(self):
        args = [i for i in range(random.randint(1, 10))]
        def foo(): pass
        try:
            task = WrappedCallableTask(foo, *args)
        except TypeError:
            msg = "__init__ raised a TypeError, meaning args weren't handled"
            self.fail(msg)
    def test_passes_unused_kwargs_to_parent(self):
        random_range = range(random.randint(1, 10))
        kwargs = dict([("key_%s" % i, i) for i in random_range])
        def foo(): pass
        try:
            task = WrappedCallableTask(foo, **kwargs)
        except TypeError:
            self.fail(
                "__init__ raised a TypeError, meaning kwargs weren't handled")
    def test_allows_any_number_of_args(self):
        args = [i for i in range(random.randint(0, 10))]
        def foo(): pass
        task = WrappedCallableTask(foo, *args)
    def test_allows_any_number_of_kwargs(self):
        kwargs = dict([("key%d" % i, i) for i in range(random.randint(0, 10))])
        def foo(): pass
        task = WrappedCallableTask(foo, **kwargs)
    def test_run_is_wrapped_callable(self):
        def foo(): pass
        task = WrappedCallableTask(foo)
        eq_(task.wrapped, foo)
    def test_name_is_the_name_of_the_wrapped_callable(self):
        def foo(): pass
        foo.__name__ = "random_name_%d" % random.randint(1000, 2000)
        task = WrappedCallableTask(foo)
        eq_(task.name, foo.__name__)
    def test_name_can_be_overridden(self):
        def foo(): pass
        eq_(WrappedCallableTask(foo).name, 'foo')
        eq_(WrappedCallableTask(foo, name='notfoo').name, 'notfoo')
    def test_reads_double_under_doc_from_callable(self):
        def foo(): pass
        foo.__doc__ = "Some random __doc__: %d" % random.randint(1000, 2000)
        task = WrappedCallableTask(foo)
        eq_(task.__doc__, foo.__doc__)
    def test_dispatches_to_wrapped_callable_on_run(self):
        random_value = "some random value %d" % random.randint(1000, 2000)
        def foo(): return random_value
        task = WrappedCallableTask(foo)
        eq_(random_value, task())
    def test_passes_all_regular_args_to_run(self):
        def foo(*args): return args
        random_args = tuple(
            [random.randint(1000, 2000) for i in range(random.randint(1, 5))]
        )
        task = WrappedCallableTask(foo)
        eq_(random_args, task(*random_args))
    def test_passes_all_keyword_args_to_run(self):
        def foo(**kwargs): return kwargs
        random_kwargs = {}
        for i in range(random.randint(1, 5)):
            random_key = ("foo", "bar", "baz", "foobar", "barfoo")[i]
            random_kwargs[random_key] = random.randint(1000, 2000)
        task = WrappedCallableTask(foo)
        eq_(random_kwargs, task(**random_kwargs))
    def test_calling_the_object_is_the_same_as_run(self):
        random_return = random.randint(1000, 2000)
        def foo(): return random_return
        task = WrappedCallableTask(foo)
        eq_(task(), task.run())
class TestTask(unittest.TestCase):
    """Tests for alias handling on the base Task class."""
    def test_takes_an_alias_kwarg_and_wraps_it_in_aliases_list(self):
        alias = "alias_%d" % random.randint(100, 200)
        aliased_task = Task(alias=alias)
        self.assertTrue(alias in aliased_task.aliases)
    def test_aliases_are_set_based_on_provided_aliases(self):
        provided = ["a_%d" % i for i in range(random.randint(1, 10))]
        created = Task(aliases=provided)
        for name in provided:
            self.assertTrue(name in created.aliases)
    def test_aliases_are_None_by_default(self):
        self.assertTrue(Task().aliases is None)
# Reminder: decorator syntax, e.g.:
# @foo
# def bar():...
#
# is semantically equivalent to:
# def bar():...
# bar = foo(bar)
#
# this simplifies testing :)
def test_decorator_incompatibility_on_task():
    """A @task-wrapped callable should tolerate the other fabric decorators."""
    from fabric.decorators import task, hosts, runs_once, roles
    def foo(): return "foo"
    foo = task(foo)
    # since we aren't setting foo to be the newly decorated thing, its cool
    hosts('me@localhost')(foo)
    runs_once(foo)
    roles('www')(foo)
def test_decorator_closure_hiding():
    """
    @task should not accidentally destroy decorated attributes from @hosts/etc
    """
    from fabric.decorators import task, hosts
    def foo():
        print(env.host_string)
    # Decoration order matters here: @hosts first, then @task on the result.
    foo = task(hosts("me@localhost")(foo))
    eq_(["me@localhost"], foo.hosts)
#
# execute()
#
def dict_contains(superset, subset):
    """
    Assert that all key/val pairs in dict 'subset' also exist in 'superset'
    """
    # .items() instead of the Python 2-only .iteritems(): iteration results
    # are identical under Python 2 and this keeps the helper working under
    # Python 3 as well.
    for key, value in subset.items():
        ok_(key in superset)
        eq_(superset[key], value)
class TestExecute(FabricTest):
    """Tests for execute(): task lookup, host resolution and return values."""
    @with_fakes
    def test_calls_task_function_objects(self):
        """
        should execute the passed-in function object
        """
        execute(Fake(callable=True, expect_call=True))
    @with_fakes
    def test_should_look_up_task_name(self):
        """
        should also be able to handle task name strings
        """
        name = 'task1'
        commands = {name: Fake(callable=True, expect_call=True)}
        with patched_context(fabric.state, 'commands', commands):
            execute(name)
    @with_fakes
    def test_should_handle_name_of_Task_object(self):
        """
        handle corner case of Task object referrred to by name
        """
        name = 'task2'
        class MyTask(Task):
            run = Fake(callable=True, expect_call=True)
        mytask = MyTask()
        mytask.name = name
        commands = {name: mytask}
        with patched_context(fabric.state, 'commands', commands):
            execute(name)
    @aborts
    def test_should_abort_if_task_name_not_found(self):
        """
        should abort if given an invalid task name
        """
        execute('thisisnotavalidtaskname')
    def test_should_not_abort_if_task_name_not_found_with_skip(self):
        """
        should not abort if given an invalid task name
        and skip_unknown_tasks in env
        """
        env.skip_unknown_tasks = True
        execute('thisisnotavalidtaskname')
        del env['skip_unknown_tasks']
    @with_fakes
    def test_should_pass_through_args_kwargs(self):
        """
        should pass in any additional args, kwargs to the given task.
        """
        task = (
            Fake(callable=True, expect_call=True)
            .with_args('foo', biz='baz')
        )
        execute(task, 'foo', biz='baz')
    @with_fakes
    def test_should_honor_hosts_kwarg(self):
        """
        should use hosts kwarg to set run list
        """
        # Make two full copies of a host list
        hostlist = ['a', 'b', 'c']
        hosts = hostlist[:]
        # Side-effect which asserts the value of env.host_string when it runs
        def host_string():
            eq_(env.host_string, hostlist.pop(0))
        task = Fake(callable=True, expect_call=True).calls(host_string)
        with hide('everything'):
            execute(task, hosts=hosts)
    def test_should_honor_hosts_decorator(self):
        """
        should honor @hosts on passed-in task objects
        """
        # Make two full copies of a host list
        hostlist = ['a', 'b', 'c']
        @hosts(*hostlist[:])
        def task():
            eq_(env.host_string, hostlist.pop(0))
        with hide('running'):
            execute(task)
    def test_should_honor_roles_decorator(self):
        """
        should honor @roles on passed-in task objects
        """
        # Make two full copies of a host list
        roledefs = {'role1': ['a', 'b', 'c']}
        role_copy = roledefs['role1'][:]
        @roles('role1')
        def task():
            eq_(env.host_string, role_copy.pop(0))
        with settings(hide('running'), roledefs=roledefs):
            execute(task)
    @with_fakes
    def test_should_set_env_command_to_string_arg(self):
        """
        should set env.command to any string arg, if given
        """
        name = "foo"
        def command():
            eq_(env.command, name)
        task = Fake(callable=True, expect_call=True).calls(command)
        with patched_context(fabric.state, 'commands', {name: task}):
            execute(name)
    @with_fakes
    def test_should_set_env_command_to_name_attr(self):
        """
        should set env.command to TaskSubclass.name if possible
        """
        name = "foo"
        def command():
            eq_(env.command, name)
        task = (
            Fake(callable=True, expect_call=True)
            .has_attr(name=name)
            .calls(command)
        )
        execute(task)
    @with_fakes
    def test_should_set_all_hosts(self):
        """
        should set env.all_hosts to its derived host list
        """
        hosts = ['a', 'b']
        roledefs = {'r1': ['c', 'd']}
        roles = ['r1']
        exclude_hosts = ['a']
        def command():
            eq_(set(env.all_hosts), set(['b', 'c', 'd']))
        task = Fake(callable=True, expect_call=True).calls(command)
        with settings(hide('everything'), roledefs=roledefs):
            execute(
                task, hosts=hosts, roles=roles, exclude_hosts=exclude_hosts
            )
    @mock_streams('stdout')
    def test_should_print_executing_line_per_host(self):
        """
        should print "Executing" line once per host
        """
        def task():
            pass
        execute(task, hosts=['host1', 'host2'])
        eq_(sys.stdout.getvalue(), """[host1] Executing task 'task'
[host2] Executing task 'task'
""")
    @mock_streams('stdout')
    def test_should_not_print_executing_line_for_singletons(self):
        """
        should not print "Executing" line for non-networked tasks
        """
        def task():
            pass
        with settings(hosts=[]):  # protect against really odd test bleed :(
            execute(task)
        eq_(sys.stdout.getvalue(), "")
    def test_should_return_dict_for_base_case(self):
        """
        Non-network-related tasks should return a dict w/ special key
        """
        def task():
            return "foo"
        eq_(execute(task), {'<local-only>': 'foo'})
    @server(port=2200)
    @server(port=2201)
    def test_should_return_dict_for_serial_use_case(self):
        """
        Networked but serial tasks should return per-host-string dict
        """
        ports = [2200, 2201]
        hosts = map(lambda x: '127.0.0.1:%s' % x, ports)
        def task():
            run("ls /simple")
            return "foo"
        with hide('everything'):
            eq_(execute(task, hosts=hosts), {
                '127.0.0.1:2200': 'foo',
                '127.0.0.1:2201': 'foo'
            })
    @server()
    def test_should_preserve_None_for_non_returning_tasks(self):
        """
        Tasks which don't return anything should still show up in the dict
        """
        def local_task():
            pass
        def remote_task():
            with hide('everything'):
                run("ls /simple")
        eq_(execute(local_task), {'<local-only>': None})
        with hide('everything'):
            eq_(
                execute(remote_task, hosts=[env.host_string]),
                {env.host_string: None}
            )
    def test_should_use_sentinel_for_tasks_that_errored(self):
        """
        Tasks which errored but didn't abort should contain an eg NetworkError
        """
        def task():
            run("whoops")
        host_string = 'localhost:1234'
        with settings(hide('everything'), skip_bad_hosts=True):
            retval = execute(task, hosts=[host_string])
        assert isinstance(retval[host_string], NetworkError)
    @server(port=2200)
    @server(port=2201)
    def test_parallel_return_values(self):
        """
        Parallel mode should still return values as in serial mode
        """
        @parallel
        @hosts('127.0.0.1:2200', '127.0.0.1:2201')
        def task():
            run("ls /simple")
            return env.host_string.split(':')[1]
        with hide('everything'):
            retval = execute(task)
        eq_(retval, {'127.0.0.1:2200': '2200', '127.0.0.1:2201': '2201'})
    @with_fakes
    def test_should_work_with_Task_subclasses(self):
        """
        should work for Task subclasses, not just WrappedCallableTask
        """
        class MyTask(Task):
            name = "mytask"
            run = Fake(callable=True, expect_call=True)
        mytask = MyTask()
        execute(mytask)
class TestExecuteEnvInteractions(FabricTest):
    """Tests that execute() manages env state without leaking changes."""
    def set_network(self):
        # Don't update env.host/host_string/etc
        pass
    @server(port=2200)
    @server(port=2201)
    def test_should_not_mutate_its_own_env_vars(self):
        """
        internal env changes should not bleed out, but task env changes should
        """
        # Task that uses a handful of features which involve env vars
        @parallel
        @hosts('username@127.0.0.1:2200', 'username@127.0.0.1:2201')
        def mytask():
            run("ls /simple")
        # Pre-assertions
        assertions = {
            'parallel': False,
            'all_hosts': [],
            'host': None,
            'hosts': [],
            'host_string': None
        }
        for key, value in assertions.items():
            eq_(env[key], value)
        # Run
        with hide('everything'):
            result = execute(mytask)
        eq_(len(result), 2)
        # Post-assertions
        for key, value in assertions.items():
            eq_(env[key], value)
    @server()
    def test_should_allow_task_to_modify_env_vars(self):
        # env mutations performed BY the task must survive execute().
        @hosts('username@127.0.0.1:2200')
        def mytask():
            run("ls /simple")
            env.foo = "bar"
        with hide('everything'):
            execute(mytask)
        eq_(env.foo, "bar")
        eq_(env.host_string, None)
class TestTaskDetails(unittest.TestCase):
    """Tests for get_task_details() / __details__() docstring rendering."""
    def test_old_style_task_with_default_args(self):
        """
        __details__() should print docstr for old style task methods with default args
        """
        def task_old_style(arg1, arg2, arg3=None, arg4='yes'):
            '''Docstring'''
        details = get_task_details(task_old_style)
        eq_("Docstring\n"
            "Arguments: arg1, arg2, arg3=None, arg4='yes'",
            details)
    def test_old_style_task_without_default_args(self):
        """
        __details__() should print docstr for old style task methods without default args
        """
        def task_old_style(arg1, arg2):
            '''Docstring'''
        details = get_task_details(task_old_style)
        eq_("Docstring\n"
            "Arguments: arg1, arg2",
            details)
    def test_old_style_task_without_args(self):
        """
        __details__() should print docstr for old style task methods without args
        """
        def task_old_style():
            '''Docstring'''
        details = get_task_details(task_old_style)
        eq_("Docstring\n"
            "Arguments: ",
            details)
    def test_decorated_task(self):
        """
        __details__() should print docstr for method with any number and order of decorations
        """
        expected = "\n".join([
            "Docstring",
            "Arguments: arg1",
        ])
        @task
        def decorated_task(arg1):
            '''Docstring'''
        actual = decorated_task.__details__()
        eq_(expected, actual)
        @runs_once
        @task
        def decorated_task1(arg1):
            '''Docstring'''
        actual = decorated_task1.__details__()
        eq_(expected, actual)
        @runs_once
        @serial
        @task
        def decorated_task2(arg1):
            '''Docstring'''
        actual = decorated_task2.__details__()
        eq_(expected, actual)
    def test_subclassed_task(self):
        """
        __details__() should print docstr for subclassed task methods with args
        """
        class SpecificTask(Task):
            def run(self, arg1, arg2, arg3):
                '''Docstring'''
        eq_("Docstring\n"
            "Arguments: self, arg1, arg2, arg3",
            SpecificTask().__details__())
    @mock_streams('stdout')
    def test_multiline_docstring_indented_correctly(self):
        """
        display_command() should properly indent docstr for old style task methods
        """
        def mytask(arg1):
            """
            This is a multi line docstring.
            For reals.
            """
        try:
            with patched_context(fabric.state, 'commands', {'mytask': mytask}):
                display_command('mytask')
        except SystemExit:  # ugh
            pass
        eq_(
            sys.stdout.getvalue(),
            """Displaying detailed information for task 'mytask':
    This is a multi line docstring.
    For reals.
    Arguments: arg1
"""
        )
| |
# encoding: utf-8
"""
attribute.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from struct import pack
from exabgp.bgp.message.notification import Notify
from exabgp.util.cache import Cache
# ==================================================================== Attribute
#
class Attribute (object):
    """Base class for BGP path attributes (RFC 4271, section 4.3).

    Concrete attributes subclass this, define ID and FLAG, and call
    register_attribute() so that unpack() can dispatch on the
    (type code, flags) pair read from the wire.
    """

    # we need to define ID and FLAG inside of the subclasses
    # otherwise we can not dynamically create different GenericAttribute
    # ID = 0x00
    # FLAG = 0x00

    # Should this Attribute be cached
    CACHING = False

    # Registered subclasses we know how to decode, keyed on (id, flag)
    registered_attributes = dict()

    # what this implementation knows as attributes
    attributes_known = []
    attributes_well_know = []
    attributes_optional = []

    # Are we caching Attributes (configuration)
    caching = False

    # The attribute cache per attribute ID
    cache = {}

    # ---------------------------------------------------------------------------
    # XXX : FIXME : The API of ID is a bit different (it can be instanciated)
    # XXX : FIXME : This is legacy. should we change to not be ?

    class ID (int):
        """BGP path attribute type codes (IANA 'BGP Path Attributes' values)."""

        __slots__ = []

        # This should move within the classes and not be here
        # RFC 4271
        ORIGIN = 0x01
        AS_PATH = 0x02
        NEXT_HOP = 0x03
        MED = 0x04
        LOCAL_PREF = 0x05
        ATOMIC_AGGREGATE = 0x06
        AGGREGATOR = 0x07
        # RFC 1997
        COMMUNITY = 0x08
        # RFC 4456
        ORIGINATOR_ID = 0x09
        CLUSTER_LIST = 0x0A  # 10
        # RFC 4760
        MP_REACH_NLRI = 0x0E  # 14
        MP_UNREACH_NLRI = 0x0F  # 15
        # RFC 4360
        EXTENDED_COMMUNITY = 0x10  # 16
        # RFC 4893
        AS4_PATH = 0x11  # 17
        AS4_AGGREGATOR = 0x12  # 18
        # RFC6514
        PMSI_TUNNEL = 0x16  # 22
        # RFC5512
        TUNNEL_ENCAP = 0x17  # 23
        AIGP = 0x1A  # 26
        # exabgp-internal pseudo attributes (never on the wire)
        INTERNAL_NAME = 0xFFFC
        INTERNAL_WITHDRAW = 0xFFFD
        INTERNAL_WATCHDOG = 0xFFFE
        INTERNAL_SPLIT = 0xFFFF

        names = {
            ORIGIN: 'origin',
            AS_PATH: 'as-path',
            NEXT_HOP: 'next-hop',
            MED: 'med',  # multi-exit-disc
            LOCAL_PREF: 'local-preference',
            ATOMIC_AGGREGATE: 'atomic-aggregate',
            AGGREGATOR: 'aggregator',
            COMMUNITY: 'community',
            ORIGINATOR_ID: 'originator-id',
            CLUSTER_LIST: 'cluster-list',
            MP_REACH_NLRI: 'mp-reach-nlri',  # multi-protocol reacheable nlri
            MP_UNREACH_NLRI: 'mp-unreach-nlri',  # multi-protocol unreacheable nlri
            EXTENDED_COMMUNITY: 'extended-community',
            AS4_PATH: 'as4-path',
            AS4_AGGREGATOR: 'as4-aggregator',
            PMSI_TUNNEL: 'pmsi-tunnel',
            TUNNEL_ENCAP: 'tunnel-encaps',
            AIGP: 'aigp',
            0xfffc: 'internal-name',
            0xfffd: 'internal-withdraw',
            0xfffe: 'internal-watchdog',
            0xffff: 'internal-split',
        }

        def __str__ (self):
            return self.names.get(self,'unknown-attribute-%s' % hex(self))

        def __repr__ (self):
            return str(self)

        @classmethod
        def name (cls,self):
            # name of an arbitrary code without instanciating an ID
            return cls.names.get(self,'unknown-attribute-%s' % hex(self))

    # ---------------------------------------------------------------------------

    class Flag (int):
        """RFC 4271 attribute flag bits and the masks that clear them."""

        EXTENDED_LENGTH = 0x10  # . 16 - 0001 0000
        PARTIAL = 0x20          # . 32 - 0010 0000
        TRANSITIVE = 0x40       # . 64 - 0100 0000
        OPTIONAL = 0x80         # . 128 - 1000 0000

        MASK_EXTENDED = 0xEF    # . 239 - 1110 1111
        MASK_PARTIAL = 0xDF     # . 223 - 1101 1111
        MASK_TRANSITIVE = 0xBF  # . 191 - 1011 1111
        MASK_OPTIONAL = 0x7F    # . 127 - 0111 1111

        __slots__ = []

        def __str__ (self):
            # human readable list of the set bits
            r = []
            v = int(self)
            if v & 0x10:
                r.append("EXTENDED_LENGTH")
                v -= 0x10
            if v & 0x20:
                r.append("PARTIAL")
                v -= 0x20
            if v & 0x40:
                r.append("TRANSITIVE")
                v -= 0x40
            if v & 0x80:
                r.append("OPTIONAL")
                v -= 0x80
            if v:
                r.append("UNKNOWN %s" % hex(v))
            return " ".join(r)

        def matches (self,value):
            # compare flags while ignoring EXTENDED_LENGTH (forced on below)
            return self | 0x10 == value | 0x10

    # ---------------------------------------------------------------------------

    def _attribute (self,value):
        """Wire-encode *value* with this attribute's flag/type/length header."""
        flag = self.FLAG
        # an empty OPTIONAL attribute is simply omitted
        if flag & Attribute.Flag.OPTIONAL and not value:
            return ''
        length = len(value)
        if length > 0xFF:
            flag |= Attribute.Flag.EXTENDED_LENGTH
        if flag & Attribute.Flag.EXTENDED_LENGTH:
            len_value = pack('!H',length)
        else:
            len_value = chr(length)
        return "%s%s%s%s" % (chr(flag),chr(self.ID),len_value,value)

    # equality is by attribute type code only, not by value
    def __eq__ (self,other):
        return self.ID == other.ID

    def __ne__ (self,other):
        return self.ID != other.ID

    @classmethod
    def register_attribute (cls,attribute_id=None,flag=None):
        """Register *cls* as the decoder for its (id, flag) pair.

        EXTENDED_LENGTH is forced into the key so lookups are insensitive
        to that bit. Raises RuntimeError on duplicate registration.
        """
        aid = cls.ID if attribute_id is None else attribute_id
        flg = cls.FLAG | Attribute.Flag.EXTENDED_LENGTH if flag is None else flag | Attribute.Flag.EXTENDED_LENGTH
        if (aid,flg) in cls.registered_attributes:
            raise RuntimeError('only one class can be registered per capability')
        cls.registered_attributes[(aid,flg)] = cls
        cls.attributes_known.append(aid)
        if cls.FLAG & Attribute.Flag.OPTIONAL:
            Attribute.attributes_optional.append(aid)
        else:
            Attribute.attributes_well_know.append(aid)

    @classmethod
    def registered (cls,attribute_id,flag):
        """Whether a decoder exists for this (id, flag) pair."""
        return (attribute_id,flag | Attribute.Flag.EXTENDED_LENGTH) in cls.registered_attributes

    @classmethod
    def klass (cls,attribute_id,flag):
        """Return the registered decoder class for (id, flag) or raise Notify."""
        key = (attribute_id,flag | Attribute.Flag.EXTENDED_LENGTH)
        if key in cls.registered_attributes:
            kls = cls.registered_attributes[key]
            kls.ID = attribute_id
            return kls
        # XXX: we do see some AS4_PATH with the partial instead of transitive bit set !!
        if attribute_id == Attribute.ID.AS4_PATH:
            # BUGFIX: previously this indexed attributes_known (a list of
            # attribute ids) with the attribute id, which could never yield
            # a class. Repair the mangled flag (clear PARTIAL, set
            # TRANSITIVE) and retry the registered lookup instead.
            fixed = ((flag & Attribute.Flag.MASK_PARTIAL)
                     | Attribute.Flag.TRANSITIVE
                     | Attribute.Flag.EXTENDED_LENGTH)
            if (attribute_id,fixed) in cls.registered_attributes:
                kls = cls.registered_attributes[(attribute_id,fixed)]
                kls.ID = attribute_id
                return kls
        raise Notify (2,4,'can not handle attribute id %s' % attribute_id)

    @classmethod
    def unpack (cls,attribute_id,flag,data,negotiated):
        """Decode *data* into an attribute instance, using the cache when
        both the configuration and the attribute class allow it."""
        cache = cls.caching and cls.CACHING

        if cache and data in cls.cache.get(cls.ID,{}):
            return cls.cache[cls.ID].retrieve(data)

        key = (attribute_id,flag | Attribute.Flag.EXTENDED_LENGTH)
        if key in Attribute.registered_attributes.keys():
            instance = cls.klass(attribute_id,flag).unpack(data,negotiated)
            if cache:
                # BUGFIX: was cls.cache.cache[cls.ID].cache(...) — cls.cache
                # is a plain dict and has no .cache attribute, so every
                # cached store raised AttributeError.
                cls.cache[cls.ID].cache(data,instance)
            return instance

        raise Notify (2,4,'can not handle attribute id %s' % attribute_id)

    @classmethod
    def setCache (cls):
        """Create one Cache per known attribute id (idempotent)."""
        if not cls.cache:
            for attribute in Attribute.ID.names:
                if attribute not in cls.cache:
                    cls.cache[attribute] = Cache()

Attribute.setCache()
| |
"""Imported from the recipes section of the itertools documentation.
All functions taken from the recipes section of the itertools library docs
[1]_.
Some backward-compatible usability improvements have been made.
.. [1] http://docs.python.org/library/itertools.html#recipes
"""
from collections import deque
from itertools import (
chain, combinations, count, cycle, groupby, islice, repeat, starmap, tee
)
import operator
from random import randrange, sample, choice
from six import PY2
from six.moves import filter, filterfalse, map, range, zip, zip_longest
__all__ = [
'accumulate',
'all_equal',
'consume',
'dotproduct',
'first_true',
'flatten',
'grouper',
'iter_except',
'ncycles',
'nth',
'padnone',
'pairwise',
'partition',
'powerset',
'quantify',
'random_combination_with_replacement',
'random_combination',
'random_permutation',
'random_product',
'repeatfunc',
'roundrobin',
'tabulate',
'tail',
'take',
'unique_everseen',
'unique_justseen',
]
def accumulate(iterable, func=operator.add):
    """
    Yield the running results of applying *func* (a two-argument callable)
    across *iterable*. With the default :func:`operator.add` this produces
    running sums.

    >>> list(accumulate([1, 2, 3, 4, 5]))  # Running sum
    [1, 3, 6, 10, 15]
    >>> list(accumulate([1, 2, 3], func=operator.mul))  # Running product
    [1, 2, 6]
    >>> list(accumulate([0, 1, -1, 2, 3, 2], func=max))  # Running maximum
    [0, 1, 1, 2, 3, 3]

    This function is available in the ``itertools`` module for Python 3.2 and
    greater.
    """
    stream = iter(iterable)
    marker = object()
    running = next(stream, marker)
    # an empty iterable yields nothing
    if running is marker:
        return
    yield running
    for item in stream:
        running = func(running, item)
        yield running
def take(n, iterable):
    """Return a list of the first *n* items of *iterable*.

    >>> take(3, range(10))
    [0, 1, 2]
    >>> take(5, range(3))
    [0, 1, 2]

    Handy when you want more than one item but less than the whole
    iterator, without repeated ``next`` calls.
    """
    first_n = islice(iterable, n)
    return list(first_n)
def tabulate(function, start=0):
    """Yield ``function(start)``, ``function(start + 1)``, ... indefinitely.

    *function* must accept a single integer argument; *start* defaults to 0
    and is incremented on every step.

    >>> square = lambda x: x ** 2
    >>> iterator = tabulate(square, -3)
    >>> take(4, iterator)
    [9, 4, 1, 0]
    """
    indexes = count(start)
    return map(function, indexes)
def tail(n, iterable):
    """Return an iterator over the last *n* items of *iterable*.

    >>> t = tail(3, 'ABCDEFG')
    >>> list(t)
    ['E', 'F', 'G']
    """
    # a bounded deque keeps only the trailing n items in one C-speed pass
    last_items = deque(iterable, maxlen=n)
    return iter(last_items)
def consume(iterator, n=None):
    """Advance *iterator* by *n* steps, or exhaust it when *n* is ``None``.

    Discards values at C speed without building a list.

    >>> i = (x for x in range(10))
    >>> next(i)
    0
    >>> consume(i, 3)
    >>> next(i)
    4
    >>> consume(i)
    >>> next(i)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    StopIteration

    When fewer than *n* items remain, the whole iterator is consumed.

    >>> i = (x for x in range(3))
    >>> consume(i, 5)
    >>> next(i)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    StopIteration
    """
    if n is not None:
        # skip exactly n items: advance to the empty slice at position n
        next(islice(iterator, n, n), None)
    else:
        # drain everything into a zero-length deque
        deque(iterator, maxlen=0)
def nth(iterable, n, default=None):
    """Return the *n*-th item of *iterable*, or *default* if it is too short.

    >>> l = range(10)
    >>> nth(l, 3)
    3
    >>> nth(l, 20, "zebra")
    'zebra'
    """
    rest = islice(iterable, n, None)
    return next(rest, default)
def all_equal(iterable):
    """
    Return ``True`` when every element of *iterable* equals every other
    (vacuously true for an empty iterable).

    >>> all_equal('aaaa')
    True
    >>> all_equal('aaab')
    False
    """
    # groupby collapses equal runs: at most one group means all equal
    grouped = groupby(iterable)
    first = next(grouped, True)
    return bool(first) and not next(grouped, False)
def quantify(iterable, pred=bool):
    """Count how many times *pred* is true across *iterable*.

    >>> quantify([True, False, True])
    2
    """
    # bools sum as 0/1 (and any numeric pred results are summed as-is)
    return sum(pred(item) for item in iterable)
def padnone(iterable):
    """Yield the elements of *iterable*, then ``None`` forever.

    >>> take(5, padnone(range(3)))
    [0, 1, 2, None, None]

    Useful for emulating the behavior of the built-in :func:`map` function.
    See also :func:`padded`.
    """
    endless_nones = repeat(None)
    return chain(iterable, endless_nones)
def ncycles(iterable, n):
    """Yield the elements of *iterable* repeated *n* times.

    >>> list(ncycles(["a", "b"], 3))
    ['a', 'b', 'a', 'b', 'a', 'b']
    """
    # snapshot once so single-pass iterables can be replayed
    saved = tuple(iterable)
    return chain.from_iterable(repeat(saved, n))
def dotproduct(vec1, vec2):
    """Return the dot product of the two iterables.

    >>> dotproduct([10, 10], [20, 20])
    400
    """
    # pairs up to the shorter input, exactly like two-argument map
    return sum(a * b for a, b in zip(vec1, vec2))
def flatten(listOfLists):
    """Lazily flatten exactly one level of nesting.

    >>> list(flatten([[0, 1], [2, 3]]))
    [0, 1, 2, 3]

    See also :func:`collapse`, which can flatten multiple levels of nesting.
    """
    nested = iter(listOfLists)
    return chain.from_iterable(nested)
def repeatfunc(func, times=None, *args):
    """Yield the results of calling ``func(*args)`` repeatedly.

    With *times* given, stop after that many calls:

    >>> from operator import add
    >>> times = 4
    >>> args = 3, 5
    >>> list(repeatfunc(add, times, *args))
    [8, 8, 8, 8]

    With ``times=None`` the iterator never terminates:

    >>> from random import randrange
    >>> times = None
    >>> args = 1, 11
    >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
    [2, 4, 8, 1, 8, 4]
    """
    arg_tuples = repeat(args) if times is None else repeat(args, times)
    return starmap(func, arg_tuples)
def pairwise(iterable):
    """Yield overlapping adjacent pairs from *iterable*.

    >>> take(4, pairwise(count()))
    [(0, 1), (1, 2), (2, 3), (3, 4)]
    """
    # two synchronized copies; the second is advanced one step
    first, second = tee(iterable)
    next(second, None)
    return zip(first, second)
def grouper(n, iterable, fillvalue=None):
    """Collect data into fixed-length chunks, padding the last with *fillvalue*.

    >>> list(grouper(3, 'ABCDEFG', 'x'))
    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
    """
    # n references to the SAME iterator, so zip_longest pulls n at a time
    iterators = [iter(iterable)] * n
    return zip_longest(*iterators, fillvalue=fillvalue)
def roundrobin(*iterables):
    """Yield an item from each iterable in turn, skipping exhausted ones.

    >>> list(roundrobin('ABC', 'D', 'EF'))
    ['A', 'D', 'E', 'B', 'F', 'C']

    See :func:`interleave_longest` for a slightly faster implementation.
    """
    # Recipe credited to George Sakkis
    pending = len(iterables)
    if PY2:
        nexts = cycle(iter(it).next for it in iterables)
    else:
        nexts = cycle(iter(it).__next__ for it in iterables)
    while pending:
        try:
            # BUGFIX (idiom): the loop variable used to be named `next`,
            # shadowing the builtin for the rest of the function body.
            for next_func in nexts:
                yield next_func()
        except StopIteration:
            # one source ran dry: drop its bound method from the rotation
            pending -= 1
            nexts = cycle(islice(nexts, pending))
def partition(pred, iterable):
    """
    Split *iterable* into two iterators based on *pred*.

    The first yields items where ``pred(item) == False``; the second yields
    items where ``pred(item) == True``.

    >>> is_odd = lambda x: x % 2 != 0
    >>> iterable = range(10)
    >>> even_items, odd_items = partition(is_odd, iterable)
    >>> list(even_items), list(odd_items)
    ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
    """
    # independent copies so each output stream can be consumed separately
    false_copy, true_copy = tee(iterable)
    return filterfalse(pred, false_copy), filter(pred, true_copy)
def powerset(iterable):
    """Yield every subset of *iterable*, smallest first.

    >>> list(powerset([1,2,3]))
    [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
    """
    pool = list(iterable)
    sizes = range(len(pool) + 1)
    return chain.from_iterable(combinations(pool, size) for size in sizes)
def unique_everseen(iterable, key=None):
    """
    Yield unique elements, preserving order.

    >>> list(unique_everseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D']
    >>> list(unique_everseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'D']

    Sequences with a mix of hashable and unhashable items can be used.
    The function will be slower (i.e., `O(n^2)`) for unhashable items.
    """
    seenset = set()
    seenset_add = seenset.add
    seenlist = []
    seenlist_add = seenlist.append
    # Two near-identical loops so the `key is None` test is paid once, not
    # per element. FIX: the exception bindings `except TypeError as e` were
    # never used (flake8 F841) and have been removed.
    if key is None:
        for element in iterable:
            try:
                if element not in seenset:
                    seenset_add(element)
                    yield element
            except TypeError:
                # unhashable element: fall back to the linear-scan list
                if element not in seenlist:
                    seenlist_add(element)
                    yield element
    else:
        for element in iterable:
            k = key(element)
            try:
                if k not in seenset:
                    seenset_add(k)
                    yield element
            except TypeError:
                if k not in seenlist:
                    seenlist_add(k)
                    yield element
def unique_justseen(iterable, key=None):
    """Yield elements in order, skipping consecutive duplicates.

    >>> list(unique_justseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D', 'A', 'B']
    >>> list(unique_justseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'A', 'D']
    """
    # keep the first element of each run that groupby produces
    grouped = groupby(iterable, key)
    run_iterators = map(operator.itemgetter(1), grouped)
    return map(next, run_iterators)
def iter_except(func, exception, first=None):
    """Yield ``func()`` repeatedly until *exception* is raised.

    Converts a call-until-exception interface into an iterator — like
    ``iter(func, sentinel)`` but using an exception to end the loop.
    If *first* is given, ``first()`` is yielded before the loop starts.

    >>> l = [0, 1, 2]
    >>> list(iter_except(l.pop, IndexError))
    [2, 1, 0]
    """
    try:
        if first is not None:
            yield first()
        while True:
            yield func()
    except exception:
        return
def first_true(iterable, default=False, pred=None):
    """
    Return the first true value in *iterable*, or *default* if none is found.

    With *pred* given, return the first item for which
    ``pred(item) == True`` .

    >>> first_true(range(10))
    1
    >>> first_true(range(10), pred=lambda x: x > 5)
    6
    >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
    'missing'
    """
    matches = filter(pred, iterable)
    return next(matches, default)
def random_product(*args, **kwds):
    """Draw one random item from each input iterable.

    >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
    ('c', 3, 'Z')

    The keyword argument *repeat* draws that many items from each iterable.

    >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
    ('a', 2, 'd', 3)

    Equivalent to one random selection from
    ``itertools.product(*args, **kwarg)``.
    """
    repeat_count = kwds.get('repeat', 1)
    pools = [tuple(pool) for pool in args] * repeat_count
    return tuple(choice(pool) for pool in pools)
def random_permutation(iterable, r=None):
    """Return a random length-*r* permutation of *iterable* as a tuple.

    *r* defaults to the full length of *iterable*.

    >>> random_permutation(range(5)) # doctest:+SKIP
    (3, 4, 0, 1, 2)

    Equivalent to one random selection from
    ``itertools.permutations(iterable, r)``.
    """
    pool = tuple(iterable)
    size = len(pool) if r is None else r
    return tuple(sample(pool, size))
def random_combination(iterable, r):
    """Return a random length-*r* subsequence of *iterable* (no repeats).

    >>> random_combination(range(5), 3) # doctest:+SKIP
    (2, 3, 4)

    Equivalent to one random selection from
    ``itertools.combinations(iterable, r)``.
    """
    pool = tuple(iterable)
    # sorted index sample preserves the original relative order
    chosen = sorted(sample(range(len(pool)), r))
    return tuple(pool[i] for i in chosen)
def random_combination_with_replacement(iterable, r):
    """Return a random length-*r* subsequence of *iterable*, with repeats
    allowed.

    >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
    (0, 0, 1, 2, 2)

    Equivalent to one random selection from
    ``itertools.combinations_with_replacement(iterable, r)``.
    """
    pool = tuple(iterable)
    size = len(pool)
    # independent draws, sorted so the result matches combination order
    positions = sorted(randrange(size) for _ in range(r))
    return tuple(pool[i] for i in positions)
| |
from sqlalchemy import cast
from sqlalchemy import Column
from sqlalchemy import func
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import TypeDecorator
from sqlalchemy import union
from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
class _ExprFixture(object):
    """Mixin supplying table fixtures whose 'y' column type attaches SQL
    conversion expressions via ``bind_expression`` / ``column_expression``."""

    def _test_table(self, type_):
        # two columns: plain String 'x' and the type under test 'y'
        test_table = Table(
            "test_table", MetaData(), Column("x", String), Column("y", type_)
        )
        return test_table

    def _fixture(self):
        # String subclass with both hooks wrapping in lower()
        class MyString(String):
            # supersedes any processing that might be on
            # String
            def bind_expression(self, bindvalue):
                return func.lower(bindvalue)

            def column_expression(self, col):
                return func.lower(col)

        return self._test_table(MyString)

    def _type_decorator_outside_fixture(self):
        # hooks defined on the TypeDecorator itself ("outside" the impl)
        class MyString(TypeDecorator):
            impl = String
            cache_ok = True

            def bind_expression(self, bindvalue):
                return func.outside_bind(bindvalue)

            def column_expression(self, col):
                return func.outside_colexpr(col)

        return self._test_table(MyString)

    def _type_decorator_inside_fixture(self):
        # hooks live only on the impl type ("inside"); decorator adds none
        class MyInsideString(String):
            def bind_expression(self, bindvalue):
                return func.inside_bind(bindvalue)

            def column_expression(self, col):
                return func.inside_colexpr(col)

        class MyString(TypeDecorator):
            impl = MyInsideString
            cache_ok = True

        return self._test_table(MyString)

    def _type_decorator_both_fixture(self):
        # hooks on both levels; the decorator chains through self.impl
        class MyDialectString(String):
            def bind_expression(self, bindvalue):
                return func.inside_bind(bindvalue)

            def column_expression(self, col):
                return func.inside_colexpr(col)

        class MyString(TypeDecorator):
            impl = String
            cache_ok = True

            # this works because when the compiler calls dialect_impl(),
            # a copy of MyString is created which has just this impl
            # as self.impl
            def load_dialect_impl(self, dialect):
                return MyDialectString()

            # user-defined methods need to invoke explicitly on the impl
            # for now...
            def bind_expression(self, bindvalue):
                return func.outside_bind(self.impl.bind_expression(bindvalue))

            def column_expression(self, col):
                return func.outside_colexpr(self.impl.column_expression(col))

        return self._test_table(MyString)

    def _variant_fixture(self, inner_fixture):
        # rewrap the inner fixture's 'y' type as the "default" variant
        type_ = inner_fixture.c.y.type
        variant = String().with_variant(type_, "default")
        return self._test_table(variant)

    def _dialect_level_fixture(self):
        # dialect whose colspecs substitute String wholesale
        class ImplString(String):
            def bind_expression(self, bindvalue):
                return func.dialect_bind(bindvalue)

            def column_expression(self, col):
                return func.dialect_colexpr(col)

        from sqlalchemy.engine import default

        dialect = default.DefaultDialect()
        dialect.colspecs = {String: ImplString}
        return dialect
class SelectTest(_ExprFixture, fixtures.TestBase, AssertsCompiledSQL):
    """Compile-time checks that bind_expression/column_expression wrappers
    appear in the rendered SQL of SELECT/INSERT statements."""

    __dialect__ = "default"

    def test_select_cols(self):
        # column_expression wraps the selected column in lower()
        table = self._fixture()
        self.assert_compile(
            select(table),
            "SELECT test_table.x, lower(test_table.y) AS y FROM test_table",
        )

    def test_anonymous_expr(self):
        # a CAST of the column is NOT wrapped: the hook applies to the
        # column itself, not derived expressions
        table = self._fixture()
        self.assert_compile(
            select(cast(table.c.y, String)),
            "SELECT CAST(test_table.y AS VARCHAR) AS y FROM test_table",
        )

    def test_select_cols_use_labels(self):
        table = self._fixture()
        self.assert_compile(
            select(table).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
            "SELECT test_table.x AS test_table_x, "
            "lower(test_table.y) AS test_table_y FROM test_table",
        )

    def test_select_cols_use_labels_result_map_targeting(self):
        # the result map must still target the original columns
        table = self._fixture()

        compiled = (
            select(table)
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
            .compile()
        )

        assert table.c.y in compiled._create_result_map()["test_table_y"][1]
        assert table.c.x in compiled._create_result_map()["test_table_x"][1]

        # the lower() function goes into the result_map, we don't really
        # need this but it's fine
        self.assert_compile(
            compiled._create_result_map()["test_table_y"][1][3],
            "lower(test_table.y)",
        )
        # then the original column gets put in there as well.
        # as of 1.1 it's important that it is first as this is
        # taken as significant by the result processor.
        self.assert_compile(
            compiled._create_result_map()["test_table_y"][1][0], "test_table.y"
        )

    def test_insert_binds(self):
        # bind_expression wraps the INSERT bind parameter
        table = self._fixture()
        self.assert_compile(
            table.insert(),
            "INSERT INTO test_table (x, y) VALUES (:x, lower(:y))",
        )

        self.assert_compile(
            table.insert().values(y="hi"),
            "INSERT INTO test_table (y) VALUES (lower(:y))",
        )

    def test_select_binds(self):
        table = self._fixture()
        self.assert_compile(
            select(table).where(table.c.y == "hi"),
            "SELECT test_table.x, lower(test_table.y) AS y FROM "
            "test_table WHERE test_table.y = lower(:y_1)",
        )

    def test_in_binds(self):
        # expanding IN: the bind wrapper is applied per expanded parameter
        table = self._fixture()

        self.assert_compile(
            select(table).where(
                table.c.y.in_(["hi", "there", "some", "expr"])
            ),
            "SELECT test_table.x, lower(test_table.y) AS y FROM "
            "test_table WHERE test_table.y IN "
            "([POSTCOMPILE_y_1~~lower(~~REPL~~)~~])",
            render_postcompile=False,
        )

        self.assert_compile(
            select(table).where(
                table.c.y.in_(["hi", "there", "some", "expr"])
            ),
            "SELECT test_table.x, lower(test_table.y) AS y FROM "
            "test_table WHERE test_table.y IN "
            "(lower(:y_1_1), lower(:y_1_2), lower(:y_1_3), lower(:y_1_4))",
            render_postcompile=True,
        )

    def test_dialect(self):
        # dialect-level colspecs substitution applies the wrappers
        table = self._fixture()
        dialect = self._dialect_level_fixture()

        # 'x' is straight String
        self.assert_compile(
            select(table.c.x).where(table.c.x == "hi"),
            "SELECT dialect_colexpr(test_table.x) AS x "
            "FROM test_table WHERE test_table.x = dialect_bind(:x_1)",
            dialect=dialect,
        )

    def test_type_decorator_inner(self):
        table = self._type_decorator_inside_fixture()

        self.assert_compile(
            select(table).where(table.c.y == "hi"),
            "SELECT test_table.x, inside_colexpr(test_table.y) AS y "
            "FROM test_table WHERE test_table.y = inside_bind(:y_1)",
        )

    def test_type_decorator_inner_plus_dialect(self):
        table = self._type_decorator_inside_fixture()
        dialect = self._dialect_level_fixture()

        # for "inner", the MyStringImpl is a subclass of String, #
        # so a dialect-level
        # implementation supersedes that, which is the same as with other
        # processor functions
        self.assert_compile(
            select(table).where(table.c.y == "hi"),
            "SELECT dialect_colexpr(test_table.x) AS x, "
            "dialect_colexpr(test_table.y) AS y FROM test_table "
            "WHERE test_table.y = dialect_bind(:y_1)",
            dialect=dialect,
        )

    def test_type_decorator_outer(self):
        table = self._type_decorator_outside_fixture()

        self.assert_compile(
            select(table).where(table.c.y == "hi"),
            "SELECT test_table.x, outside_colexpr(test_table.y) AS y "
            "FROM test_table WHERE test_table.y = outside_bind(:y_1)",
        )

    def test_type_decorator_outer_plus_dialect(self):
        table = self._type_decorator_outside_fixture()
        dialect = self._dialect_level_fixture()

        # for "outer", the MyString isn't calling the "impl" functions,
        # so we don't get the "impl"
        self.assert_compile(
            select(table).where(table.c.y == "hi"),
            "SELECT dialect_colexpr(test_table.x) AS x, "
            "outside_colexpr(test_table.y) AS y "
            "FROM test_table WHERE test_table.y = outside_bind(:y_1)",
            dialect=dialect,
        )

    def test_type_decorator_both(self):
        # decorator chains through self.impl, so both wrappers nest
        table = self._type_decorator_both_fixture()

        self.assert_compile(
            select(table).where(table.c.y == "hi"),
            "SELECT test_table.x, "
            "outside_colexpr(inside_colexpr(test_table.y)) AS y "
            "FROM test_table WHERE "
            "test_table.y = outside_bind(inside_bind(:y_1))",
        )

    def test_type_decorator_both_plus_dialect(self):
        table = self._type_decorator_both_fixture()
        dialect = self._dialect_level_fixture()

        # for "inner", the MyStringImpl is a subclass of String,
        # so a dialect-level
        # implementation supersedes that, which is the same as with other
        # processor functions
        self.assert_compile(
            select(table).where(table.c.y == "hi"),
            "SELECT dialect_colexpr(test_table.x) AS x, "
            "outside_colexpr(dialect_colexpr(test_table.y)) AS y "
            "FROM test_table WHERE "
            "test_table.y = outside_bind(dialect_bind(:y_1))",
            dialect=dialect,
        )

    def test_type_decorator_both_w_variant(self):
        # wrapping inside a variant must not change the rendered SQL
        table = self._variant_fixture(self._type_decorator_both_fixture())

        self.assert_compile(
            select(table).where(table.c.y == "hi"),
            "SELECT test_table.x, "
            "outside_colexpr(inside_colexpr(test_table.y)) AS y "
            "FROM test_table WHERE "
            "test_table.y = outside_bind(inside_bind(:y_1))",
        )

    def test_compound_select(self):
        table = self._fixture()

        s1 = select(table).where(table.c.y == "hi")
        s2 = select(table).where(table.c.y == "there")

        self.assert_compile(
            union(s1, s2),
            "SELECT test_table.x, lower(test_table.y) AS y "
            "FROM test_table WHERE test_table.y = lower(:y_1) "
            "UNION SELECT test_table.x, lower(test_table.y) AS y "
            "FROM test_table WHERE test_table.y = lower(:y_2)",
        )

    def test_select_of_compound_select(self):
        # outer SELECT applies the column wrapper; the inner union does not
        # re-wrap its own columns when selected from
        table = self._fixture()

        s1 = select(table).where(table.c.y == "hi")
        s2 = select(table).where(table.c.y == "there")

        self.assert_compile(
            union(s1, s2).alias().select(),
            "SELECT anon_1.x, lower(anon_1.y) AS y FROM "
            "(SELECT test_table.x AS x, test_table.y AS y "
            "FROM test_table WHERE test_table.y = lower(:y_1) "
            "UNION SELECT test_table.x AS x, test_table.y AS y "
            "FROM test_table WHERE test_table.y = lower(:y_2)) AS anon_1",
        )
class DerivedTest(_ExprFixture, fixtures.TestBase, AssertsCompiledSQL):
    """column_expression must apply at the outermost selectable only when
    selecting FROM subqueries/joins of the fixture table."""

    __dialect__ = "default"

    def test_select_from_select(self):
        table = self._fixture()
        self.assert_compile(
            table.select().subquery().select(),
            "SELECT anon_1.x, lower(anon_1.y) AS y FROM "
            "(SELECT test_table.x "
            "AS x, test_table.y AS y FROM test_table) AS anon_1",
        )

    def test_select_from_aliased_join(self):
        # both sides of the join get the wrapper on their 'y' column
        table = self._fixture()
        s1 = table.select().alias()
        s2 = table.select().alias()
        j = s1.join(s2, s1.c.x == s2.c.x)
        s3 = j.select()
        self.assert_compile(
            s3,
            "SELECT anon_1.x, lower(anon_1.y) AS y, anon_2.x AS x_1, "
            "lower(anon_2.y) AS y_1 "
            "FROM (SELECT test_table.x AS x, test_table.y AS y "
            "FROM test_table) AS anon_1 JOIN (SELECT "
            "test_table.x AS x, test_table.y AS y "
            "FROM test_table) AS anon_2 ON anon_1.x = anon_2.x",
        )
class RoundTripTestBase(object):
    """Shared round-trip assertions; subclasses define a 'test_table' whose
    'y' type lower-cases on bind and upper-cases on column fetch."""

    def test_round_trip(self, connection):
        connection.execute(
            self.tables.test_table.insert(),
            [
                {"x": "X1", "y": "Y1"},
                {"x": "X2", "y": "Y2"},
                {"x": "X3", "y": "Y3"},
            ],
        )

        # test insert coercion alone: raw SQL sees the lower-cased values
        eq_(
            connection.exec_driver_sql(
                "select * from test_table order by y"
            ).fetchall(),
            [("X1", "y1"), ("X2", "y2"), ("X3", "y3")],
        )

        # conversion back to upper
        eq_(
            connection.execute(
                select(self.tables.test_table).order_by(
                    self.tables.test_table.c.y
                )
            ).fetchall(),
            [("X1", "Y1"), ("X2", "Y2"), ("X3", "Y3")],
        )

    def test_targeting_no_labels(self, connection):
        # row targeting by Column object, unlabeled select
        connection.execute(
            self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}
        )
        row = connection.execute(select(self.tables.test_table)).first()
        eq_(row._mapping[self.tables.test_table.c.y], "Y1")

    def test_targeting_by_string(self, connection):
        # row targeting by plain string key
        connection.execute(
            self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}
        )
        row = connection.execute(select(self.tables.test_table)).first()
        eq_(row._mapping["y"], "Y1")

    def test_targeting_apply_labels(self, connection):
        # row targeting with tablename_plus_col label style
        connection.execute(
            self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}
        )
        row = connection.execute(
            select(self.tables.test_table).set_label_style(
                LABEL_STYLE_TABLENAME_PLUS_COL
            )
        ).first()
        eq_(row._mapping[self.tables.test_table.c.y], "Y1")

    def test_targeting_individual_labels(self, connection):
        # row targeting when columns carry explicit .label() names
        connection.execute(
            self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}
        )
        row = connection.execute(
            select(
                self.tables.test_table.c.x.label("xbar"),
                self.tables.test_table.c.y.label("ybar"),
            )
        ).first()
        eq_(row._mapping[self.tables.test_table.c.y], "Y1")
class StringRoundTripTest(fixtures.TablesTest, RoundTripTestBase):
    """RoundTripTestBase run against a plain String subclass with hooks."""

    @classmethod
    def define_tables(cls, metadata):
        class MyString(String):
            # lower() going in, upper() coming out
            def bind_expression(self, bindvalue):
                return func.lower(bindvalue)

            def column_expression(self, col):
                return func.upper(col)

        Table(
            "test_table",
            metadata,
            Column("x", String(50)),
            Column("y", MyString(50)),
        )
class TypeDecRoundTripTest(fixtures.TablesTest, RoundTripTestBase):
    """RoundTripTestBase run against a TypeDecorator defining the hooks."""

    @classmethod
    def define_tables(cls, metadata):
        class MyString(TypeDecorator):
            impl = String
            cache_ok = True

            # lower() going in, upper() coming out
            def bind_expression(self, bindvalue):
                return func.lower(bindvalue)

            def column_expression(self, col):
                return func.upper(col)

        Table(
            "test_table",
            metadata,
            Column("x", String(50)),
            Column("y", MyString(50)),
        )
class ReturningTest(fixtures.TablesTest):
    """column_expression must also apply to RETURNING clause columns."""

    __requires__ = ("returning",)

    @classmethod
    def define_tables(cls, metadata):
        class MyString(String):
            def column_expression(self, col):
                return func.lower(col)

        Table(
            "test_table",
            metadata,
            Column("x", String(50)),
            # server default exercises a value the driver never bound
            Column("y", MyString(50), server_default="YVALUE"),
        )

    @testing.provide_metadata
    def test_insert_returning(self, connection):
        table = self.tables.test_table
        result = connection.execute(
            table.insert().returning(table.c.y), {"x": "xvalue"}
        )
        # server default "YVALUE" comes back lower-cased by the wrapper
        eq_(result.first(), ("yvalue",))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Aaron Rosen, Nicira Networks, Inc.
# @author: Bob Kukura, Red Hat, Inc.
from sqlalchemy import func
from sqlalchemy.orm import exc
from neutron.common import exceptions as q_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import log as logging
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.openvswitch import ovs_models_v2
LOG = logging.getLogger(__name__)
def initialize():
    """Configure the database layer for the OVS plugin (delegates to
    neutron.db.api)."""
    db.configure_db()
def get_network_binding(session, network_id):
    """Return the NetworkBinding row for *network_id*, or None if absent.

    A None *session* falls back to a fresh session from db.get_session().
    """
    session = session or db.get_session()
    query = session.query(ovs_models_v2.NetworkBinding)
    try:
        return query.filter_by(network_id=network_id).one()
    except exc.NoResultFound:
        return None
def add_network_binding(session, network_id, network_type,
                        physical_network, segmentation_id):
    """Persist a NetworkBinding row for the network inside a subtransaction."""
    row = ovs_models_v2.NetworkBinding(network_id, network_type,
                                       physical_network, segmentation_id)
    with session.begin(subtransactions=True):
        session.add(row)
def get_port_forwarding(session, port_id):
    """Return the 'forward_ports' value for port_id, or None if absent."""
    if not session:
        session = db.get_session()
    try:
        row = (session.query(ovs_models_v2.PortForwarding).
               filter_by(port_id=port_id).one())
    except exc.NoResultFound:
        return None
    return row['forward_ports']
def clear_port_forwarding(session, port_id):
    """Delete the PortForwarding row for port_id, if one exists."""
    with session.begin(subtransactions=True):
        try:
            stale = (session.query(ovs_models_v2.PortForwarding).
                     filter_by(port_id=port_id).one())
        except exc.NoResultFound:
            # Nothing to clear
            return
        if stale:
            session.delete(stale)
def add_port_forwarding(session, port_id, forward_ports):
    """Record the forward_ports mapping for port_id."""
    row = ovs_models_v2.PortForwarding(port_id, forward_ports)
    with session.begin(subtransactions=True):
        session.add(row)
def sync_vlan_allocations(network_vlan_ranges):
    """Synchronize vlan_allocations table with configured VLAN ranges.

    network_vlan_ranges maps each physical network name to a list of
    (vlan_min, vlan_max) tuples.  Unallocated rows falling outside the
    configured ranges are deleted; missing allocatable VLAN ids are added.
    Rows currently marked allocated are always preserved.
    """
    session = db.get_session()
    with session.begin():
        # get existing allocations for all physical networks
        allocations = dict()
        allocs = (session.query(ovs_models_v2.VlanAllocation).
                  all())
        for alloc in allocs:
            if alloc.physical_network not in allocations:
                allocations[alloc.physical_network] = set()
            allocations[alloc.physical_network].add(alloc)
        # process vlan ranges for each configured physical network
        for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
            # determine current configured allocatable vlans for this
            # physical network
            vlan_ids = set()
            for vlan_range in vlan_ranges:
                vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1))
            # remove from table unallocated vlans not currently allocatable
            if physical_network in allocations:
                for alloc in allocations[physical_network]:
                    try:
                        # see if vlan is allocatable
                        vlan_ids.remove(alloc.vlan_id)
                    except KeyError:
                        # it's not allocatable, so check if its allocated
                        if not alloc.allocated:
                            # it's not, so remove it from table
                            LOG.debug(_("Removing vlan %(vlan_id)s on "
                                        "physical network "
                                        "%(physical_network)s from pool"),
                                      {'vlan_id': alloc.vlan_id,
                                       'physical_network': physical_network})
                            session.delete(alloc)
                del allocations[physical_network]
            # add missing allocatable vlans to table
            # (vlan_ids now holds only ids with no existing row)
            for vlan_id in sorted(vlan_ids):
                alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id)
                session.add(alloc)
        # remove from table unallocated vlans for any unconfigured physical
        # networks (whatever is left in 'allocations' was never configured)
        for allocs in allocations.itervalues():
            for alloc in allocs:
                if not alloc.allocated:
                    LOG.debug(_("Removing vlan %(vlan_id)s on physical "
                                "network %(physical_network)s from pool"),
                              {'vlan_id': alloc.vlan_id,
                               'physical_network': alloc.physical_network})
                    session.delete(alloc)
def get_vlan_allocation(physical_network, vlan_id):
    """Look up the VlanAllocation row for (physical_network, vlan_id)."""
    session = db.get_session()
    query = session.query(ovs_models_v2.VlanAllocation).filter_by(
        physical_network=physical_network, vlan_id=vlan_id)
    try:
        return query.one()
    except exc.NoResultFound:
        return None
def reserve_vlan(session):
    """Allocate the first free VLAN in the pool.

    Returns (physical_network, vlan_id); raises NoNetworkAvailable when
    every pooled VLAN is already allocated.
    """
    with session.begin(subtransactions=True):
        free = (session.query(ovs_models_v2.VlanAllocation).
                filter_by(allocated=False).
                with_lockmode('update').
                first())
        if not free:
            raise q_exc.NoNetworkAvailable()
        LOG.debug(_("Reserving vlan %(vlan_id)s on physical network "
                    "%(physical_network)s from pool"),
                  {'vlan_id': free.vlan_id,
                   'physical_network': free.physical_network})
        free.allocated = True
        return (free.physical_network, free.vlan_id)
def reserve_specific_vlan(session, physical_network, vlan_id):
    """Mark (physical_network, vlan_id) as allocated.

    Raises FlatNetworkInUse (for the flat pseudo-VLAN id) or VlanIdInUse
    when the id is already allocated.  A VLAN outside the configured pool
    gets a brand-new allocation row created on the fly.
    """
    with session.begin(subtransactions=True):
        try:
            alloc = (session.query(ovs_models_v2.VlanAllocation).
                     filter_by(physical_network=physical_network,
                               vlan_id=vlan_id).
                     with_lockmode('update').
                     one())
            if alloc.allocated:
                # Distinguish flat networks from tagged VLANs in the error
                if vlan_id == constants.FLAT_VLAN_ID:
                    raise q_exc.FlatNetworkInUse(
                        physical_network=physical_network)
                else:
                    raise q_exc.VlanIdInUse(vlan_id=vlan_id,
                                            physical_network=physical_network)
            LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                        "network %(physical_network)s from pool"),
                      {'vlan_id': vlan_id,
                       'physical_network': physical_network})
            alloc.allocated = True
        except exc.NoResultFound:
            # Not in the pool: create and immediately allocate a new row
            LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                        "network %(physical_network)s outside pool"),
                      {'vlan_id': vlan_id,
                       'physical_network': physical_network})
            alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id)
            alloc.allocated = True
            session.add(alloc)
def release_vlan(session, physical_network, vlan_id, network_vlan_ranges):
    """Free (physical_network, vlan_id); rows outside the pool are deleted."""
    with session.begin(subtransactions=True):
        try:
            allocation = (session.query(ovs_models_v2.VlanAllocation).
                          filter_by(physical_network=physical_network,
                                    vlan_id=vlan_id).
                          with_lockmode('update').
                          one())
        except exc.NoResultFound:
            LOG.warning(_("vlan_id %(vlan_id)s on physical network "
                          "%(physical_network)s not found"),
                        {'vlan_id': vlan_id,
                         'physical_network': physical_network})
            return
        allocation.allocated = False
        # Keep the row only when the id is still within a configured range
        ranges = network_vlan_ranges.get(physical_network, [])
        inside = any(lo <= vlan_id <= hi for lo, hi in ranges)
        if inside:
            LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
                        "%(physical_network)s to pool"),
                      {'vlan_id': vlan_id,
                       'physical_network': physical_network})
        else:
            session.delete(allocation)
            LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
                        "%(physical_network)s outside pool"),
                      {'vlan_id': vlan_id,
                       'physical_network': physical_network})
def sync_tunnel_allocations(tunnel_id_ranges):
    """Synchronize tunnel_allocations table with configured tunnel ranges.

    tunnel_id_ranges is a list of (tun_min, tun_max) tuples.  Ranges wider
    than one million ids are skipped to avoid flooding the table.
    Unallocated rows outside the configured ranges are removed; missing
    allocatable ids are added.  Allocated rows are always preserved.
    """
    # determine current configured allocatable tunnels
    tunnel_ids = set()
    for tunnel_id_range in tunnel_id_ranges:
        tun_min, tun_max = tunnel_id_range
        if tun_max + 1 - tun_min > 1000000:
            LOG.error(_("Skipping unreasonable tunnel ID range "
                        "%(tun_min)s:%(tun_max)s"),
                      {'tun_min': tun_min, 'tun_max': tun_max})
        else:
            tunnel_ids |= set(xrange(tun_min, tun_max + 1))
    session = db.get_session()
    with session.begin():
        # remove from table unallocated tunnels not currently allocatable
        allocs = (session.query(ovs_models_v2.TunnelAllocation).
                  all())
        for alloc in allocs:
            try:
                # see if tunnel is allocatable
                tunnel_ids.remove(alloc.tunnel_id)
            except KeyError:
                # it's not allocatable, so check if its allocated
                if not alloc.allocated:
                    # it's not, so remove it from table
                    LOG.debug(_("Removing tunnel %s from pool"),
                              alloc.tunnel_id)
                    session.delete(alloc)
        # add missing allocatable tunnels to table
        # (tunnel_ids now holds only ids with no existing row)
        for tunnel_id in sorted(tunnel_ids):
            alloc = ovs_models_v2.TunnelAllocation(tunnel_id)
            session.add(alloc)
def get_tunnel_allocation(tunnel_id):
    """Return the (row-locked) TunnelAllocation for tunnel_id, or None."""
    session = db.get_session()
    query = (session.query(ovs_models_v2.TunnelAllocation).
             filter_by(tunnel_id=tunnel_id).
             with_lockmode('update'))
    try:
        return query.one()
    except exc.NoResultFound:
        return None
def reserve_tunnel(session):
    """Allocate the first free tunnel id from the pool.

    Raises NoNetworkAvailable when no unallocated tunnel remains.
    """
    with session.begin(subtransactions=True):
        free = (session.query(ovs_models_v2.TunnelAllocation).
                filter_by(allocated=False).
                with_lockmode('update').
                first())
        if not free:
            raise q_exc.NoNetworkAvailable()
        LOG.debug(_("Reserving tunnel %s from pool"), free.tunnel_id)
        free.allocated = True
        return free.tunnel_id
def reserve_specific_tunnel(session, tunnel_id):
    """Mark tunnel_id as allocated; raise TunnelIdInUse if already taken."""
    with session.begin(subtransactions=True):
        query = (session.query(ovs_models_v2.TunnelAllocation).
                 filter_by(tunnel_id=tunnel_id).
                 with_lockmode('update'))
        try:
            allocation = query.one()
        except exc.NoResultFound:
            # Outside the configured pool: create a fresh, allocated row
            LOG.debug(_("Reserving specific tunnel %s outside pool"),
                      tunnel_id)
            allocation = ovs_models_v2.TunnelAllocation(tunnel_id)
            allocation.allocated = True
            session.add(allocation)
            return
        if allocation.allocated:
            raise q_exc.TunnelIdInUse(tunnel_id=tunnel_id)
        LOG.debug(_("Reserving specific tunnel %s from pool"), tunnel_id)
        allocation.allocated = True
def release_tunnel(session, tunnel_id, tunnel_id_ranges):
    """Free tunnel_id; rows outside the configured ranges are deleted."""
    with session.begin(subtransactions=True):
        try:
            allocation = (session.query(ovs_models_v2.TunnelAllocation).
                          filter_by(tunnel_id=tunnel_id).
                          with_lockmode('update').
                          one())
        except exc.NoResultFound:
            LOG.warning(_("tunnel_id %s not found"), tunnel_id)
            return
        allocation.allocated = False
        # Keep the row only when the id is still within a configured range
        inside = any(lo <= tunnel_id <= hi for lo, hi in tunnel_id_ranges)
        if inside:
            LOG.debug(_("Releasing tunnel %s to pool"), tunnel_id)
        else:
            session.delete(allocation)
            LOG.debug(_("Releasing tunnel %s outside pool"), tunnel_id)
def get_port(port_id):
    """Return the Port row with the given id, or None if it does not exist."""
    session = db.get_session()
    try:
        return session.query(models_v2.Port).filter_by(id=port_id).one()
    except exc.NoResultFound:
        return None
def get_port_from_device(port_id):
    """Get port from database.

    Returns the plugin's port dict augmented with security-group ids,
    empty rule/source-group lists, and a flattened fixed_ips list of
    plain IP addresses; returns None when the port does not exist.
    """
    LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id)
    session = db.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
    # Outer join so a port with no security groups still yields one row
    # (with sg_id None)
    query = session.query(models_v2.Port,
                          sg_db.SecurityGroupPortBinding.security_group_id)
    query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                            models_v2.Port.id == sg_binding_port)
    query = query.filter(models_v2.Port.id == port_id)
    port_and_sgs = query.all()
    if not port_and_sgs:
        return None
    # Every row carries the same Port object; take it from the first row
    port = port_and_sgs[0][0]
    plugin = manager.NeutronManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    # Filter out the None sg_id produced by the outer join
    port_dict[ext_sg.SECURITYGROUPS] = [
        sg_id for port_, sg_id in port_and_sgs if sg_id]
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    port_dict['fixed_ips'] = [ip['ip_address']
                              for ip in port['fixed_ips']]
    return port_dict
def set_port_status(port_id, status):
    """Set the status field of a port; raise PortNotFound if it is missing."""
    session = db.get_session()
    try:
        port = session.query(models_v2.Port).filter_by(id=port_id).one()
    except exc.NoResultFound:
        raise q_exc.PortNotFound(port_id=port_id)
    port['status'] = status
    session.merge(port)
    session.flush()
def get_tunnel_endpoints():
    """Return all tunnel endpoints as a list of {'id', 'ip_address'} dicts."""
    session = db.get_session()
    endpoints = []
    for tunnel in session.query(ovs_models_v2.TunnelEndpoint):
        endpoints.append({'id': tunnel.id,
                          'ip_address': tunnel.ip_address})
    return endpoints
def _generate_tunnel_id(session):
    """Return the next unused tunnel id (one past the current maximum)."""
    current_max = session.query(
        func.max(ovs_models_v2.TunnelEndpoint.id)).scalar()
    # scalar() is None for an empty table; start counting from 1 then
    return (current_max or 0) + 1
def add_tunnel_endpoint(ip, max_retries=10):
    """Return the tunnel endpoint for the given IP, creating one if needed.

    Generation of a new tunnel endpoint must be put into a repeatedly
    executed transactional block to ensure it doesn't conflict with any
    other concurrently executed DB transactions in spite of the specified
    transactions isolation level value.

    :param ip: IP address of the endpoint to look up or create.
    :param max_retries: attempts before giving up on id-collision retries.
    :raises NeutronException: when no tunnel id could be committed within
        max_retries attempts.
    """
    for i in xrange(max_retries):
        LOG.debug(_('Adding a tunnel endpoint for %s'), ip)
        try:
            session = db.get_session()
            with session.begin(subtransactions=True):
                tunnel = (session.query(ovs_models_v2.TunnelEndpoint).
                          filter_by(ip_address=ip).with_lockmode('update').
                          first())
                if tunnel is None:
                    tunnel_id = _generate_tunnel_id(session)
                    tunnel = ovs_models_v2.TunnelEndpoint(ip, tunnel_id)
                    session.add(tunnel)
                return tunnel
        except db_exc.DBDuplicateEntry:
            # A concurrent transaction grabbed the same id; retry.
            # (Original message was missing the space between 'concurrent'
            # and 'transaction' and misspelled 'committed'.)
            LOG.debug(_('Adding a tunnel endpoint failed due to a concurrent '
                        'transaction having been committed (%s attempts left)'),
                      max_retries - (i + 1))
    raise q_exc.NeutronException(
        message=_('Unable to generate a new tunnel id'))
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for GradientDescent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class GradientDescentOptimizerTest(test.TestCase):
  """Functional tests for gradient_descent.GradientDescentOptimizer."""
  def testBasic(self):
    """Dense update on Variables: var <- var - lr * grad."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           var0.eval())
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           var1.eval())
  def testBasicResourceVariable(self):
    """Same dense update applied to ResourceVariables."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           var0.eval())
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           var1.eval())
  def testMinimizeResourceVariable(self):
    """minimize() on the squared error of a linear prediction."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(var0, x) + var1
        loss = pred * pred
        sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        # d(loss)/d(pred) = 2 * pred; chain rule gives the per-weight grads
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
  def testMinimizeSparseResourceVariable(self):
    """minimize() through embedding_lookup (sparse gradient path)."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
        pred += var1
        loss = pred * pred
        sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
  def testTensorLearningRate(self):
    """Learning rate supplied as a Tensor instead of a Python float."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        lrate = constant_op.constant(3.0)
        sgd_op = gradient_descent.GradientDescentOptimizer(
            lrate).apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           var0.eval())
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           var1.eval())
  def testGradWrtRef(self):
    """compute_gradients works with respect to variable refs."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        opt = gradient_descent.GradientDescentOptimizer(3.0)
        values = [1.0, 3.0]
        vars_ = [variables.Variable([v], dtype=dtype) for v in values]
        grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
        variables.global_variables_initializer().run()
        # d(v0 + v1)/dv_i == 1 for both variables
        for grad, _ in grads_and_vars:
          self.assertAllCloseAccordingToType([1.0], grad.eval())
  def testWithGlobalStep(self):
    """apply_gradients increments the supplied global_step variable."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        global_step = variables.Variable(0, trainable=False)
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]), global_step=global_step)
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params and global_step
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           var0.eval())
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           var1.eval())
        self.assertAllCloseAccordingToType(1, global_step.eval())
  def testSparseBasic(self):
    """IndexedSlices gradients update only the touched rows."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
        var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
        grads0 = ops.IndexedSlices(
            constant_op.constant(
                [0.1], shape=[1, 1], dtype=dtype),
            constant_op.constant([0]),
            constant_op.constant([2, 1]))
        grads1 = ops.IndexedSlices(
            constant_op.constant(
                [0.01], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0], [2.0]], var0.eval())
        self.assertAllCloseAccordingToType([[3.0], [4.0]], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params: only row 0 of var0 / row 1 of var1 change
        self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]],
                                           var0.eval())
        self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]],
                                           var1.eval())
# Run the optimizer test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| |
import numpy as np
def write_array(fname, xa):
    """Write the values of *xa* to *fname*, one '%lf'-formatted value per line."""
    # 'with' guarantees the file is closed even if a write raises
    with open(fname, "w") as fp:
        for x in xa:
            fp.write('%lf\n' % x)
def read_array(fname):
    """Read a newline-separated column of floats from *fname*.

    Returns a 1-D numpy array of dtype float32 (kept for backward
    compatibility with existing callers).
    """
    # The original also opened the file and read every line, discarding the
    # result; np.fromfile does the whole parse on its own.
    return np.fromfile(fname, dtype=np.float32, sep='\n')
def write_two_arrays(fname, xa, ya):
    """Write xa and ya to *fname* as two tab-separated '% 10e' columns."""
    with open(fname, "w") as fp:  # close even if a write raises
        for i in range(len(xa)):
            fp.write('% 10e\t% 10e\n' % (xa[i], ya[i]))
def read_two_arrays(fname):
    """Read two whitespace-separated float columns from *fname*.

    Returns (x, y) as equal-length float64 numpy arrays.
    """
    with open(fname, "r") as fp:  # original leaked the file handle
        lines = fp.readlines()
    n = len(lines)
    x = np.zeros(n)
    y = np.zeros(n)
    for i, line in enumerate(lines):
        cols = line.split()       # split once per line, not once per column
        x[i] = float(cols[0])
        y[i] = float(cols[1])
    return x, y
def write_three_arrays(fname, xa, ya, za):
    """Write xa, ya, za to *fname* as three tab-separated '% 10e' columns."""
    with open(fname, "w") as fp:  # close even if a write raises
        for i in range(len(xa)):
            fp.write('% 10e\t% 10e\t% 10e\n' % (xa[i], ya[i], za[i]))
def read_three_arrays(fname):
    """Read three whitespace-separated float columns from *fname*.

    Returns (x, y, z) as equal-length float64 numpy arrays.
    """
    with open(fname, "r") as fp:  # original leaked the file handle
        lines = fp.readlines()
    n = len(lines)
    x = np.zeros(n)
    y = np.zeros(n)
    z = np.zeros(n)
    for i, line in enumerate(lines):
        cols = line.split()       # split once per line, not once per column
        x[i] = float(cols[0])
        y[i] = float(cols[1])
        z[i] = float(cols[2])
    return x, y, z
def write_four_arrays(fname, a, b, c, d):
    """Write four tab-separated '% 10e' columns to *fname*."""
    with open(fname, "w") as fp:  # close even if a write raises
        for i in range(len(a)):
            fp.write('% 10e\t% 10e\t% 10e\t% 10e\n' % (a[i], b[i], c[i], d[i]))
def read_four_arrays(fname):
    """Read four whitespace-separated float columns from *fname*.

    Returns (a, b, c, d) as equal-length float64 numpy arrays.
    """
    with open(fname, "r") as fp:  # original leaked the file handle
        lines = fp.readlines()
    n = len(lines)
    a = np.zeros(n)
    b = np.zeros(n)
    c = np.zeros(n)
    d = np.zeros(n)
    for i, line in enumerate(lines):
        cols = line.split()       # split once per line, not once per column
        a[i] = float(cols[0])
        b[i] = float(cols[1])
        c[i] = float(cols[2])
        d[i] = float(cols[3])
    return a, b, c, d
def read_four_arrays_nlines(fname):
    """Read four float columns from *fname*, skipping the first (count) line.

    Returns (a, b, c, d) as float64 numpy arrays.  An empty file yields
    empty arrays (the original crashed on np.zeros(-1)).
    """
    with open(fname, "r") as fp:  # original leaked the file handle
        rows = fp.readlines()[1:]  # first line holds the row count; skip it
    n = len(rows)
    a = np.zeros(n)
    b = np.zeros(n)
    c = np.zeros(n)
    d = np.zeros(n)
    for i, row in enumerate(rows):
        cols = row.split()
        a[i] = float(cols[0])
        b[i] = float(cols[1])
        c[i] = float(cols[2])
        d[i] = float(cols[3])
    return a, b, c, d
def write_five_arrays(fname, a, b, c, d, f):
    """Write five tab-separated '% 10e' columns to *fname*."""
    with open(fname, "w") as fp:  # close even if a write raises
        for i in range(len(a)):
            fp.write('% 10e\t% 10e\t% 10e\t% 10e\t% 10e\n'
                     % (a[i], b[i], c[i], d[i], f[i]))
def write_five_arrays_nlines(fname, a, b, c, d, f):
    """Write five '% 10e' columns to *fname*, preceded by a row-count line."""
    with open(fname, "w") as fp:  # close even if a write raises
        fp.write("%s\n" % len(a))
        for i in range(len(a)):
            fp.write('% 10e\t% 10e\t% 10e\t% 10e\t% 10e\n'
                     % (a[i], b[i], c[i], d[i], f[i]))
def read_five_arrays(fname):
    """Read five whitespace-separated float columns from *fname*.

    Returns (a, b, c, d, f) as equal-length float64 numpy arrays.
    """
    with open(fname, "r") as fp:  # original leaked the file handle
        lines = fp.readlines()
    n = len(lines)
    a = np.zeros(n)
    b = np.zeros(n)
    c = np.zeros(n)
    d = np.zeros(n)
    f = np.zeros(n)
    for i, line in enumerate(lines):
        cols = line.split()       # split once per line, not once per column
        a[i] = float(cols[0])
        b[i] = float(cols[1])
        c[i] = float(cols[2])
        d[i] = float(cols[3])
        f[i] = float(cols[4])
    return a, b, c, d, f
def read_seven_arrays_nlines(fname):
    """Read seven float columns from *fname*, skipping the first (count) line.

    Returns seven float64 numpy arrays; an empty file yields empty arrays
    (the original crashed on np.zeros(-1)).
    """
    with open(fname, "r") as fp:  # original leaked the file handle
        rows = fp.readlines()[1:]  # first line holds the row count; skip it
    n = len(rows)
    arrays = [np.zeros(n) for _ in range(7)]
    for i, row in enumerate(rows):
        cols = row.split()
        for j in range(7):
            arrays[j][i] = float(cols[j])
    a, b, c, d, f, g, h = arrays
    return a, b, c, d, f, g, h
def read_seven_arrays(fname):
    """Read seven whitespace-separated float columns from *fname*.

    Returns (a, b, c, d, f, g, h) as equal-length float64 numpy arrays.
    """
    with open(fname, "r") as fp:  # original leaked the file handle
        lines = fp.readlines()
    n = len(lines)
    arrays = [np.zeros(n) for _ in range(7)]
    for i, line in enumerate(lines):
        cols = line.split()       # split once per line, not once per column
        for j in range(7):
            arrays[j][i] = float(cols[j])
    a, b, c, d, f, g, h = arrays
    return a, b, c, d, f, g, h
def read_eight_arrays(fname):
    """Read eight whitespace-separated float columns from *fname*.

    Returns (a, b, c, d, f, g, h, j) as equal-length float64 numpy arrays.
    """
    with open(fname, "r") as fp:  # original leaked the file handle
        lines = fp.readlines()
    n = len(lines)
    arrays = [np.zeros(n) for _ in range(8)]
    for i, line in enumerate(lines):
        cols = line.split()       # split once per line, not once per column
        for k in range(8):
            arrays[k][i] = float(cols[k])
    a, b, c, d, f, g, h, j = arrays
    return a, b, c, d, f, g, h, j
def read_twelve_arrays_nline(fname):
    """Read twelve float columns from *fname*, skipping the first (count) line.

    Returns twelve float64 numpy arrays; an empty file yields empty arrays
    (the original crashed on np.zeros(-1)).
    """
    with open(fname, "r") as fp:  # original leaked the file handle
        rows = fp.readlines()[1:]  # first line holds the row count; skip it
    n = len(rows)
    arrays = [np.zeros(n) for _ in range(12)]
    for i, row in enumerate(rows):
        cols = row.split()
        for j in range(12):
            arrays[j][i] = float(cols[j])
    a, b, c, d, f, g, h, j, k, l, m, p = arrays
    return a, b, c, d, f, g, h, j, k, l, m, p
| |
# Author: Andrew.M.G.Reynen
from __future__ import print_function
from future.utils import iteritems
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import Qt
from lazylyst.UI.Configuration import Ui_ConfDialog
from Actions import Action, ActionSetupDialog
# Configuration dialog
class ConfDialog(QtWidgets.QDialog, Ui_ConfDialog):
    def __init__(self,parent=None,main=None,actions=None,
                 pref=None,hotVar=None):
        """Build the configuration dialog around Lazylyst's live state.

        main, actions, pref and hotVar are the main window's objects;
        the dialog edits them in place.
        """
        QtWidgets.QDialog.__init__(self,parent)
        self.setupUi(self)
        self.main=main
        self.pref=pref
        self.act=actions
        self.hotVar=hotVar
        # Give the dialog some functionaly
        self.setFunctionality()
        # Load in the previous lists of preferences and actions
        self.loadLists()
# Set up some functionality to the configuration dialog
    def setFunctionality(self):
        """Wire up key presses, context menus, reordering, and buttons."""
        # Key press events (also includes left double click)
        self.confPrefList.keyPressedSignal.connect(self.prefListKeyEvent)
        self.confActiveList.keyPressedSignal.connect(self.actionListKeyEvent)
        self.confPassiveList.keyPressedSignal.connect(self.actionListKeyEvent)
        # Right click menus for the action lists
        self.confActiveList.setContextMenuPolicy(Qt.CustomContextMenu)
        self.confPassiveList.setContextMenuPolicy(Qt.CustomContextMenu)
        self.confActiveList.customContextMenuRequested.connect(self.createActionMenu)
        self.confPassiveList.customContextMenuRequested.connect(self.createActionMenu)
        # If the ordering of the passive list ever changes, update actPassiveOrder
        self.confPassiveList.leaveSignal.connect(self.updatePassiveOrder)
        # Add and delete action buttons
        self.confActiveAddButton.clicked.connect(lambda: self.addDelClicked('active','add'))
        self.confActiveDelButton.clicked.connect(lambda: self.addDelClicked('active','del'))
        self.confPassiveAddButton.clicked.connect(lambda: self.addDelClicked('passive','add'))
        self.confPassiveDelButton.clicked.connect(lambda: self.addDelClicked('passive','del'))
# Load in all of the lists from previous state
    def loadLists(self):
        """Populate the preference, active-action and passive-action lists."""
        # Add the passive actions in order
        for key in self.main.actPassiveOrder:
            self.confPassiveList.addItem(self.actionItem(key))
        # Add the active actions (will be alphabetically ordered)
        for key in [key for key in self.act.keys() if not self.act[key].passive]:
            self.confActiveList.addItem(self.actionItem(key))
        # Add the preferences
        for key in self.pref.keys():
            item=QtWidgets.QListWidgetItem()
            item.setText(key)
            # Preferences never sleep; False just picks the awake color
            self.setItemSleepColor(item,False)
            item.setToolTip(self.pref[key].tip)
            self.confPrefList.addItem(item)
# Handle add and delete action button click...
# ...mocks key press events
def addDelClicked(self,whichList,addDel):
# Set the appropriate current list
self.curList=self.confActiveList if whichList=='active' else self.confPassiveList
# Set the current lists button press
self.curList.key=Qt.Key_Insert if addDel=='add' else Qt.Key_Delete
# Forward this to the key press event handles
self.actionListKeyEvent(mock=True)
# Return which action list is in focus
def getCurActionList(self):
curList=None
if self.confActiveList.hasFocus() or self.confPassiveList.hasFocus():
curList=self.confActiveList if self.confActiveList.hasFocus() else self.confPassiveList
return curList
# Function to handle calls to the preference lists
def prefListKeyEvent(self):
# If the preference list had focus (again "backspace" is triggered by double click)
if self.confPrefList.key==Qt.Key_Backspace:
# Grab the key, and update if possible
item=self.confPrefList.currentItem()
if item.isSelected():
self.pref[item.text()].update(self)
# Function to handle calls to the active and passive lists
    def actionListKeyEvent(self,mock=False):
        """Dispatch insert/backspace/delete key presses on the action lists.

        When mock=True, self.curList and its .key were pre-set by
        addDelClicked; otherwise they come from the focused widget.
        """
        # If this was not a mock event (ie. actually triggered by key press)
        if not mock:
            self.curList=self.getCurActionList()
        # See if either of the action lists had focus, otherwise skip
        if self.curList==None:
            return
        # Skip if no accepted keys were passed
        if self.curList.key not in [Qt.Key_Insert,Qt.Key_Backspace,Qt.Key_Delete]:
            return
        # Creating a new action (Insert Key)
        if self.curList.key==Qt.Key_Insert:
            self.createAction()
            return
        # Skip if no action was selected
        if self.curList.currentItem() is None:
            return
        if not self.curList.currentItem().isSelected():
            return
        # Mark if the current action is currently in use
        # (running as a timer "strolling" or thread "scheming")
        inUse=(self.curList.currentItem().text() in list(self.main.qTimers.keys())+list(self.main.qThreads.keys()))
        # Updating an action (Backspace Key -> which is triggered by double click)
        if self.curList.key==Qt.Key_Backspace:
            self.updateAction(inUse=inUse)
        # Delete an action (Delete Key)
        elif self.curList.key==Qt.Key_Delete:
            self.deleteAction(inUse=inUse)
        # Update the passive order every time (ie. do not care how configuration dialog is closed)...
        # ...a passive action is added or edited
        self.updatePassiveOrder()
# Assign the tool tip to an action
def actionItem(self,key):
item=QtWidgets.QListWidgetItem()
item.setText(key)
# Set the tip (which is the trigger value)
if not self.act[key].passive:
try:
item.setToolTip('Keybind: '+self.act[key].trigger.toString())
except:
item.setToolTip('Keybind: '+self.act[key].trigger)
else:
self.setPassiveTip(item)
# Set the color of the item displaying the sleep state
self.setItemSleepColor(item,self.act[key].sleeping)
return item
# Set a passive items tip
def setPassiveTip(self,item):
triggers=self.act[item.text()].trigger
# Don't fill the entire screen if many triggers
if len(triggers)>3:
item.setToolTip('Activated by: '+','.join(triggers[:3]+['...']))
else:
item.setToolTip('Activated by: '+','.join(triggers))
# Update the selected action from the specified list
    def updateAction(self,inUse=False):
        """Edit the selected action via the set-up dialog.

        While the action is running (inUse), only trigger edits are allowed
        and the returned action is discarded.
        """
        # Let user know if edits will be accepted later
        if inUse:
            print('Only trigger edits are allowed as action is strolling or scheming')
        # Open the action set-up dialog with selected action
        action=self.openActionSetup(self.act[self.curList.currentItem().text()],tempLock=inUse)
        # Do not update if no action returned or action is in use
        if action==None or inUse:
            return
        # Remove the old action (the tag may have been renamed)
        self.act.pop(self.curList.currentItem().text())
        # Update the action dictionary with new
        self.act[action.tag]=action
        # If the action change had anything to do with an active action
        # (moved between lists, or stayed active): rebuild the item
        if (self.curList==self.confActiveList and action.passive) or (not action.passive):
            self.curList.takeItem(self.curList.currentRow())
            oList=self.confPassiveList if action.passive else self.confActiveList
            oList.addItem(self.actionItem(action.tag))
        # Otherwise just update the existing passive item (to preserve the passive order)
        else:
            self.curList.currentItem().setText(action.tag)
            self.setPassiveTip(self.curList.currentItem())
# Open the action set-up dialog with a blank new action
def createAction(self):
# Make the new action
if self.confActiveList==self.curList:
action=self.openActionSetup(Action(passive=False,trigger='Set Trigger'))
else:
action=self.openActionSetup(Action(passive=True,trigger=[]))
self.insertLazyAction(action)
# Insert a new action
def insertLazyAction(self,action):
if action==None:
return
# Insert new action to the action dictionary
self.act[action.tag]=action
# Create the item for the list widget
item=self.actionItem(action.tag)
# Add the action to the appropriate list
if action.passive:
self.confPassiveList.addItem(item)
else:
self.confActiveList.addItem(item)
# Remove the selected action from the specified list
    def deleteAction(self,inUse=False):
        """Remove the currently selected action from its list and self.act.

        Running (``inUse``) and locked (built-in) actions cannot be deleted.
        """
        if inUse:
            print('Cannot delete an action which is strolling or scheming')
            return
        actTag=self.curList.currentItem().text()
        # If this is a locked action, the user cannot delete it
        if self.act[actTag].locked:
            print(actTag+' is a built-in action, it cannot be deleted')
            return
        # Remove from the list
        self.curList.takeItem(self.curList.currentRow())
        # As well as from the action dictionary
        self.act.pop(actTag)
# Menu activated upon right clicking of action entries
    def createActionMenu(self,pos):
        """Show the right-click context menu for an action list item.

        ``pos`` is the click position relative to the list widget
        (presumably as delivered by a customContextMenuRequested signal —
        TODO confirm at the connection site).
        """
        # Get the key of the clicked on action
        self.curList=self.getCurActionList()
        item=self.curList.currentItem()
        if not item.isSelected():
            return
        self.menuAction=self.act[str(item.text())]
        # Can not edit locked actions, skip if locked
        if self.menuAction.locked:
            print('Cannot sleep or copy locked actions')
            return
        # Create the menu, and fill with options...
        self.actionMenu= QtWidgets.QMenu()
        # ...sleep or awake (label reflects the state the click switches to)
        if self.menuAction.sleeping:
            menuItem=self.actionMenu.addAction("Awake")
        else:
            menuItem=self.actionMenu.addAction("Sleep")
        menuItem.triggered.connect(self.toggleSleep)
        # ...copy the action
        menuItem=self.actionMenu.addAction("Copy")
        menuItem.triggered.connect(self.copyAction)
        # Move to cursor position and show
        parentPosition = self.curList.mapToGlobal(QtCore.QPoint(0, 0))
        self.actionMenu.move(parentPosition + pos)
        self.actionMenu.show()
# Toggle the action sleeping state on/off
def toggleSleep(self):
item=self.curList.currentItem()
if self.menuAction.sleeping:
self.menuAction.sleeping=False
else:
self.menuAction.sleeping=True
self.setItemSleepColor(item,self.menuAction.sleeping)
# Set the color of a list widget item based on the actions sleeping state
def setItemSleepColor(self,item,sleeping):
if sleeping:
item.setForeground(QtGui.QColor(150,150,150))
else:
item.setForeground(QtGui.QColor(40,40,40))
# Copy an action, triggered from the action menu
    def copyAction(self):
        """Duplicate the menu action under a unique tag, triggered from the action menu.

        The copy shares all attributes except ``func`` (re-linked fresh) and
        gets a ``tag(i)`` name; active copies get a placeholder trigger the
        user must replace before the configuration dialog closes.
        """
        # Make a copy of the action; skip 'func' since the bound function is
        # re-created by linkToFunction below
        newAction=Action()
        for key,val in iteritems(self.menuAction.__dict__):
            if key!='func':
                setattr(newAction,key,val)
        newAction.linkToFunction(self.main)
        # Set up the new actions tag: first unused "tag(i)" suffix
        seenKeys=self.act.keys()
        i=0
        while self.menuAction.tag+'('+str(i)+')' in seenKeys:
            i+=1
        # Assign the unique tag, and give a meaningless trigger (if an active action)
        newAction.tag=self.menuAction.tag+'('+str(i)+')'
        if not newAction.passive:
            print('Update '+newAction.tag+' trigger value, will be deleted upon configuration closure otherwise')
            newAction.trigger='Set Trigger'
        # Insert the copied action
        self.insertLazyAction(newAction)
# Update the passive list ordering to what it seen by the user
    def updatePassiveOrder(self):
        """Persist the user-arranged (visual) passive action ordering on the main window."""
        self.main.actPassiveOrder=self.confPassiveList.visualListOrder()
# Open the setup action dialog
    def openActionSetup(self,action,tempLock=False):
        """Open the action set-up dialog for ``action``.

        Returns the (possibly edited) action when the dialog is accepted,
        or None implicitly when it is cancelled.
        """
        self.dialog=ActionSetupDialog(self.main,action,self.act,
                                      self.hotVar,self.pref,tempLock)
        if self.dialog.exec_():
            action=self.dialog.returnAction()
            return action
# On close, make sure that no new actions have incorrect triggers
def closeEvent(self,ev):
popKeys=[actKey for actKey in self.act.keys() if (self.act[actKey].trigger=='Set Trigger' and
not self.act[actKey].passive)]
for key in popKeys:
print('Action '+key+' was removed, as its trigger value was not set')
self.act.pop(key)
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.xmlrpc import client as xmlrpc_client
import os
import mock
from flexget.plugins.clients.rtorrent import RTorrent
# Path to the fixture torrent shipped alongside this test module
torrent_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'private.torrent')
# file:// URL form used in the mocked task configs below
torrent_url = 'file:///%s' % torrent_file
# Info hash of the fixture torrent, reused across all test cases
torrent_info_hash = '09977FE761B8D293AD8A929CCAF2E9322D525A6C'
# Raw torrent bytes, read once at import time for the load() tests
with open(torrent_file, 'rb') as tor_file:
    torrent_raw = tor_file.read()
def compare_binary(obj1, obj2):
    """Equality check for xmlrpclib Binary objects inside mocked calls.

    Two objects match when obj1 is of obj2's type and both carry the same
    ``data`` payload.
    """
    return isinstance(obj1, type(obj2)) and obj1.data == obj2.data
class Matcher(object):
    """Argument matcher that delegates equality to a user-supplied comparator.

    mock compares recorded call arguments with ``==``, so placing a Matcher
    in an expected-call check routes the comparison through ``compare``.
    """

    def __init__(self, compare, some_obj):
        # Comparator callable and the reference value it compares against
        self.compare = compare
        self.some_obj = some_obj

    def __eq__(self, other):
        # Delegate the equality decision to the comparator
        return self.compare(self.some_obj, other)
@mock.patch('flexget.plugins.clients.rtorrent.xmlrpc_client.ServerProxy')
class TestRTorrentClient(object):
    """Unit tests for the RTorrent client wrapper against a mocked XML-RPC proxy.

    NOTE(review): several asserts below use ``mock_obj.called_with(...)``.
    Mock auto-creates that attribute, so those asserts are always truthy
    and verify nothing.  The real assertion is ``assert_called_with``;
    confirm the expected argument shapes before tightening them.
    """
    def test_version(self, mocked_proxy):
        # The dotted client_version string is parsed into a list of ints
        mocked_client = mocked_proxy()
        mocked_client.system.client_version.return_value = '0.9.4'
        client = RTorrent('http://localhost/RPC2')
        assert client.version == [0, 9, 4]
        assert mocked_client.system.client_version.called
    def test_load(self, mocked_proxy):
        # load() should mkdir the target directory, then hand the raw
        # torrent plus d.*.set field commands to load.raw_start
        mocked_proxy = mocked_proxy()
        mocked_proxy.execute.throw.return_value = 0
        mocked_proxy.load.raw_start.return_value = 0
        client = RTorrent('http://localhost/RPC2')
        resp = client.load(
            torrent_raw,
            fields={'priority': 3, 'directory': '/data/downloads', 'custom1': 'testing'},
            start=True,
            mkdir=True,
        )
        assert resp == 0
        # Ensure mkdir was called
        mocked_proxy.execute.throw.assert_called_with('', 'mkdir', '-p', '/data/downloads')
        # Ensure load was called with the binary payload among its args
        assert mocked_proxy.load.raw_start.called
        match_binary = Matcher(compare_binary, xmlrpc_client.Binary(torrent_raw))
        called_args = mocked_proxy.load.raw_start.call_args_list[0][0]
        assert len(called_args) == 5
        assert '' == called_args[0]
        assert match_binary in called_args
        # The remaining positional args are the per-field set commands
        fields = [p for p in called_args[2:]]
        assert len(fields) == 3
        assert 'd.directory.set=\\/data\\/downloads' in fields
        assert 'd.custom1.set=testing' in fields
        assert 'd.priority.set=3' in fields
    def test_torrent(self, mocked_proxy):
        # torrent() must add its required fields on top of the requested ones
        mocked_proxy = mocked_proxy()
        mocked_proxy.system.multicall.return_value = [
            ['/data/downloads'], ['private.torrent'], [torrent_info_hash], ['test_custom1'], [123456]
        ]
        client = RTorrent('http://localhost/RPC2')
        torrent = client.torrent(torrent_info_hash, fields=['custom1', 'down_rate'])  # Required fields should be added
        assert isinstance(torrent, dict)
        assert torrent.get('base_path') == '/data/downloads'
        assert torrent.get('hash') == torrent_info_hash
        assert torrent.get('custom1') == 'test_custom1'
        assert torrent.get('name') == 'private.torrent'
        assert torrent.get('down_rate') == 123456
        # NOTE(review): called_with is not a real mock assertion (see class
        # docstring); this check is currently a no-op
        assert mocked_proxy.system.multicall.called_with(([
            {'params': (torrent_info_hash,), 'methodName': 'd.base_path'},
            {'params': (torrent_info_hash,), 'methodName': 'd.name'},
            {'params': (torrent_info_hash,), 'methodName': 'd.hash'},
            {'params': (torrent_info_hash,), 'methodName': 'd.custom1'},
            {'params': (torrent_info_hash,), 'methodName': 'd.down.rate'},
        ]))
    def test_torrents(self, mocked_proxy):
        # torrents() should map each multicall row onto a torrent dict
        mocked_proxy = mocked_proxy()
        hash1 = '09977FE761AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
        hash2 = '09977FE761BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'
        mocked_proxy.d.multicall.return_value = (
            ['/data/downloads', 'private.torrent', hash1, 'test_custom1'],
            ['/data/downloads', 'private.torrent', hash2, 'test_custom2'],
        )
        client = RTorrent('http://localhost/RPC2')
        torrents = client.torrents(fields=['custom1'])  # Required fields should be added
        assert isinstance(torrents, list)
        for torrent in torrents:
            assert torrent.get('base_path') == '/data/downloads'
            assert torrent.get('name') == 'private.torrent'
            if torrent.get('hash') == hash1:
                assert torrent.get('custom1') == 'test_custom1'
            elif torrent.get('hash') == hash2:
                assert torrent.get('custom1') == 'test_custom2'
            else:
                assert False, 'Invalid hash returned'
        # NOTE(review): called_with is a no-op (see class docstring)
        assert mocked_proxy.system.multicall.called_with((
            ['main', 'd.directory_base=', 'd.name=', 'd.hash=', u'd.custom1='],
        ))
    def test_update(self, mocked_proxy):
        # update() batches the field setters into one multicall
        mocked_proxy = mocked_proxy()
        mocked_proxy.system.multicall.return_value = [[0]]
        client = RTorrent('http://localhost/RPC2')
        update_fields = {
            'custom1': 'test_custom1',
            'directory_base': '/data/downloads',
            'priority': 3,
        }
        resp = client.update(torrent_info_hash, fields=update_fields)
        assert resp == 0
        # NOTE(review): called_with is a no-op (see class docstring)
        assert mocked_proxy.system.multicall.called_with(([
            {'params': (torrent_info_hash, '/data/downloads'), 'methodName': 'd.directory_base'},
            {'params': (torrent_info_hash, 'test_custom1'), 'methodName': 'd.custom1'},
            {'params': (torrent_info_hash, '/data/downloads'), 'methodName': 'd.custom1'}
        ]))
    def test_delete(self, mocked_proxy):
        mocked_proxy = mocked_proxy()
        mocked_proxy.d.erase.return_value = 0
        client = RTorrent('http://localhost/RPC2')
        resp = client.delete(torrent_info_hash)
        assert resp == 0
        # NOTE(review): called_with is a no-op (see class docstring)
        assert mocked_proxy.d.erase.called_with((torrent_info_hash,))
    def test_move(self, mocked_proxy):
        # move() should create the destination dir then mv the data over
        mocked_proxy = mocked_proxy()
        mocked_proxy.system.multicall.return_value = [
            ['private.torrent'], [torrent_info_hash], ['/data/downloads'],
        ]
        mocked_proxy.move.return_value = 0
        mocked_proxy.d.directory.set.return_value = 0
        mocked_proxy.execute.throw.return_value = 0
        client = RTorrent('http://localhost/RPC2')
        client.move(torrent_info_hash, '/new/folder')
        mocked_proxy.execute.throw.assert_has_calls([
            mock.call('', 'mkdir', '-p', '/new/folder'),
            mock.call('', 'mv', '-u', '/data/downloads', '/new/folder'),
        ])
    def test_start(self, mocked_proxy):
        mocked_proxy = mocked_proxy()
        mocked_proxy.d.start.return_value = 0
        client = RTorrent('http://localhost/RPC2')
        resp = client.start(torrent_info_hash)
        assert resp == 0
        # NOTE(review): called_with is a no-op (see class docstring)
        assert mocked_proxy.d.start.called_with((torrent_info_hash,))
    def test_stop(self, mocked_proxy):
        # stop() should both stop and close the torrent
        mocked_proxy = mocked_proxy()
        mocked_proxy.d.close.return_value = 0
        mocked_proxy.d.stop.return_value = 0
        client = RTorrent('http://localhost/RPC2')
        resp = client.stop(torrent_info_hash)
        assert resp == 0
        # NOTE(review): called_with is a no-op (see class docstring)
        assert mocked_proxy.d.stop.called_with((torrent_info_hash,))
        assert mocked_proxy.d.close.called_with((torrent_info_hash,))
@mock.patch('flexget.plugins.clients.rtorrent.RTorrent')
class TestRTorrentOutputPlugin(object):
    """End-to-end tests of the rtorrent output plugin with a mocked RTorrent client.

    The ``config`` string defines one task per scenario (add / add via set /
    update / update with path move / delete); each test runs its task and
    asserts the resulting client calls.
    """
    config = """
        tasks:
          test_add_torrent:
            accept_all: yes
            mock:
              - {title: 'test', url: '""" + torrent_url + """'}
            rtorrent:
              action: add
              start: yes
              mkdir: yes
              uri: http://localhost/SCGI
              priority: high
              path: /data/downloads
              custom1: test_custom1
          test_add_torrent_set:
            accept_all: yes
            set:
              path: /data/downloads
              custom1: test_custom1
              priority: low
              custom2: test_custom2
            mock:
              - {title: 'test', url: '""" + torrent_url + """'}
            rtorrent:
              action: add
              start: no
              mkdir: no
              uri: http://localhost/SCGI
          test_update:
            accept_all: yes
            set:
              path: /data/downloads
              priority: low
            mock:
              - {title: 'test', url: '""" + torrent_url + """', 'torrent_info_hash': '09977FE761B8D293AD8A929CCAF2E9322D525A6C'}
            rtorrent:
              action: update
              uri: http://localhost/SCGI
              custom1: test_custom1
          test_update_path:
            accept_all: yes
            mock:
              - {title: 'test', url: '""" + torrent_url + """', 'torrent_info_hash': '09977FE761B8D293AD8A929CCAF2E9322D525A6C'}
            rtorrent:
              action: update
              custom1: test_custom1
              uri: http://localhost/SCGI
              path: /new/path
          test_delete:
            accept_all: yes
            mock:
              - {title: 'test', url: '""" + torrent_url + """', 'torrent_info_hash': '09977FE761B8D293AD8A929CCAF2E9322D525A6C'}
            rtorrent:
              action: delete
              uri: http://localhost/SCGI
              custom1: test_custom1
    """
    def test_add(self, mocked_client, execute_task):
        mocked_client = mocked_client()
        mocked_client.load.return_value = 0
        mocked_client.version = [0, 9, 4]
        # First torrent() lookup reports "not present", second confirms added
        mocked_client.torrent.side_effect = [False, {'hash': torrent_info_hash}]
        execute_task('test_add_torrent')
        # 'high' priority maps to 3
        mocked_client.load.assert_called_with(
            torrent_raw,
            fields={'priority': 3, 'directory': '/data/downloads', 'custom1': 'test_custom1'},
            start=True,
            mkdir=True,
        )
    def test_add_set(self, mocked_client, execute_task):
        mocked_client = mocked_client()
        mocked_client.load.return_value = 0
        mocked_client.version = [0, 9, 4]
        mocked_client.torrent.side_effect = [False, {'hash': torrent_info_hash}]
        execute_task('test_add_torrent_set')
        # Fields come from the `set` plugin; 'low' priority maps to 1
        mocked_client.load.assert_called_with(
            torrent_raw,
            fields={
                'priority': 1,
                'directory': '/data/downloads',
                'custom1': 'test_custom1',
                'custom2': 'test_custom2'
            },
            start=False,
            mkdir=False,
        )
    def test_update(self, mocked_client, execute_task):
        mocked_client = mocked_client()
        mocked_client.version = [0, 9, 4]
        mocked_client.update.return_value = 0
        # ntpath complains on windows if base_path is a MagicMock
        mocked_client.torrent.side_effect = [False, {'base_path': ''}]
        execute_task('test_update')
        mocked_client.update.assert_called_with(
            torrent_info_hash,
            {'priority': 1, 'custom1': 'test_custom1'}
        )
    def test_update_path(self, mocked_client, execute_task):
        # An update that changes `path` should also trigger a move()
        mocked_client = mocked_client()
        mocked_client.version = [0, 9, 4]
        mocked_client.update.return_value = 0
        mocked_client.move.return_value = 0
        mocked_client.torrent.return_value = {'base_path': '/some/path'}
        execute_task('test_update_path')
        mocked_client.update.assert_called_with(
            torrent_info_hash,
            {'custom1': 'test_custom1'}
        )
        mocked_client.move.assert_called_with(
            torrent_info_hash,
            '/new/path',
        )
    def test_delete(self, mocked_client, execute_task):
        mocked_client = mocked_client()
        mocked_client.load.return_value = 0
        mocked_client.version = [0, 9, 4]
        mocked_client.delete.return_value = 0
        execute_task('test_delete')
        mocked_client.delete.assert_called_with(torrent_info_hash)
@mock.patch('flexget.plugins.clients.rtorrent.RTorrent')
class TestRTorrentInputPlugin(object):
    """Tests for the from_rtorrent input plugin with a mocked RTorrent client."""
    config = """
        tasks:
          test_input:
            accept_all: yes
            from_rtorrent:
              uri: http://localhost/RPC2
              view: complete
              fields:
                - custom1
                - custom3
                - down_rate
    """
    def test_input(self, mocked_client, execute_task):
        mocked_client = mocked_client()
        mocked_client.version = [0, 9, 4]
        mocked_torrent = {
            'name': 'private.torrent',
            'hash': torrent_info_hash,
            'base_path': '/data/downloads/private',
            'custom1': 'test_custom1',
            'custom3': 'test_custom3',
            'down_rate': 123456,
        }
        # Two identical torrents -> two entries should be produced
        mocked_client.torrents.return_value = [mocked_torrent, mocked_torrent]
        task = execute_task('test_input')
        mocked_client.torrents.assert_called_with(
            'complete',
            fields=['custom1', 'custom3', 'down_rate'],
        )
        assert len(task.all_entries) == 2
        # Each entry should carry the torrent metadata mapped to entry fields
        for entry in task.entries:
            assert entry['url'] == 'http://localhost/RPC2/%s' % torrent_info_hash
            assert entry['name'] == 'private.torrent'
            assert entry['torrent_info_hash'] == torrent_info_hash
            assert entry['path'] == '/data/downloads/private'
            assert entry['custom1'] == 'test_custom1'
            assert entry['custom3'] == 'test_custom3'
| |
"""
Topological sort.
From Tim Peters, see:
http://mail.python.org/pipermail/python-list/1999-July/006660.html
topsort takes a list of pairs, where each pair (x, y) is taken to
mean that x <= y wrt some abstract partial ordering. The return
value is a list, representing a total ordering that respects all
the input constraints.
E.g.,
topsort( [(1,2), (3,3)] )
may return any of (but nothing other than)
[3, 1, 2]
[1, 3, 2]
[1, 2, 3]
because those are the permutations of the input elements that
respect the "1 precedes 2" and "3 precedes 3" input constraints.
Note that a constraint of the form (x, x) is really just a trick
to make sure x appears *somewhere* in the output list.
If there's a cycle in the constraints, say
topsort( [(1,2), (2,1)] )
then CycleError is raised, and the exception object supports
many methods to help analyze and break the cycles. This requires
a good deal more code than topsort itself!
"""
from exceptions import Exception
class CycleError(Exception):
    """Raised by topsort when the input constraints contain a cycle.

    Carries the partial ordering found so far plus the remaining
    predecessor counts and successor lists, and offers helpers to analyze
    (and break) the cycle.  Uses ``self.args`` / ``in`` / ``list(...)``
    rather than exception indexing, ``has_key`` and bare ``keys()`` so the
    class works on both Python 2 and 3.
    """

    def __init__(self, sofar, numpreds, succs):
        Exception.__init__(self, "cycle in constraints",
                           sofar, numpreds, succs)
        # lazily-built elt -> list-of-predecessors map (see get_preds)
        self.preds = None

    def get_partial(self):
        """Return as much of the total ordering as topsort found before the cycle."""
        # index .args explicitly: Exception instances are not subscriptable
        # on Python 3 (self[1] was Python-2-only)
        return self.args[1]

    def get_pred_counts(self):
        """Return remaining elt -> count of predecessors map."""
        return self.args[2]

    def get_succs(self):
        """Return remaining elt -> list of successors map."""
        return self.args[3]

    def get_elements(self):
        """Return remaining elements (those that don't appear in get_partial())."""
        # materialize as a list so callers can index into it / random.choice it
        return list(self.get_pred_counts().keys())

    def get_pairlist(self):
        """Return a list of pairs representing the full remaining state.

        Passing this list back to topsort raises CycleError again with an
        isomorphic state.  To break the cycle: use pick_a_cycle to find one,
        pick an (x, y) pair in it you no longer want to respect, remove that
        pair from this list's output and try topsort again.
        """
        succs = self.get_succs()
        answer = []
        for x in self.get_elements():
            if x in succs:
                for y in succs[x]:
                    answer.append((x, y))
            else:
                # make sure x appears in topsort's output!
                answer.append((x, x))
        return answer

    def get_preds(self):
        """Return remaining elt -> list of predecessors map (computed once, cached)."""
        if self.preds is not None:
            return self.preds
        self.preds = preds = {}
        remaining_elts = self.get_elements()
        for x in remaining_elts:
            preds[x] = []
        succs = self.get_succs()
        for x in remaining_elts:
            if x in succs:
                for y in succs[x]:
                    preds[y].append(x)
        if __debug__:
            # every remaining element is in or behind a cycle, so each must
            # have at least one predecessor
            for x in remaining_elts:
                assert len(preds[x]) > 0
        return preds

    def pick_a_cycle(self):
        """Return a cycle [x, ..., x] at random.

        Everything remaining has a predecessor but not necessarily a
        successor, so crawling forward over succs may dead-end; instead we
        crawl backward over preds until an element repeats, then reverse
        the collected path.
        """
        remaining_elts = self.get_elements()
        preds = self.get_preds()
        from random import choice
        x = choice(remaining_elts)
        answer = []
        index = {}
        while x not in index:
            index[x] = len(answer)  # index of x in answer
            answer.append(x)
            x = choice(preds[x])
        answer.append(x)
        answer = answer[index[x]:]
        answer.reverse()
        return answer
def topsort(pairlist):
    """Topologically sort the partial-order constraints in ``pairlist``.

    Each (x, y) pair means x precedes y; an (x, x) pair merely forces x to
    appear in the output.  Returns a list giving a total ordering that
    respects every constraint, or raises CycleError when the constraints
    contain a cycle.  Rewritten with ``in`` / a list comprehension instead
    of ``has_key`` / ``filter`` so it runs on both Python 2 and 3.
    """
    numpreds = {}    # elt -> # of predecessors
    successors = {}  # elt -> list of successors
    for first, second in pairlist:
        # make sure every elt is a key in numpreds
        if first not in numpreds:
            numpreds[first] = 0
        if second not in numpreds:
            numpreds[second] = 0
        # if they're the same, there's no real dependence
        if first == second:
            continue
        # since first < second, second gains a pred ...
        numpreds[second] += 1
        # ... and first gains a succ
        successors.setdefault(first, []).append(second)
    # suck up everything without a predecessor; must be a real list (not a
    # lazy filter object) because the loop below appends while iterating
    answer = [x for x in numpreds if numpreds[x] == 0]
    # for everything in answer, knock down the pred count on its
    # successors; note that answer grows *in* the loop
    for x in answer:
        assert numpreds[x] == 0
        del numpreds[x]
        if x in successors:
            for y in successors[x]:
                numpreds[y] -= 1
                if numpreds[y] == 0:
                    answer.append(y)
            # following "del" isn't needed; just makes
            # CycleError details easier to grasp
            del successors[x]
    if numpreds:
        # everything in numpreds has at least one predecessor ->
        # there's a cycle
        if __debug__:
            for x in numpreds:
                assert numpreds[x] > 0
        raise CycleError(answer, numpreds, successors)
    return answer
def topsort_levels(pairlist):
    """Like topsort, but return the ordering as a list of levels.

    Each level is a list of elements that have no remaining predecessors
    once the earlier levels are removed; elements within one level are
    mutually unordered.  Raises CycleError on cyclic constraints.
    Rewritten with ``in`` instead of ``has_key`` so it runs on both
    Python 2 and 3.
    """
    numpreds = {}    # elt -> # of predecessors
    successors = {}  # elt -> list of successors
    for first, second in pairlist:
        # make sure every elt is a key in numpreds
        if first not in numpreds:
            numpreds[first] = 0
        if second not in numpreds:
            numpreds[second] = 0
        # if they're the same, there's no real dependence
        if first == second:
            continue
        # since first < second, second gains a pred ...
        numpreds[second] += 1
        # ... and first gains a succ
        successors.setdefault(first, []).append(second)
    answer = []
    while 1:
        # Suck up everything without a predecessor.
        levparents = [x for x in numpreds if numpreds[x] == 0]
        if not levparents:
            break
        answer.append(levparents)
        for levparent in levparents:
            del numpreds[levparent]
            if levparent in successors:
                for levparentsucc in successors[levparent]:
                    numpreds[levparentsucc] -= 1
                del successors[levparent]
    if numpreds:
        # Everything left has at least one predecessor -> there's a cycle.
        raise CycleError(answer, numpreds, successors)
    return answer
| |
# -*- coding: utf-8 -*-
"""Linear Filters for time series analysis and testing
TODO:
* check common sequence in signature of filter functions (ar,ma,x) or (x,ar,ma)
Created on Sat Oct 23 17:18:03 2010
Author: Josef-pktd
"""
#not original code: collected from various experimental scripts;
#the version control history lives there
from statsmodels.compat.python import range
import numpy as np
import scipy.fftpack as fft
from scipy import signal
from scipy.signal.signaltools import _centered as trim_centered
from ._utils import _maybe_get_pandas_wrapper
def _pad_nans(x, head=None, tail=None):
if np.ndim(x) == 1:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[np.nan] * head, x, [np.nan] * tail]
elif tail is None:
return np.r_[[np.nan] * head, x]
elif head is None:
return np.r_[x, [np.nan] * tail]
elif np.ndim(x) == 2:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[[np.nan] * x.shape[1]] * head, x,
[[np.nan] * x.shape[1]] * tail]
elif tail is None:
return np.r_[[[np.nan] * x.shape[1]] * head, x]
elif head is None:
return np.r_[x, [[np.nan] * x.shape[1]] * tail]
else:
raise ValueError("Nan-padding for ndim > 2 not implemented")
#original changes and examples in sandbox.tsa.try_var_convolve
# don't do these imports, here just for copied fftconvolve
#get rid of these imports
#from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
# ifftn, fftfreq
#from numpy import product,array
def fftconvolveinv(in1, in2, mode="full"):
    """Inverse-filter (deconvolve) two N-dimensional arrays using FFT.

    Copied from scipy.signal.signaltools' fftconvolve, but the second
    array's spectrum *divides* instead of multiplies (an elementwise
    inverse filter).  Looks correct for the 1d case; the multidimensional
    inverse filter does not work (original note, 2010-10-23) — the
    elementwise inverse is not a matrix inverse, so VARMA fails.

    Parameters
    ----------
    in1, in2 : ndarray
        Data array and filter array.
    mode : {"full", "same", "valid"}
        Output-size convention, as in scipy.signal.convolve.

    Returns
    -------
    ndarray
        Deconvolution result, real-valued when both inputs are real.
    """
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    # np.complex was removed from numpy; test against the abstract
    # complex scalar type instead
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1
    # Always use 2**n-sized FFT; FFT shapes must be ints, not the floats
    # that 2**np.ceil(...) produces
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
    IN1 = fft.fftn(in1, fsize)
    # divide (inverse filter) instead of fftconvolve's multiply
    IN1 /= fft.fftn(in2, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = fft.ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        # np.product is removed in modern numpy; np.prod is the long-standing name
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return trim_centered(ret, osize)
    elif mode == "valid":
        return trim_centered(ret, abs(s2 - s1) + 1)
#code duplication with fftconvolveinv
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
    """FFT-based convolution of in1 with in2 plus inverse filtering by in3.

    Computes ifft(fft(in1) * fft(in2) / fft(in3)); intended for ARMA
    filtering with in1=data, in2=numerator (MA), in3=denominator (AR).
    in2 and in3 need consistent dimension/shape since the max (not the
    sum) of their shapes is used.  Looks correct for the 1d case; the
    multidimensional inverse filter does not work (original note,
    2010-10-23) — the elementwise inverse is not a matrix inverse, so
    VARMA fails.

    Parameters
    ----------
    in1 : ndarray
        Data array.
    in2, in3 : ndarray or None
        Numerator (multiplied) and denominator (divided) filter arrays;
        at least one must be given.
    mode : {"full", "same", "valid"}
        Output-size convention, as in scipy.signal.convolve.

    Raises
    ------
    ValueError
        If both in2 and in3 are None.
    """
    if (in2 is None) and (in3 is None):
        raise ValueError('at least one of in2 and in3 needs to be given')
    s1 = np.array(in1.shape)
    if in2 is not None:
        s2 = np.array(in2.shape)
    else:
        s2 = 0
    if in3 is not None:
        s3 = np.array(in3.shape)
        s2 = max(s2, s3)  # use the larger filter shape; reasonable for ARMA
    # the original unconditionally inspected in2.dtype, crashing when only
    # in3 was supplied; check whichever inputs are present
    complex_result = np.issubdtype(in1.dtype, np.complexfloating)
    for other in (in2, in3):
        if other is not None:
            complex_result = (complex_result or
                              np.issubdtype(other.dtype, np.complexfloating))
    size = s1 + s2 - 1
    # Always use 2**n-sized FFT; FFT shapes must be ints
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
    # start from the data spectrum so IN1 is always bound; the original
    # raised NameError when in2 was omitted
    IN1 = fft.fftn(in1, fsize)
    if in2 is not None:
        IN1 *= fft.fftn(in2, fsize)
    if in3 is not None:
        # elementwise division implements the inverse filter (NOT a
        # matrix inverse; does not work for VARMA)
        IN1 /= fft.fftn(in3, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = fft.ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return trim_centered(ret, osize)
    elif mode == "valid":
        return trim_centered(ret, abs(s2 - s1) + 1)
#original changes and examples in sandbox.tsa.try_var_convolve
#examples and tests are there
def recursive_filter(x, ar_coeff, init=None):
    '''
    Autoregressive, or recursive, filtering.

    Parameters
    ----------
    x : array-like
        Time-series data. Should be 1d or n x 1.
    ar_coeff : array-like
        AR coefficients in reverse time order. See Notes
    init : array-like
        Initial values of the time-series prior to the first value of y.
        The default is zero. Must have the same length as ar_coeff.

    Returns
    -------
    y : array
        Filtered array, number of columns determined by x and ar_coeff. If a
        pandas object is given, a pandas object is returned.

    Notes
    -----
    Computes the recursive filter ::

        y[n] = ar_coeff[0] * y[n-1] + ...
                + ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]

    where n_coeff = len(ar_coeff).
    '''
    # remember whether to wrap the result back into a pandas object
    _pandas_wrapper = _maybe_get_pandas_wrapper(x)
    x = np.asarray(x).squeeze()
    ar_coeff = np.asarray(ar_coeff).squeeze()

    if x.ndim > 1 or ar_coeff.ndim > 1:
        raise ValueError('x and ar_coeff have to be 1d')

    if init is not None:  # integer init are treated differently in lfiltic
        if len(init) != len(ar_coeff):
            raise ValueError("ar_coeff must be the same length as init")
        init = np.asarray(init, dtype=float)

    if init is not None:
        # build lfilter initial conditions equivalent to the given
        # pre-sample values of the series
        zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)
    else:
        zi = None

    # AR filter: denominator polynomial is 1 - ar_coeff[0]*L - ...
    y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)

    if init is not None:
        # with zi, lfilter returns (y, zf); keep only the filtered series
        result = y[0]
    else:
        result = y

    if _pandas_wrapper:
        return _pandas_wrapper(result)
    return result
def convolution_filter(x, filt, nsides=2):
    '''
    Linear filtering via convolution. Centered and backward displaced moving
    weighted average.

    Parameters
    ----------
    x : array_like
        data array, 1d or 2d, if 2d then observations in rows
    filt : array_like
        Linear filter coefficients in reverse time-order. Should have the
        same number of dimensions as x though if 1d and ``x`` is 2d will be
        coerced to 2d.
    nsides : int, optional
        If 2, a centered moving average is computed using the filter
        coefficients. If 1, the filter coefficients are for past values only.
        Both methods use scipy.signal.convolve.

    Returns
    -------
    y : ndarray, 2d
        Filtered array, number of columns determined by x and filt. If a
        pandas object is given, a pandas object is returned. The index of
        the return is the exact same as the time period in ``x``

    Notes
    -----
    In nsides == 1, x is filtered ::

        y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]

    where n_filt is len(filt).

    If nsides == 2, x is filtered around lag 0 ::

        y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
               + ... + x[n + n_filt/2]

    where n_filt is len(filt). If n_filt is even, then more of the filter
    is forward in time than backward.

    If filt is 1d or (nlags,1) one lag polynomial is applied to all
    variables (columns of x). If filt is 2d, (nlags, nvars) each series is
    independently filtered with its own lag polynomial, uses loop over nvar.
    This is different than the usual 2d vs 2d convolution.

    Filtering is done with scipy.signal.convolve, so it will be reasonably
    fast for medium sized data. For large data fft convolution would be
    faster.
    '''
    # for nsides shift the index instead of using 0 for 0 lag this
    # allows correct handling of NaNs
    if nsides == 1:
        trim_head = len(filt) - 1
        trim_tail = None
    elif nsides == 2:
        # head/tail NaN counts around the centered window; `or None` maps a
        # zero pad count to None for _pad_nans
        trim_head = int(np.ceil(len(filt)/2.) - 1) or None
        trim_tail = int(np.ceil(len(filt)/2.) - len(filt) % 2) or None
    else:  # pragma : no cover
        raise ValueError("nsides must be 1 or 2")

    _pandas_wrapper = _maybe_get_pandas_wrapper(x)
    x = np.asarray(x)
    filt = np.asarray(filt)
    if x.ndim > 1 and filt.ndim == 1:
        # broadcast one lag polynomial over all columns
        filt = filt[:, None]
    if x.ndim > 2:
        raise ValueError('x array has to be 1d or 2d')

    if filt.ndim == 1 or min(filt.shape) == 1:
        result = signal.convolve(x, filt, mode='valid')
    elif filt.ndim == 2:
        # one lag polynomial per column, filtered independently
        nlags = filt.shape[0]
        nvar = x.shape[1]
        result = np.zeros((x.shape[0] - nlags + 1, nvar))
        if nsides == 2:
            for i in range(nvar):
                # could also use np.convolve, but easier for swiching to fft
                result[:, i] = signal.convolve(x[:, i], filt[:, i],
                                               mode='valid')
        elif nsides == 1:
            for i in range(nvar):
                # prepend a zero coefficient so only past values enter
                result[:, i] = signal.convolve(x[:, i], np.r_[0, filt[:, i]],
                                               mode='valid')
    # re-pad with NaNs so the output aligns with the input's time index
    result = _pad_nans(result, trim_head, trim_tail)
    if _pandas_wrapper:
        return _pandas_wrapper(result)
    return result
#copied from sandbox.tsa.garch
def miso_lfilter(ar, ma, x, useic=False):  #[0.1,0.1]):
    '''
    Multiple-input, single-output filter: use nd correlation to merge the
    input columns, then use lfilter to produce the output series.

    Finds array y such that::

        ar(L)y_t = ma(L)x_t

    with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)

    Parameters
    ----------
    ar : array_like, 1d, float
        autoregressive lag polynomial including lag zero, ar(L)y_t
    ma : array_like, same ndim as x, currently 2d
        moving average lag polynomial ma(L)x_t
    x : array_like, 2d
        input data series, time in rows, variables in columns
    useic : bool or array_like
        if truthy, used as initial conditions for signal.lfiltic
        (NOTE(review): doubles as a flag and as the ic values — confirm
        callers always pass an array when they want initial conditions)

    Returns
    -------
    y : array, 1d
        filtered output series
    inp : array, 1d
        combined input series

    Notes
    -----
    currently for 2d inputs only, no choice of axis
    Use of signal.lfilter requires that ar lag polynomial contains
    floating point numbers
    does not cut off invalid starting and final values
    '''
    ma = np.asarray(ma)
    ar = np.asarray(ar)
    #inp = signal.convolve(x, ma, mode='valid')
    #inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
    #Note: convolve mixes up the variable left-right flip
    #I only want the flip in time direction
    #this might also be a mistake or problem in other code where I
    #switched from correlate to convolve
    # correct convolve version, for use with fftconvolve in other cases
    #inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
    # correlate with time-reversed ma flips only the time direction,
    # keeping the variable (column) order intact
    inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
    #for testing 2d equivalence between convolve and correlate
    #np.testing.assert_almost_equal(inp2, inp)
    nobs = x.shape[0]
    # cut off extra values at the end so output matches the observation count
    #todo initialize also x for correlate
    if useic:
        return signal.lfilter([1], ar, inp,
                              #zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
                              zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
    else:
        return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
    #return signal.lfilter([1], ar, inp), inp
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
  """Tests for the raw resource-variable ops and the ResourceVariable class.

  Covers handle creation, dtype/shape validation, assign/read ops, scatter
  updates, proto round-trips, and eager-mode behavior of ResourceVariable.
  """
  def testHandleDtypeShapeMatch(self):
    # Assigning a value whose dtype or shape disagrees with the handle must
    # fail at graph-construction time (ValueError), not at run time.
    with self.test_session():
      handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
      with self.assertRaises(ValueError):
        resource_variable_ops.assign_variable_op(
            handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
      with self.assertRaises(ValueError):
        resource_variable_ops.assign_variable_op(handle,
                                                 constant_op.constant(
                                                     [0],
                                                     dtype=dtypes.int32)).run()
      resource_variable_ops.assign_variable_op(handle,
                                               constant_op.constant(
                                                   0,
                                                   dtype=dtypes.int32)).run()
  def testReadVariableDtypeMismatch(self):
    # In eager mode a dtype mismatch surfaces as a runtime error on read.
    with context.eager_mode():
      handle = resource_variable_ops.var_handle_op(
          dtype=dtypes.int32, shape=[1], name="foo")
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "Trying to read variable with wrong dtype. "
                                   "Expected float got int32."):
        _ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
  def testAssignVariableDtypeMismatch(self):
    with context.eager_mode():
      handle = resource_variable_ops.var_handle_op(
          dtype=dtypes.int32, shape=[1], name="foo")
      resource_variable_ops.assign_variable_op(
          handle, constant_op.constant([1]))
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "Trying to assign variable with wrong "
                                   "dtype. Expected int32 got float."):
        resource_variable_ops.assign_variable_op(
            handle, constant_op.constant([1.], dtype=dtypes.float32))
  @test_util.run_in_graph_and_eager_modes()
  def testDtypeSurvivesIdentity(self):
    # identity() on a handle must preserve the handle's dtype metadata.
    handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
    id_handle = array_ops.identity(handle)
    self.evaluate(resource_variable_ops.assign_variable_op(
        id_handle, constant_op.constant(0, dtype=dtypes.int32)))
  @test_util.run_in_graph_and_eager_modes()
  def testCreateRead(self):
    handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
    self.evaluate(resource_variable_ops.assign_variable_op(
        handle, constant_op.constant(1, dtype=dtypes.int32)))
    value = self.evaluate(
        resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
    self.assertAllEqual(1, value)
  @test_util.run_in_graph_and_eager_modes()
  def testManyAssigns(self):
    # Control dependencies enforce assign -> read -> assign -> read ordering,
    # so the two reads must observe the two distinct assigned values.
    handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
    create = resource_variable_ops.assign_variable_op(
        handle, constant_op.constant(1, dtype=dtypes.int32))
    with ops.control_dependencies([create]):
      first_read = resource_variable_ops.read_variable_op(
          handle, dtype=dtypes.int32)
    with ops.control_dependencies([first_read]):
      write = resource_variable_ops.assign_variable_op(
          handle, constant_op.constant(2, dtype=dtypes.int32))
    with ops.control_dependencies([write]):
      second_read = resource_variable_ops.read_variable_op(
          handle, dtype=dtypes.int32)
    f, s = self.evaluate([first_read, second_read])
    self.assertEqual(f, 1)
    self.assertEqual(s, 2)
  @test_util.run_in_graph_and_eager_modes()
  def testAssignAdd(self):
    handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
    self.evaluate(resource_variable_ops.assign_variable_op(
        handle, constant_op.constant(1, dtype=dtypes.int32)))
    self.evaluate(resource_variable_ops.assign_add_variable_op(
        handle, constant_op.constant(1, dtype=dtypes.int32)))
    read = self.evaluate(
        resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
    self.assertEqual(read, 2)
  @test_util.run_in_graph_and_eager_modes(use_gpu=True)
  def testScatterAdd(self):
    handle = resource_variable_ops.var_handle_op(
        dtype=dtypes.int32, shape=[1, 1])
    self.evaluate(resource_variable_ops.assign_variable_op(
        handle, constant_op.constant([[1]], dtype=dtypes.int32)))
    self.evaluate(resource_variable_ops.resource_scatter_add(
        handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
    read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
    self.assertEqual(self.evaluate(read), [[3]])
  # TODO(alive): get this to work in Eager mode.
  def testGPU(self):
    with self.test_session(use_gpu=True):
      abc = variable_scope.get_variable(
          "abc",
          shape=[1],
          initializer=init_ops.ones_initializer(),
          use_resource=True)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(
          self.evaluate(
              resource_variable_ops.var_is_initialized_op(abc.handle)),
          True)
  # TODO(alive): fix bug in convert_to_tensor; get this to work in Eager.
  def testConstraintArg(self):
    # A callable constraint is accepted and stored; a non-callable is rejected.
    constraint = lambda x: x
    v = resource_variable_ops.ResourceVariable(
        initial_value=lambda: 1, constraint=constraint)
    self.assertEqual(v.constraint, constraint)
    constraint = 0
    with self.assertRaises(ValueError):
      v = resource_variable_ops.ResourceVariable(
          initial_value=lambda: 1, constraint=constraint)
  # TODO(alive): how should this work in Eager mode?
  def testInitFn(self):
    # The initializer op must be colocated with the variable's handle.
    with self.test_session():
      v = resource_variable_ops.ResourceVariable(
          initial_value=lambda: 1, dtype=dtypes.float32)
      self.assertEqual(v.handle.op.colocation_groups(),
                       v.initializer.inputs[1].op.colocation_groups())
  # TODO(alive): fix bug in convert_to_tensor; get this to work in Eager.
  def testInitFnDtype(self):
    with self.test_session():
      v = resource_variable_ops.ResourceVariable(
          initial_value=lambda: 1, dtype=dtypes.float32)
      self.assertEqual(dtypes.float32, v.value().dtype)
  # TODO(alive): fix bug in convert_to_tensor; get this to work in Eager.
  def testInitFnNoDtype(self):
    # Without an explicit dtype, the dtype is inferred from the init value.
    with self.test_session():
      v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1)
      self.assertEqual(dtypes.int32, v.value().dtype)
  @test_util.run_in_graph_and_eager_modes()
  def testInitializeAllVariables(self):
    v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32)
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(1.0, self.evaluate(v.value()))
  @test_util.run_in_graph_and_eager_modes()
  def testOperatorOverload(self):
    v = resource_variable_ops.ResourceVariable(1.0)
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(2.0, self.evaluate(v + v))
  @test_util.run_in_graph_and_eager_modes()
  def testAssignMethod(self):
    v = resource_variable_ops.ResourceVariable(1.0)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(v.assign(2.0))
    self.assertEqual(2.0, self.evaluate(v.value()))
  @test_util.run_in_graph_and_eager_modes()
  def testLoad(self):
    v = resource_variable_ops.ResourceVariable(1.0)
    self.evaluate(variables.global_variables_initializer())
    v.load(2.0)
    self.assertEqual(2.0, self.evaluate(v.value()))
  @test_util.run_in_graph_and_eager_modes()
  def testSparseRead(self):
    with self.test_session():
      init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
      v = resource_variable_ops.ResourceVariable(
          constant_op.constant(init_value, dtype=dtypes.int32), name="var0")
      self.evaluate(variables.global_variables_initializer())
      value = self.evaluate(v.sparse_read([0, 3, 1, 2]))
      self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
  def testToFromProto(self):
    # Round-trip through VariableDef; the reconstructed variable must alias
    # the same underlying resource (value 1.0, so w + 1 == 2).
    with self.test_session():
      v = resource_variable_ops.ResourceVariable(1.0)
      variables.global_variables_initializer().run()
      w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
      self.assertEquals(2, math_ops.add(w, 1).eval())
  @test_util.run_in_graph_and_eager_modes()
  def testAssignAddMethod(self):
    v = resource_variable_ops.ResourceVariable(1.0)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(v.assign_add(1.0))
    self.assertEqual(2.0, self.evaluate(v.value()))
  @test_util.run_in_graph_and_eager_modes()
  def testAssignSubMethod(self):
    v = resource_variable_ops.ResourceVariable(3.0)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(v.assign_sub(1.0))
    self.assertEqual(2.0, self.evaluate(v.value()))
  @test_util.run_in_graph_and_eager_modes()
  def testDestroyResource(self):
    # Reading a destroyed resource raises NotFoundError; destroying a
    # never-created handle is a no-op when ignore_lookup_error=True.
    v = resource_variable_ops.ResourceVariable(3.0)
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(3.0, self.evaluate(v.value()))
    self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
    with self.assertRaises(errors.NotFoundError):
      self.evaluate(v.value())
    # Handle to a resource not actually created.
    handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
    # Should raise no exception
    self.evaluate(resource_variable_ops.destroy_resource_op(
        handle, ignore_lookup_error=True))
  def testAssignDifferentShapes(self):
    # Assigning a differently-shaped value through a placeholder is allowed
    # (shape validation is relaxed for resource variables in graph mode).
    with self.test_session() as sess, variable_scope.variable_scope(
        "foo", use_resource=True):
      var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
      placeholder = array_ops.placeholder(dtypes.float32)
      assign = var.assign(placeholder)
      sess.run(
          [assign],
          feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
  def testAssignDifferentShapesEager(self):
    with context.eager_mode():
      with variable_scope.variable_scope("foo"):
        var = variable_scope.get_variable("x", shape=[1, 1],
                                          dtype=dtypes.float32)
        assign = var.assign(np.zeros(shape=[2, 2]))
        self.evaluate(assign)
  def testDtypeAfterFromProto(self):
    v = resource_variable_ops.ResourceVariable(2.0)
    w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
    self.assertIsInstance(w.dtype, dtypes.DType)
    self.assertEqual(v.dtype, w.dtype)
  # TODO(alive): get caching to work in eager mode.
  def testCachingDevice(self):
    # caching_device pins value() to the cache device and must not leave a
    # '_class' colocation attr on the cached read.
    with ops.device("/job:server/task:1"):
      v = resource_variable_ops.ResourceVariable(
          2.0, caching_device="/job:localhost")
      self.assertEqual("/job:localhost", v.value().device)
      with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
        _ = v.value().op.get_attr("_class")
    with ops.colocate_with(v.op):
      w = resource_variable_ops.ResourceVariable(
          2.0, caching_device="/job:localhost")
      self.assertEqual("/job:localhost", w.value().device)
      with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
        _ = w.value().op.get_attr("_class")
  @test_util.run_in_graph_and_eager_modes()
  def testSharedName(self):
    # A handle created with the same shared_name aliases the same resource;
    # an unused shared_name must fail on read.
    v = resource_variable_ops.ResourceVariable(300.0, name="var1")
    self.evaluate(variables.global_variables_initializer())
    w = resource_variable_ops.var_handle_op(
        dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var1")
    w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
    self.assertEqual(300.0, self.evaluate(w_read))
    x = resource_variable_ops.var_handle_op(
        dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var2")
    if context.in_graph_mode():
      with self.assertRaisesOpError("Resource .*/var2/.* does not exist"):
        x_read = resource_variable_ops.read_variable_op(x, v.dtype.base_dtype)
        self.evaluate(x_read)
    else:
      with self.assertRaisesRegexp(errors.NotFoundError,
                                   "Attempted to read a nonexistent variable."):
        _ = resource_variable_ops.read_variable_op(x, v.dtype.base_dtype)
  @test_util.run_in_graph_and_eager_modes()
  def testSharedNameWithNamescope(self):
    # The shared name includes the enclosing name scope ("foo/var3").
    with ops.name_scope("foo"):
      v = resource_variable_ops.ResourceVariable(300.0, name="var3")
    self.evaluate(variables.global_variables_initializer())
    w = resource_variable_ops.var_handle_op(
        dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="foo/var3")
    w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
    self.assertEqual(300.0, self.evaluate(w_read))
  @test_util.run_in_graph_and_eager_modes()
  def testShape(self):
    v = resource_variable_ops.ResourceVariable(
        name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
    self.assertEqual("(10, 20, 35)", str(v.shape))
    self.assertEqual("(10, 20, 35)", str(v.get_shape()))
    self.assertEqual("(10, 20, 35)", str(v.value().shape))
    self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
    if context.in_graph_mode():
      self.assertEqual(
          "<unknown>",
          str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
  def testSetInitialValue(self):
    with self.test_session():
      # Initialize variable with a value different from the initial value passed
      # in the constructor.
      v = resource_variable_ops.ResourceVariable(2.0)
      v.initializer.run(feed_dict={v.initial_value: 3.0})
      self.assertEqual(3.0, v.value().eval())
  def testControlFlowInitialization(self):
    """Expects an error if an initializer is in a control-flow scope."""
    def cond(i, _):
      return i < 10
    def body(i, _):
      zero = array_ops.zeros([], dtype=dtypes.int32)
      v = resource_variable_ops.ResourceVariable(initial_value=zero)
      return (i + 1, v.read_value())
    with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
      control_flow_ops.while_loop(cond, body, [0, 0])
  def testVariableEager(self):
    # End-to-end exercise of ResourceVariable in eager mode: properties,
    # callable initializers, assign/assign_add/assign_sub/load,
    # convert_to_tensor, and operator overloads.
    with context.eager_mode():
      init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
      constraint = lambda x: x
      with ops.name_scope("foo"):
        v = resource_variable_ops.ResourceVariable(
            name="var5",
            initial_value=init,
            caching_device="cpu:0",
            constraint=constraint)
      # Test properties
      self.assertEqual(dtypes.int32, v.dtype)
      self.assertEqual("foo/var5:0", v.name)
      self.assertAllEqual([10, 20, 35], v.shape.as_list())
      self.assertAllEqual(init.device, v.device)
      self.assertTrue(isinstance(v.handle, ops.EagerTensor))
      self.assertEqual(constraint, v.constraint)
      self.assertAllEqual(init.numpy(), v.read_value().numpy())
      self.assertAllEqual(init.numpy(), v.value().numpy())
      # Callable init.
      callable_init = lambda: init * 2
      v2 = resource_variable_ops.ResourceVariable(
          initial_value=callable_init, name="var6")
      self.assertEqual("var6:0", v2.name)
      self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())
      # Test assign_add.
      new_v2_val = v2.assign_add(v.read_value())
      self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())
      # Test assign_sub.
      new_v2_val = v2.assign_sub(v.read_value())
      self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())
      # Test assign.
      v2.assign(v.read_value())
      self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())
      # Test load
      v2.load(2 * v.read_value())
      self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())
      # Test convert_to_tensor
      t = ops.convert_to_tensor(v)
      self.assertAllEqual(t.numpy(), v.read_value().numpy())
      # Test operations
      self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
# Standard TF test entry point: run all tests when executed directly.
if __name__ == "__main__":
  test.main()
| |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Javascript code generation.
This module contains a few utilities for Javascript code generation.
"""
import re
import sys
import naming
import gflags
import log
import cpp_utils
import writer
# Matches a doxygen-style tag line ('\tag ...') and captures the tag name.
_doxygen_tag_re = re.compile(r'\s*\\(\w+) ')
# Captures the parameter name and its description from a '\param name desc' line.
_param_re = re.compile(r'\s*\\param (\w+) (.*?)$')
# Matches any character that is not valid in an upper-case C-style identifier.
_non_id_re = re.compile(r'[^A-Z0-9_]')
def GetScopePrefix(scope, type_defn, scope_operator='.'):
  """Gets the prefix string to reference a type from a given scope.

  This function returns a concatenation of js scope operators such as, in the
  context of the given scope, when prefixing the name of the given type, it
  will reference exactly that type.

  For example, given:
    namespace A {
      namespace B {
        class C;
      }
      namespace D {
        void F();
      }
    }
  To access C from F, one needs to refer to it by B::C. This function will
  return the 'B::' part.

  Args:
    scope: the Definition for the scope from which the type must be accessed.
    type_defn: the Definition for the type which must be accessed.
    scope_operator: (optional) the scope operator for your language, e.g. '.'
        or '::'. Defaults to '.', the Javascript scope operator.

  Returns:
    the prefix string.
  """
  # The original version required scope_operator but then ignored it and
  # hard-coded '.'; it was also called with only two arguments elsewhere in
  # this module, which raised a TypeError. Defaulting to '.' keeps existing
  # behavior while honoring an explicitly requested operator.
  return cpp_utils.GetScopePrefixWithScopeOperator(scope, type_defn,
                                                   scope_operator)
def GetScopedName(scope, type_defn):
  """Gets the scoped reference string for a type from a given scope.

  This function returns a concatenation of scope operators such as, in the
  context of the given scope, when prefixing the name of the given type, it
  will reference exactly that type.

  For example, given:
    namespace A {
      namespace B {
        class C;
      }
      namespace D {
        void F();
      }
    }
  To access C from F, one needs to refer to it by B::C. This function will
  return exactly that.

  Args:
    scope: the Definition for the scope from which the type must be accessed.
    type_defn: the Definition for the type which must be accessed.

  Returns:
    the scoped reference string.
  """
  # Pass the Javascript scope operator explicitly: GetScopePrefix declares a
  # third 'scope_operator' parameter, so the original two-argument call
  # raised a TypeError at runtime.
  return GetScopePrefix(scope, type_defn, '.') + type_defn.name
def GetFullyQualifiedScopePrefix(scope):
  """Gets the fully qualified scope prefix.

  Builds the dotted path of every ancestor scope (skipping the unnamed global
  scope) followed by a trailing '.', e.g. 'A.B.' for a scope B nested in A.

  Args:
    scope: the Definition for the scope from which the type must be accessed.

  Returns:
    the fully qualified scope prefix string.
  """
  ancestors = scope.GetParentScopeStack() + [scope]
  # Skip the root (global) scope, then append '' so the join ends with '.'.
  names = [ancestor.name for ancestor in ancestors[1:]]
  names.append('')
  return '.'.join(names)
def GetFullyQualifiedTypeName(type_defn):
  """Gets the fully qualified name for a type.

  Delegates to the type's binding model, which knows how to render the
  JSDoc type string for that kind of definition.

  Args:
    type_defn: the type definition you want a name for.

  Returns:
    the fully qualified name string.
  """
  binding_model = type_defn.binding_model
  return binding_model.JSDocTypeString(type_defn)
def GetFullyQualifiedTypeString(type_defn):
  """Gets the fully qualified dotted path of a type.

  Resolves the type to its final (unaliased) definition, then joins the names
  of its enclosing scopes (skipping the unnamed global scope) and its own name
  with '.', e.g. 'A.B.C'.

  Args:
    type_defn: the type definition you want a path for.

  Returns:
    the fully qualified dotted path string.
  """
  # The original docstring was empty; behavior is unchanged.
  type_defn = type_defn.GetFinalType()
  type_stack = type_defn.GetParentScopeStack()
  name = type_defn.name
  return '.'.join([s.name for s in type_stack[1:]] + [name])
def GetGetterName(field):
  """Gets the name of the getter function for a member field.

  Unless overridden by the 'getter' attribute in IDL, the default name for the
  getter function is the name of the field, normalized to the lower-case
  convention.

  Args:
    field: the Definition for the field.

  Returns:
    the name of the getter function.
  """
  override = field.attributes['getter']
  if override:
    return override
  return naming.Normalize(field.name, naming.Lower)
def GetSetterName(field):
  """Gets the name of the setter function for a member field.

  Unless overridden by the 'setter' attribute in IDL, the default name for the
  setter function is 'set_' concatenated with the name of the field, normalized
  to the lower-case convention.

  Args:
    field: the Definition for the field.

  Returns:
    the name of the setter function.
  """
  override = field.attributes['setter']
  if override:
    return override
  return 'set_%s' % naming.Normalize(field.name, naming.Lower)
def GetFunctionParamPrototype(scope, param):
  """Gets the name/type information for a parameter in a function prototype.

  NOTE(review): despite the original docstring, this returns the normalized
  parameter *name*, not the C++-style declaration text; the declaration
  string computed below is currently discarded.

  Args:
    scope: the scope of the prototype.
    param: the Function.Param to declare

  Returns:
    a (string, list) pair. The string is the normalized parameter name. The
    list contains a single (name, Definition) pair for the parameter's type.
  """
  bm = param.type_defn.binding_model
  # NOTE(review): 'text' and 'need_defn' are computed but never used; kept
  # unchanged in case the binding-model calls have side effects -- confirm.
  if param.mutable:
    text, need_defn = bm.CppMutableParameterString(scope, param.type_defn)
  else:
    text, need_defn = bm.CppParameterString(scope, param.type_defn)
  name = naming.Normalize(param.name, naming.Java)
  return name, [(name, param.type_defn)]
def GetFunctionPrototype(scope, obj, member):
  """Gets the string needed to declare a function prototype.

  Args:
    scope: the scope of the prototype.
    obj: the function to declare.
    member: True if member function

  Returns:
    A string prototype.
  """
  id_prefix = GetFullyQualifiedScopePrefix(scope)
  if member:
    member_prefix = 'prototype.'
  else:
    member_prefix = ''
  # Normalized parameter names, joined in declaration order.
  param_string = ', '.join(
      [GetFunctionParamPrototype(scope, param)[0] for param in obj.params])
  return '%s%s%s = function(%s) { };' % (
      id_prefix, member_prefix, naming.Normalize(obj.name, naming.Java),
      param_string)
def GetFunctionParamType(obj, param_name):
  """Gets the type of a function param.

  Args:
    obj: The function.
    param_name: The name of the parameter. A trailing '?' (optional-parameter
        marker) is stripped before the lookup.

  Returns:
    A string which is the type of the parameter, or '*' (and a logged source
    error) if no parameter of that name exists.
  """
  # endswith() is safe on an empty name; the original 'param_name[-1]'
  # indexing raised IndexError for ''.
  if param_name.endswith('?'):
    param_name = param_name[:-1]
  for p in obj.params:
    if p.name == param_name:
      return GetFullyQualifiedTypeName(p.type_defn)
  log.SourceError(obj.source, 'No param "%s" on function "%s"' %
                  (param_name, obj.name))
  return '*'
def GetCommentsForParams(func):
  """Gets the comments for the params of a function.

  Parses the doxygen-style '__docs' attribute of the function, separating the
  general comment lines from the '\\param' descriptions. Continuation lines
  following a '\\param' tag are appended to that parameter's description.

  Args:
    func: The function whose '__docs' attribute is parsed.

  Returns:
    a (string, dict) pair. The string is the comments minus the param parts.
    The dict is a dict of param names to comments.
  """
  collecting_key = None
  param_comments = {}
  comments = []
  comment_lines = func.attributes['__docs'].splitlines()
  for line in comment_lines:
    match = _doxygen_tag_re.match(line)
    if match:
      if match.group(1) == 'param':
        match = _param_re.match(line)
        if match:
          collecting_key = match.group(1)
          param_comments[collecting_key] = match.group(2)
        else:
          # The '\param' line did not parse, so no param name is available;
          # report the offending line itself. (The original referenced an
          # undefined 'param_name' here, raising NameError instead of
          # logging the error.)
          log.SourceError(func,
                          ('Incorrect format for param '
                           'comment "%s" on function "%s"') %
                          (line.strip(), func.name))
      else:
        # Any other doxygen tag ends param collection and is kept verbatim.
        comments += [line]
        collecting_key = None
    elif collecting_key:
      param_comments[collecting_key] += '\n' + line
    else:
      comments += [line]
  return '\n'.join(comments), param_comments
def GetParamSpec(obj, param_name):
  """Gets the parameter specification string for a function parameter.

  Args:
    obj: The function.
    param_name: The name of the parameter.

  Returns:
    a string in JSDOC format for the parameter.
  """
  # Renamed local from 'type', which shadowed the builtin.
  param_type = GetFunctionParamType(obj, param_name)
  return '@param {%s} %s ' % (param_type,
                              naming.Normalize(param_name, naming.Java))
def GetReturnSpec(obj, flags):
  """Gets the return type specification string for a function.

  Args:
    obj: The function.
    flags: A map of flags. The only one we care about is 'eat_lines' which
      we'll set to True if the 'noreturndocs' attribute exists.

  Returns:
    a string in JSDOC format for the return type.
  """
  if gflags.FLAGS['no-return-docs'].value and 'noreturndocs' in obj.attributes:
    flags['eat_lines'] = True
    return ''
  # Renamed local from 'type', which shadowed the builtin.
  if obj.type_defn:
    return_type = GetFullyQualifiedTypeName(obj.type_defn)
  else:
    return_type = "**unknown return type**"
  return '@return {%s}' % return_type
class JavascriptFileWriter(object):
  """Javascript file writer class.
  This class helps with generating a Javascript file by parts, by allowing
  delayed construction of 'sections' inside the code, that can be filled later.
  For example one can create a section for forward declarations, and add code to
  that section as the rest of the file gets written.
  It also provides facility to add #include lines, and header guards for header
  files, as well as simplifies namespace openning and closing.
  It helps 'beautifying' the code in simple cases.
  """
  # NOTE(review): several parts of this writer (AddInclude's '#include'
  # syntax, _ValidateNamespace's 'namespace x {' blocks) emit C++-style
  # constructs even though the class targets Javascript; presumably this was
  # adapted from a C++ writer -- confirm before relying on them.
  class Section(object):
    """Javascript writer section class."""
    # this regexp is used for EmitTemplate. It maps {#SomeText} into 'section'
    # groups and the rest of the text into 'text' groups in the match objects.
    _template_re = re.compile(
        r"""
        ^\s*                                 # skip whitespaces
        (?:                                  # non grouping ( )
        \$\{\#(?P<section>[_A-Za-z0-9]*)\}   # matches a '${#AnyText}' section
                                             # tag, puts the 'AnyText' in a
                                             # 'section' group
        |                                    # ... or ...
        (?P<text>.*)                         # matches any text, puts it into
                                             # a 'text' group
        )                                    # close non-grouping ( )
        \s*$                                 # skip whitespaces
        """, re.MULTILINE | re.VERBOSE)
    def __init__(self, indent_string, indent):
      """Inits a JavascriptFileWriter.Section.
      Args:
        indent_string: the string for one indentation.
        indent: the number of indentations for code inside the section.
      """
      self._indent_string = indent_string
      # Mixed list of code lines (str) and nested Section objects, in
      # emission order; flattened by GetLines().
      self._code = []
      # 'Front-end' namespaces: the stack requested via Push/PopNamespace.
      self._fe_namespaces = []
      # 'Back-end' namespaces: the stack actually emitted into the code so
      # far; reconciled with _fe_namespaces lazily by _ValidateNamespace().
      self._be_namespaces = []
      self._section_map = {}
      self._indent = indent
      # True when _fe_namespaces and _be_namespaces may disagree.
      self._need_validate = False
    def EmitSection(self, section):
      """Emits a section at the current position.
      When calling GetLines, the code for the section passed in will be output
      at this position.
      Args:
        section: the section to add.
      """
      self._ValidateNamespace()
      self._code.append(section)
    def CreateUnlinkedSection(self, name, indent=None):
      """Creates a section, but without emitting it.
      When calling GetLines, the code for the created section will not be
      output unless EmitSection is called.
      Args:
        name: the name of the section.
        indent: (optional) the number of indentations for the new section.
      Returns:
        the created section.
      """
      if not indent:
        indent = self._indent
      section = JavascriptFileWriter.Section(self._indent_string, indent)
      self._section_map[name] = section
      return section
    def CreateSection(self, name):
      """Creates a section, and emits it at the current position.
      When calling GetLines, the code for the created section will be output
      at this position.
      Args:
        name: the name of the section.
      Returns:
        the created section.
      """
      self._ValidateNamespace()
      section = self.CreateUnlinkedSection(name, indent=self._indent)
      self.EmitSection(section)
      return section
    def GetSection(self, name):
      """Gets a section by name.
      Args:
        name: the name of the section.
      Returns:
        the section if found, None otherwise.
      """
      try:
        return self._section_map[name]
      except KeyError:
        return None
    def PushNamespace(self, name):
      """Opens a namespace.
      This function opens a namespace by emitting code at the current position.
      This is done lazily so that openning, closing, then openning the same
      namespace again doesn't add extra code.
      Args:
        name: the name of the namespace.
      """
      self._need_validate = True
      self._fe_namespaces.append(name)
    def PopNamespace(self):
      """Closes the current namespace.
      This function closes the current namespace by emitting code at the
      current position. This is done lazily so that openning, closing, then
      openning the same namespace again doesn't add extra code.
      Returns:
        the name of the namespace that was closed.
      """
      self._need_validate = True
      return self._fe_namespaces.pop()
    def _ValidateNamespace(self):
      """Validates the current namespace by emitting all the necessary code."""
      if not self._need_validate:
        return
      self._need_validate = False
      # Close emitted namespaces down to the common prefix, then open the
      # newly requested ones; this is what makes Push/Pop lazy.
      l = cpp_utils.GetCommonPrefixLength(
          self._fe_namespaces, self._be_namespaces)
      while len(self._be_namespaces) > l:
        name = self._be_namespaces.pop()
        self._code.append('}  // namespace %s' % name)
      for name in self._fe_namespaces[l:]:
        self._be_namespaces.append(name)
        self._code.append('namespace %s {' % name)
    def EmitCode(self, code):
      """Emits code at the current position.
      The code passed in will be output at the current position when GetLines
      is called. The code is split into lines, and re-indented to match the
      section indentation.
      NOTE(review): the body below appends each line unchanged -- no
      re-indentation actually happens despite the sentence above; confirm
      which is intended.
      Args:
        code: a string containing the code to emit.
      """
      self._ValidateNamespace()
      for line in code.split('\n'):
        if not line:
          self._code.append('')
        else:
          self._code.append(line)
    def EmitTemplate(self, template):
      """Emits a template at the current position.
      Somewhat similarly to string.template.substitute, this function takes a
      string containing code and escape sequences. Every time an escape
      sequence, of the form '${#SectionName}', is found, a section is created
      (or re-used) and emitted at the current position. Otherwise the text is
      treated as code and simply emitted as-is. For example take the following
      string:
        void MyFunction() {
          ${#MyFunctionBody}
        }
      Calling EmitTemplate with that string is equivalent to:
        section.EmitCode('void MyFunction() {')
        section.CreateSection('MyFunctionBody')
        section.EmitCode('}')
      If a section of that particular name already exists, it is reused.
      Args:
        template: a string containing the template to emit.
      """
      def _Match(mo):
        """Function called for template regexp matches.
        Args:
          mo: match object.
        Returns:
          empty string.
        """
        section_group = mo.group('section')
        if section_group:
          if section_group in self._section_map:
            section = self._section_map[section_group]
            self.EmitSection(section)
          else:
            self.CreateSection(section_group)
        else:
          self.EmitCode(mo.group('text'))
        return ''
      # sub() is used only to drive _Match for its side effects; the
      # substitution result itself is discarded.
      self._template_re.sub(_Match, template)
    def IsEmpty(self):
      """Queries whether the section is empty or not.
      Returns:
        True if the section is empty, False otherwise.
      """
      return not self._code
    def AddPrefix(self, code):
      """Adds code at the beginning of the section.
      Args:
        code: a single code line.
      """
      self._code = [code] + self._code
    def GetLines(self):
      """Retrieves the full contents of the section.
      This function gathers all the code that was emitted, including in
      children sections.
      Returns:
        a list of code lines.
      """
      # close open namespaces
      self._fe_namespaces = []
      self._need_validate = True
      self._ValidateNamespace()
      lines = []
      for line in self._code:
        if isinstance(line, JavascriptFileWriter.Section):
          # Nested sections are flattened recursively, in emission order.
          lines.extend(line.GetLines())
        else:
          lines.append(line)
      return lines
  def __init__(self, filename, is_header, header_token=None,
               indent_string='  '):
    """Inits a JavascriptFileWriter.
    The file writer has a 'main section' where all the code will go. See
    CreateSection, EmitCode.
    Args:
      filename: the name of the file.
      is_header: a boolean, True if this is a header file. In that case, the
        header guard will be generated.
      header_token: (optional) a string for the header guard token. Defaults to
        a generated one based on the filename.
      indent_string: (optional) the string to be used for indentations.
        Defaults to two spaces.
    """
    self._filename = filename
    self._is_header = is_header
    # NOTE(review): header_token is accepted but ignored -- _header_token is
    # always '', and no header guard is emitted anywhere below; confirm
    # whether guard emission was intentionally dropped for Javascript.
    self._header_token = ''
    # NOTE(review): this list is never used; AddInclude writes directly to
    # _include_section instead.
    self._includes = []
    self._include_section = self.Section(indent_string, 0)
    self._main_section = self.Section(indent_string, 0)
  def AddInclude(self, name, system=False):
    """Adds an include to the file.
    Args:
      name: the name of the include.
      system: (optional) True if it is a 'system' include (uses the <file.h>
        syntax). Defaults to False.
    """
    if system:
      self._include_section.EmitCode('#include <%s>' % name)
    else:
      self._include_section.EmitCode('#include "%s"' % name)
  def CreateSection(self, name):
    """Creates a section within the main section.
    Args:
      name: the name of the section to be created.
    Returns:
      the created section.
    """
    return self._main_section.CreateSection(name)
  def GetSection(self, name):
    """Gets a section by name from the main section.
    Args:
      name: the name of the section to look for.
    Returns:
      the created section if found, None otherwise.
    """
    return self._main_section.GetSection(name)
  def PushNamespace(self, name):
    """Opens a namespace in the main section.
    This function opens a namespace by emitting code at the current position.
    This is done lazily so that openning, closing, then openning the same
    namespace again doesn't add extra code.
    Args:
      name: the name of the namespace.
    """
    self._main_section.PushNamespace(name)
  def PopNamespace(self):
    """Closes the current namespace in the main section.
    This function closes the current namespace by emitting code at the
    current position. This is done lazily so that openning, closing, then
    openning the same namespace again doesn't add extra code.
    Returns:
      the name of the namespace that was closed.
    """
    return self._main_section.PopNamespace()
  def EmitCode(self, code):
    """Emits code at the current position in the main section.
    Args:
      code: a string containing the code to emit.
    """
    self._main_section.EmitCode(code)
  def GetLines(self):
    """Retrieves the full contents of the file writer.
    This function gathers all the code that was emitted, including the
    header guard (if this is a header file), and the includes.
    Returns:
      a list of code lines.
    """
    lines = []
    include_lines = self._include_section.GetLines()
    if include_lines:
      lines.append('')
      lines.extend(include_lines)
    main_lines = self._main_section.GetLines()
    if main_lines:
      lines.append('')
      lines.extend(main_lines)
    return lines
  def Write(self):
    """Writes the full contents to the file.
    This function writes the full contents to the file specified by the
    'filename' parameter at creation time.
    """
    # Avoids touching the file (and its mtime) when the content is unchanged.
    writer.WriteIfContentDifferent(self._filename,
                                   '\n'.join(self.GetLines()) + '\n')
def main():
  # Placeholder entry point: this module is meant to be imported as a
  # library, not executed directly.
  pass
if __name__ == '__main__':
  main()
| |
"""
WebAPI provides a thin wrapper over `Steam's Web API <https://developer.valvesoftware.com/wiki/Steam_Web_API>`_
It is very friendly to exploration and prototyping when using ``ipython``, ``notebooks`` or similar.
The ``key`` will determine what WebAPI interfaces and methods are available.
.. note::
Some endpoints don't require a key
Currently the WebAPI can be accessed via one of two API hosts. See :class:`APIHost`.
Example code:
.. code:: python
>>> api = WebAPI(key)
>>> api.call('ISteamUser.ResolveVanityURL', vanityurl="valve", url_type=2)
>>> api.ISteamUser.ResolveVanityURL(vanityurl="valve", url_type=2)
>>> api.ISteamUser.ResolveVanityURL_v1(vanityurl="valve", url_type=2)
{'response': {'steamid': '103582791429521412', 'success': 1}}
All global params (``key``, ``https``, ``format``, ``raw``) can be specified on a per-call basis.
.. code:: python
    >>> print api.ISteamUser.ResolveVanityURL(format='vdf', raw=True, vanityurl="valve", url_type=2)
"response"
{
"steamid" "103582791429521412"
"success" "1"
}
"""
import json as _json
from steam.utils.web import make_requests_session as _make_session
class APIHost(object):
    """Enum of currently available API hosts."""

    #: reachable over both HTTP (port 80) and HTTPS (port 443)
    Public = 'api.steampowered.com'

    #: reachable over HTTPS (port 443) only
    #:
    #: .. note::
    #:     Key is required for every request. If not supplied you will get HTTP 403.
    Partner = 'partner.steam-api.com'
# Global call parameters and their defaults; each may be overridden per
# WebAPI instance or per individual method call.
DEFAULT_PARAMS = {
    # api parameters
    'apihost': APIHost.Public,  # hostname requests are sent to (see APIHost)
    'key': None,  # WebAPI key; some endpoints work without one
    'format': 'json',  # response format: json, vdf, or xml
    # internal
    'https': True,  # scheme selection; stripped before the request is sent
    'http_timeout': 30,  # per-request timeout in seconds
    'raw': False,  # when True, return the unparsed response body
    }
class WebAPI(object):
    """Steam WebAPI wrapper

    .. note::
        Interfaces and methods are populated automatically from Steam WebAPI.

    :param key: api key from https://steamcommunity.com/dev/apikey
    :type key: :class:`str`
    :param format: response format, either (``json``, ``vdf``, or ``xml``) only when ``raw=False``
    :type format: :class:`str`
    :param raw: return raw response
    :type raw: :class:`bool`
    :param https: use ``https``
    :type https: :class:`bool`
    :param http_timeout: HTTP timeout in seconds
    :type http_timeout: :class:`int`
    :param apihost: api hostname, see :class:`APIHost`
    :type apihost: :class:`str`
    :param auto_load_interfaces: load interfaces from the Steam WebAPI
    :type auto_load_interfaces: :class:`bool`

    These can be specified per method call for one off calls
    """

    # Class-level fallbacks; every instance overrides these in __init__.
    key = DEFAULT_PARAMS['key']
    format = DEFAULT_PARAMS['format']
    raw = DEFAULT_PARAMS['raw']
    https = DEFAULT_PARAMS['https']
    http_timeout = DEFAULT_PARAMS['http_timeout']
    apihost = DEFAULT_PARAMS['apihost']
    interfaces = []

    def __init__(self, key, format=DEFAULT_PARAMS['format'],
                 raw=DEFAULT_PARAMS['raw'],
                 https=DEFAULT_PARAMS['https'],
                 http_timeout=DEFAULT_PARAMS['http_timeout'],
                 apihost=DEFAULT_PARAMS['apihost'],
                 auto_load_interfaces=True):
        self.key = key                    #: api key
        self.format = format              #: format (``json``, ``vdf``, or ``xml``)
        self.raw = raw                    #: return raw response or parse
        self.https = https                #: use https or not
        self.http_timeout = http_timeout  #: HTTP timeout in seconds
        self.apihost = apihost            #: ..versionadded:: 0.8.3 apihost hostname
        self.interfaces = []              #: list of all interfaces
        self.session = _make_session()    #: :class:`requests.Session` from :func:`.make_requests_session`

        if auto_load_interfaces:
            self.load_interfaces(self.fetch_interfaces())

    def __repr__(self):
        return "%s(key=%s, https=%s)" % (
            self.__class__.__name__,
            repr(self.key),
            repr(self.https),
            )

    def fetch_interfaces(self):
        """
        Returns a dict with the response from ``GetSupportedAPIList``

        :return: :class:`dict` of all interfaces and methods

        The returned value can be passed to :meth:`load_interfaces`
        """
        return get('ISteamWebAPIUtil', 'GetSupportedAPIList', 1,
                   https=self.https,
                   apihost=self.apihost,
                   caller=None,
                   session=self.session,
                   params={'format': 'json',
                           'key': self.key,
                           },
                   )

    def load_interfaces(self, interfaces_dict):
        """
        Populates the namespace under the instance

        :param interfaces_dict: response from ``GetSupportedAPIList``
        :raises ValueError: when the response contains no interface list
        """
        if interfaces_dict.get('apilist', {}).get('interfaces', None) is None:
            raise ValueError("Invalid response for GetSupportedAPIList")

        interfaces = interfaces_dict['apilist']['interfaces']

        if len(interfaces) == 0:
            # BUG FIX: message previously read "API returned not interfaces"
            raise ValueError("API returned no interfaces; probably using invalid key")

        # clear existing interface instances
        for interface in self.interfaces:
            delattr(self, interface.name)
        self.interfaces = []

        # create interface instances from response
        for interface in interfaces:
            obj = WebAPIInterface(interface, parent=self)
            self.interfaces.append(obj)
            setattr(self, obj.name, obj)

    def call(self, method_path, **kwargs):
        """
        Make an API call for specific method

        :param method_path: format ``Interface.Method`` (e.g. ``ISteamWebAPIUtil.GetServerInfo``)
        :type method_path: :class:`str`
        :param kwargs: keyword arguments for the specific method
        :return: response
        :rtype: :class:`dict`, :class:`lxml.etree.Element` or :class:`str`
        """
        interface, method = method_path.split('.', 1)
        return getattr(getattr(self, interface), method)(**kwargs)

    def doc(self):
        """
        :return: Documentation for all interfaces and their methods
        :rtype: str
        """
        doc = "Steam Web API - List of all interfaces\n\n"
        for interface in self.interfaces:
            doc += interface.__doc__
        return doc
class WebAPIInterface(object):
    """
    Steam Web API Interface
    """
    def __init__(self, interface_dict, parent):
        self._parent = parent
        self.name = interface_dict['name']
        self.methods = []

        for method_dict in interface_dict['methods']:
            method = WebAPIMethod(method_dict, parent=self)
            self.methods.append(method)

            # expose the method under a versioned attribute name, e.g. Foo_v2
            setattr(self, "%s_v%d" % (method.name, method.version), method)

            # the bare attribute name always refers to the latest version
            known = getattr(self, method.name, None)
            if known is None or known.version < method.version:
                setattr(self, method.name, method)

    def __repr__(self):
        return "<%s %s with %s methods>" % (
            self.__class__.__name__,
            repr(self.name),
            repr(len(list(self))),
            )

    def __iter__(self):
        return iter(self.methods)

    # The global call parameters are owned by the parent WebAPI instance;
    # these read-only properties simply delegate to it.

    @property
    def key(self):
        return self._parent.key

    @property
    def apihost(self):
        return self._parent.apihost

    @property
    def https(self):
        return self._parent.https

    @property
    def http_timeout(self):
        return self._parent.http_timeout

    @property
    def format(self):
        return self._parent.format

    @property
    def raw(self):
        return self._parent.raw

    @property
    def session(self):
        return self._parent.session

    def doc(self):
        """
        :return: Documentation for all methods on this interface
        :rtype: str
        """
        return self.__doc__

    @property
    def __doc__(self):
        parts = ["%s\n%s\n" % (self.name, '-'*len(self.name))]
        for method in self.methods:
            parts.append(" %s\n" % method.__doc__.replace("\n", "\n "))
        return ''.join(parts)
class WebAPIMethod(object):
    """
    Steam Web API Interface Method
    """
    def __init__(self, method_dict, parent):
        self.last_response = None
        self._parent = parent
        self._dict = method_dict

        raw_params = method_dict['parameters']
        self._dict['parameters'] = {}
        for entry in raw_params:
            # Steam marks array-capable parameters with an '[0]' suffix;
            # remember that capability and index the entry by its bare name.
            entry['_array'] = entry['name'].endswith('[0]')
            if entry['_array']:
                entry['name'] = entry['name'][:-3]
            self._dict['parameters'][entry['name']] = entry

    def __repr__(self):
        qualified_name = "%s.%s_v%d" % (
            self._parent.name,
            self.name,
            self.version,
            )
        return "<%s %s>" % (self.__class__.__name__, repr(qualified_name))

    def __call__(self, **kwargs):
        possible_kwargs = set(self._dict['parameters'].keys()) | set(DEFAULT_PARAMS.keys())
        unrecognized = set(kwargs.keys()).difference(possible_kwargs)
        if unrecognized:
            raise ValueError("Unrecognized parameter %s" % repr(unrecognized.pop()))

        params = {}

        # Per-call overrides of the global parameters fall back to the parent.
        for global_name in DEFAULT_PARAMS.keys():
            if global_name in kwargs:
                params[global_name] = kwargs.pop(global_name)
            else:
                params[global_name] = getattr(self._parent, global_name)

        # Validate and collect the method-specific parameters.
        for spec in self.parameters.values():
            pname = spec['name']
            if not spec['optional'] and pname not in kwargs and pname != 'key':
                raise ValueError("Method requires %s to be set" % repr(pname))
            if pname in kwargs:
                if spec['_array'] and not isinstance(kwargs[pname], list):
                    raise ValueError("Expected %s to be a list, got %s" % (
                        repr(pname),
                        repr(type(kwargs[pname])))
                        )
                params[pname] = kwargs[pname]

        scheme = 'https' if self._parent.https else 'http'
        url = "%s://%s/%s/%s/v%s/" % (
            scheme,
            self._parent.apihost,
            self._parent.name,
            self.name,
            self.version,
            )

        return webapi_request(
            url=url,
            method=self.method,
            caller=self,
            session=self._parent.session,
            params=params,
            )

    @property
    def version(self):
        """Method version number"""
        return self._dict['version']

    @property
    def method(self):
        """HTTP method used for the call (``GET`` or ``POST``)"""
        return self._dict['httpmethod']

    @property
    def parameters(self):
        """Mapping of parameter name to parameter metadata"""
        return self._dict['parameters']

    @property
    def name(self):
        """Method name"""
        return self._dict['name']

    def doc(self):
        """
        :return: Documentation for this method
        :rtype: str
        """
        return self.__doc__

    @property
    def __doc__(self):
        doc = "%(httpmethod)s %(name)s (v%(version)04d)\n" % self._dict
        if 'description' in self._dict:
            doc += "\n %(description)s\n" % self._dict
        if len(self.parameters):
            doc += " \n Parameters:\n"
            for param in sorted(self.parameters.values(), key=lambda x: x['name']):
                doc += " %s %s %s%s\n" % (
                    param['name'].ljust(25),
                    ((param['type']+"[]") if param['_array'] else
                     param['type']
                     ).ljust(8),
                    'optional' if param['optional'] else 'required',
                    (("\n - " + param['description'])
                     if 'description' in param and param['description'] else ''
                     ),
                    )
        return doc
def webapi_request(url, method='GET', caller=None, session=None, params=None):
    """Low level function for calling Steam's WebAPI

    .. versionchanged:: 0.8.3

    :param url: request url (e.g. ``https://api.steampowered.com/A/B/v001/``)
    :type url: :class:`str`
    :param method: HTTP method (GET or POST)
    :type method: :class:`str`
    :param caller: caller reference, caller.last_response is set to the last response
    :param params: dict of WebAPI and endpoint specific params
    :type params: :class:`dict`
    :param session: an instance requests session, or one is created per call
    :type session: :class:`requests.Session`
    :return: response based on parameters
    :rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str`
    """
    if method not in ('GET', 'POST'):
        raise ValueError("Only GET and POST methods are supported, got: %s" % repr(method))

    # BUG FIX: work on a shallow copy; the previous implementation mutated the
    # caller's dict in place (deleting internal keys and rewriting values).
    params = {} if params is None else dict(params)

    onetime = {}
    for param in DEFAULT_PARAMS:
        params[param] = onetime[param] = params.get(param, DEFAULT_PARAMS[param])
    # internal-only params must not be sent to the endpoint
    for param in ('raw', 'apihost', 'https', 'http_timeout'):
        del params[param]

    if onetime['format'] not in ('json', 'vdf', 'xml'):
        raise ValueError("Expected format to be json,vdf or xml; got %s" % onetime['format'])

    for k, v in list(params.items()):  # serialize some types
        if isinstance(v, bool):
            params[k] = 1 if v else 0
        elif isinstance(v, dict):
            params[k] = _json.dumps(v)
        elif isinstance(v, list):
            # lists are flattened into indexed params: name[0], name[1], ...
            del params[k]
            for i, lvalue in enumerate(v):
                params["%s[%d]" % (k, i)] = lvalue

    kwargs = {'params': params} if method == "GET" else {'data': params}  # params to data for POST

    if session is None:
        session = _make_session()

    f = getattr(session, method.lower())
    resp = f(url, stream=False, timeout=onetime['http_timeout'], **kwargs)

    # we keep a reference of the last response instance on the caller
    if caller is not None:
        caller.last_response = resp

    # 4XX and 5XX will cause this to raise
    resp.raise_for_status()

    if onetime['raw']:
        return resp.text
    elif onetime['format'] == 'json':
        return resp.json()
    elif onetime['format'] == 'xml':
        from lxml import etree as _etree
        return _etree.fromstring(resp.content)
    elif onetime['format'] == 'vdf':
        import vdf as _vdf
        return _vdf.loads(resp.text)
def get(interface, method, version=1,
        apihost=DEFAULT_PARAMS['apihost'], https=DEFAULT_PARAMS['https'],
        caller=None, session=None, params=None):
    """Send GET request to an API endpoint

    .. versionadded:: 0.8.3

    :param interface: interface name
    :type interface: str
    :param method: method name
    :type method: str
    :param version: method version
    :type version: int
    :param apihost: API hostname
    :type apihost: str
    :param https: whether to use HTTPS
    :type https: bool
    :param params: parameters for endpoint
    :type params: dict
    :return: endpoint response
    :rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str`
    """
    scheme = 'https' if https else 'http'
    url = "%s://%s/%s/%s/v%s/" % (scheme, apihost, interface, method, version)
    return webapi_request(url, 'GET', caller=caller, session=session, params=params)
def post(interface, method, version=1,
         apihost=DEFAULT_PARAMS['apihost'], https=DEFAULT_PARAMS['https'],
         caller=None, session=None, params=None):
    """Send POST request to an API endpoint

    .. versionadded:: 0.8.3

    :param interface: interface name
    :type interface: str
    :param method: method name
    :type method: str
    :param version: method version
    :type version: int
    :param apihost: API hostname
    :type apihost: str
    :param https: whether to use HTTPS
    :type https: bool
    :param params: parameters for endpoint
    :type params: dict
    :return: endpoint response
    :rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str`
    """
    scheme = 'https' if https else 'http'
    url = "%s://%s/%s/%s/v%s/" % (scheme, apihost, interface, method, version)
    return webapi_request(url, 'POST', caller=caller, session=session, params=params)
| |
from lettuce import step, world
from survey.features.page_objects.indicators import NewIndicatorPage, ListIndicatorPage
from survey.models import QuestionModule, Batch, Indicator
@step(u'And I visit new indicator page')
def and_i_visit_new_indicator_page(step):
    # Open the "new indicator" form page in the shared test browser.
    world.page = NewIndicatorPage(world.browser)
    world.page.visit()


@step(u'And I fill in the indicator details')
def and_i_fill_in_the_indicator_details(step):
    # NOTE(review): relies on world.health_module and world.batch having been
    # created by earlier steps in the scenario -- confirm in the feature files.
    form_data = {'module': world.health_module.id,
                 'name': 'Health',
                 'description': 'some description',
                 'measure': '%',
                 'batch': world.batch.id}
    world.page.fill_valid_values(form_data)


@step(u'Then I should see that the indicator was successfully added')
def then_i_should_see_that_the_indicator_was_successfully_added(step):
    # Asserts the "Indicator successfully created" flash message is shown.
    world.page.see_success_message("Indicator", "created")
@step(u'And I have two indicators')
def and_i_have_two_indicators(step):
    # Fixture: two indicators sharing one freshly created module and batch.
    health_module = QuestionModule.objects.create(name="Health")
    batch = Batch.objects.create(name="Batch")
    world.indicator_1 = Indicator.objects.create(
        name="indicator name",
        description="rajni indicator",
        measure='Percentage',
        module=health_module,
        batch=batch)
    world.indicator_2 = Indicator.objects.create(
        name="indicator name 2",
        description="rajni indicator 2",
        measure='Percentage',
        module=health_module,
        batch=batch)


@step(u'When I visit indicator listing page')
def when_i_visit_indicator_listing_page(step):
    world.page = ListIndicatorPage(world.browser)
    world.page.visit()


@step(u'Then I should see all indicators listed')
def then_i_should_see_indicators_listed(step):
    # NOTE(review): world.indicator_3 is created by a different step
    # ('And I have an indicator not in that survey'), so this assertion only
    # works in scenarios that also ran that step -- verify the feature files.
    world.page.see_indicators(
        [world.indicator_1, world.indicator_2, world.indicator_3])
@step(u'And I have three batches')
def and_i_have_three_batches(step):
    # batch_1 and batch_2 belong to world.survey; batch_3 has no survey.
    world.batch_1 = Batch.objects.create(
        name="New Batch 1", survey=world.survey)
    world.batch_2 = Batch.objects.create(
        name="New Batch 2", survey=world.survey)
    world.batch_3 = Batch.objects.create(name="New Batch 3")


@step(u'And I have an indicator not in that survey')
def and_i_have_an_indicator_not_in_that_survey(step):
    # indicator_3 hangs off batch_3, which is outside world.survey.
    world.indicator_3 = Indicator.objects.create(
        name="indicator name 3",
        description="rajni indicator 3",
        measure='Percentage',
        module=world.health_module_1,
        batch=world.batch_3)


@step(u'And I have indicator in each batch')
def and_i_have_indicator_in_each_batch(step):
    # batch_1 gets two indicators (different modules); batch_2 gets one.
    world.indicator_1 = Indicator.objects.create(
        name="indicator name 1",
        description="rajni indicator 1",
        measure='Percentage',
        module=world.health_module_1,
        batch=world.batch_1)
    world.indicator_1b = Indicator.objects.create(
        name="indicator name with different module",
        description="rajni indicator 1",
        measure='Percentage',
        module=world.health_module_2,
        batch=world.batch_1)
    world.indicator_2 = Indicator.objects.create(
        name="indicator name 2",
        description="rajni indicator 2",
        measure='Percentage',
        module=world.health_module_2,
        batch=world.batch_2)
@step(u'When I select a survey')
def when_i_select_a_survey(step):
    world.page.select('survey', [world.survey.id])


@step(u'And I should see action buttons')
def and_i_should_see_action_buttons(step):
    # Per-row action links expected on the indicator listing page.
    world.page.validate_fields_present(
        ["Delete", "Edit", "Formula", "Analysis"])


@step(u'And I click on get list')
def and_i_click_on_get_list(step):
    world.page.click_by_css('#a-indicator-list')


@step(u'Then I should see indicators in that survey')
def then_i_should_see_indicators_in_that_survey(step):
    # Indicators in the survey's batches are visible; indicator_3 is not.
    world.page.see_indicators(
        [world.indicator_1, world.indicator_1b, world.indicator_2])
    world.page.is_text_present(world.indicator_3.name, False)


@step(u'When I select a batch')
def when_i_select_a_batch(step):
    world.page.select('batch', [world.batch_1.id])


@step(u'Then I should see indicators in that batch')
def then_i_should_see_indicators_in_that_batch(step):
    # Only batch_1's indicators remain after filtering by batch.
    world.page.see_indicators([world.indicator_1, world.indicator_1b])
    world.page.is_text_present(world.indicator_2.name, False)
    world.page.is_text_present(world.indicator_3.name, False)


@step(u'And I have two modules')
def and_i_have_two_modules(step):
    world.health_module_1 = QuestionModule.objects.create(name="Module")
    world.health_module_2 = QuestionModule.objects.create(name="Module 2")


@step(u'When I select a module')
def when_i_select_a_module(step):
    world.page.select('module', [world.health_module_1.id])


@step(u'Then I should see indicators in that module')
def then_i_should_see_indicators_in_that_module(step):
    # Only indicator_1 belongs to health_module_1.
    world.page.see_indicators([world.indicator_1])
    world.page.is_text_present(world.indicator_1b.name, False)
    world.page.is_text_present(world.indicator_2.name, False)
    world.page.is_text_present(world.indicator_3.name, False)


@step(u'When I click on add indicator button')
def when_i_click_on_add_indicator_button(step):
    world.page.click_by_css('#add_indicator')


@step(u'Then I should see add indicator page')
def then_i_should_see_add_indicator_page(step):
    world.page = NewIndicatorPage(world.browser)
    world.page.validate_url()
@step(u'And I click the delete indicator link')
def and_i_click_the_delete_indicator_link(step):
    world.page.click_by_css("#delete-indicator_%s" % world.indicator_1.id)


@step(u'Then I should see confirm indicator batch')
def then_i_should_see_confirm_indicator_batch(step):
    # The delete-confirmation modal should name the indicator being removed.
    world.page.see_confirm_modal_message(world.indicator_1.name)


@step(u'Then I should go back to indicator listing page')
def then_i_should_go_back_to_indicator_listing_page(step):
    world.page = ListIndicatorPage(world.browser)
    world.page.validate_url()


@step(u'And I should see the indicator successfully deleted')
def and_i_should_see_the_indicator_successfully_deleted(step):
    world.page.see_success_message("Indicator", "deleted")


@step(u'And I click the edit indicator link')
def and_i_click_the_edit_indicator_link(step):
    world.page.click_by_css("#edit-indicator_%s" % world.indicator_1.id)


@step(u'Then I should see the indicator details in the form')
def then_i_should_see_the_indicator_details_in_the_form(step):
    # The edit form should be pre-populated with indicator_1's data.
    world.form_data = {'name': world.indicator_1.name,
                       'description': world.indicator_1.description,
                       'measure': '%'}
    world.page.validate_form_values(world.form_data)
    world.page.is_text_present(world.indicator_1.batch.name)
    world.page.is_text_present(world.indicator_1.module.name)


@step(u'When I fill in the new values for the indicator')
def when_i_fill_in_the_new_values_for_the_indicator(step):
    # NOTE(review): "Indicator new nme " looks like a typo for "name", but it
    # is a runtime value only echoed back by the page object, so it is kept.
    world.form_data = {'survey': world.survey.id,
                       'batch': world.batch_1.id,
                       'module': world.indicator_1.module.id,
                       'name': "Indicator new nme ",
                       'description': "Hoho description",
                       'measure': '%'}
    world.page.fill_valid_values(world.form_data)


@step(u'Then I should see the indicator successfully edited')
def then_i_should_see_the_indicator_successfully_edited(step):
    world.page.see_success_message("Indicator", 'edited')
| |
from contextlib import suppress
import functools
import os
from os.path import dirname, join, realpath
from pprint import pformat
from subprocess import DEVNULL, Popen
import sys
import shutil
import tempfile
import requests
from app.client.cluster_api_client import ClusterMasterAPIClient, ClusterSlaveAPIClient
from app.util import log, poll, process_utils
from app.util.conf.base_config_loader import BASE_CONFIG_FILE_SECTION
from app.util.conf.config_file import ConfigFile
from app.util.conf.configuration import Configuration
from app.util.secret import Secret
class FunctionalTestCluster(object):
    """
    This class can create and destroy local clusters consisting of a single master and multiple slave services. It also
    provides methods to introspect into the state of the services. This is used for functional tests.
    """
    _MASTER_PORT = 43000  # fixed port for the single master service
    _SLAVE_START_PORT = 43001  # first slave port; subsequent slaves count up from here
    def __init__(self, verbose=False):
        """
        :param verbose: If true, output from the master and slave processes is allowed to pass through to stdout.
        :type verbose: bool
        """
        self._verbose = verbose
        self._logger = log.get_logger(__name__)
        self.master = None  # ClusterController for the master; set by start_master()
        self.slaves = []  # ClusterController list for slaves; grown by start_slaves()
        self._master_eventlog_name = None
        self._slave_eventlog_names = []
        self._next_slave_port = self._SLAVE_START_PORT
        # Repo root; assumes this file sits four directory levels below it -- TODO confirm.
        self._clusterrunner_repo_dir = dirname(dirname(dirname(dirname(realpath(__file__)))))
        # Launch the app with the same interpreter that is running the tests.
        self._app_executable = [sys.executable, '-m', 'app']
        self._master_app_base_dir = None
        self._slaves_app_base_dirs = []
    @property
    def master_app_base_dir(self):
        """Temp base directory of the master service (a ``tempfile.TemporaryDirectory``), or None before start."""
        return self._master_app_base_dir

    @property
    def slaves_app_base_dirs(self):
        """Temp base directories of slave services, in the order they were started."""
        return self._slaves_app_base_dirs
    def _create_test_config_file(self, base_dir_sys_path: str, **extra_conf_vals) -> str:
        """
        Create a temporary conf file just for this test.

        :param base_dir_sys_path: Sys path of the base app dir
        :param extra_conf_vals: Optional; additional values to set in the conf file
        :return: The path to the conf file
        """
        # Copy default conf file to tmp location
        self._conf_template_path = join(self._clusterrunner_repo_dir, 'conf', 'default_clusterrunner.conf')
        # Create the conf file inside base dir so we can clean up the test at the end just by removing the base dir
        # NOTE(review): using NamedTemporaryFile(...).name only to reserve a
        # filename is racy -- the file is removed as soon as the object is
        # garbage collected, so the name could in principle be reused.
        test_conf_file_path = tempfile.NamedTemporaryFile(dir=base_dir_sys_path).name
        shutil.copy(self._conf_template_path, test_conf_file_path)
        os.chmod(test_conf_file_path, ConfigFile.CONFIG_FILE_MODE)
        conf_file = ConfigFile(test_conf_file_path)

        # Set custom conf file values for this test
        conf_values_to_set = {
            'secret': Secret.get(),
            'base_directory': base_dir_sys_path,
            'max_log_file_size': 1024 * 5,
            'hostname': 'localhost',  # Ensure the slave is reachable by master.
        }
        conf_values_to_set.update(extra_conf_vals)

        for conf_key, conf_value in conf_values_to_set.items():
            conf_file.write_value(conf_key, conf_value, BASE_CONFIG_FILE_SECTION)

        return test_conf_file_path
def start_master(self, **extra_conf_vals) -> ClusterMasterAPIClient:
"""
Start a master service for this cluster.
:param extra_conf_vals: Optional; additional values to set in the master service conf file
:return: An API client object through which API calls to the master can be made
"""
self._start_master_process(**extra_conf_vals)
return self.master_api_client
def start_slaves(self, num_slaves, num_executors_per_slave=1, start_port=None, **extra_conf_vals):
"""
Start slave services for this cluster.
:param num_slaves: The number of slave services to start
:type num_slaves: int
:param num_executors_per_slave: The number of executors that each slave will be configured to use
:type num_executors_per_slave: int
:param start_port: The port number of the first slave to launch. If None, default to the current counter.
Subsequent slaves will be started on subsequent port numbers.
:type start_port: int | None
:return: A list of API client objects through which API calls to each slave can be made
:rtype: list[ClusterSlaveAPIClient]
"""
new_slaves = self._start_slave_processes(num_slaves, num_executors_per_slave, start_port, **extra_conf_vals)
return [ClusterSlaveAPIClient(base_api_url=slave.url) for slave in new_slaves]
def start_slave(self, **kwargs):
"""
Start a slave service for this cluster. (This is a convenience method equivalent to `start_slaves(1)`.)
:return: An API client object through which API calls to the slave can be made
:rtype: ClusterSlaveAPIClient
"""
return self.start_slaves(num_slaves=1, **kwargs)[0]
    @property
    def master_api_client(self):
        """A new :class:`ClusterMasterAPIClient` pointed at the running master."""
        return ClusterMasterAPIClient(base_api_url=self.master.url)

    @property
    def slave_api_clients(self):
        """New :class:`ClusterSlaveAPIClient` objects, one per started slave."""
        return [ClusterSlaveAPIClient(base_api_url=slave.url) for slave in self.slaves]
    def _start_master_process(self, **extra_conf_vals) -> 'ClusterController':
        """
        Start the master process on localhost.

        :param extra_conf_vals: Optional; additional values to set in the master service conf file
        :return: A ClusterController object which wraps the master service's Popen instance
        """
        if self.master:
            raise RuntimeError('Master service was already started for this cluster.')

        popen_kwargs = {}
        if not self._verbose:
            popen_kwargs.update({'stdout': DEVNULL, 'stderr': DEVNULL})  # hide output of master process

        self._master_eventlog_name = tempfile.NamedTemporaryFile(delete=False).name
        # Keep a reference to the TemporaryDirectory object so the directory
        # is not removed while the master process is still running.
        self._master_app_base_dir = tempfile.TemporaryDirectory()
        master_config_file_path = self._create_test_config_file(self._master_app_base_dir.name, **extra_conf_vals)

        master_hostname = 'localhost'
        master_cmd = self._app_executable + [
            'master',
            '--port', str(self._MASTER_PORT),
            '--eventlog-file', self._master_eventlog_name,
            '--config-file', master_config_file_path,
        ]
        # Don't use shell=True in the Popen here; the kill command might only kill "sh -c", not the actual process.
        self.master = ClusterController(
            Popen(master_cmd, **popen_kwargs),
            host=master_hostname,
            port=self._MASTER_PORT,
        )
        self._block_until_master_ready()  # wait for master to start up
        return self.master
def _block_until_master_ready(self, timeout=10):
"""
Blocks until the master is ready and responsive. Repeatedly sends a GET request to the master until the
master responds. If the master is not responsive within the timeout, raise an exception.
:param timeout: Max number of seconds to wait before raising an exception
:type timeout: int
"""
is_master_ready = functools.partial(self._is_url_responsive, self.master.url)
master_is_ready = poll.wait_for(is_master_ready, timeout_seconds=timeout)
if not master_is_ready:
raise TestClusterTimeoutError('Master service did not start up before timeout.')
    def _start_slave_processes(self, num_slaves, num_executors_per_slave, start_port=None, **extra_conf_vals):
        """
        Start the slave processes on localhost.

        :param num_slaves: The number of slave processes to start
        :type num_slaves: int
        :param num_executors_per_slave: The number of executors to start each slave with
        :type num_executors_per_slave: int
        :param start_port: The port number of the first slave to launch. If None, default to the current counter.
            Subsequent slaves will be started on subsequent port numbers.
        :type start_port: int | None
        :return: A list of ClusterController objects which wrap the slave services' Popen instances
        :rtype: list[ClusterController]
        """
        popen_kwargs = {}
        if not self._verbose:
            popen_kwargs.update({'stdout': DEVNULL, 'stderr': DEVNULL})  # hide output of slave process

        if start_port is not None:
            self._next_slave_port = start_port

        slave_hostname = 'localhost'
        new_slaves = []
        for _ in range(num_slaves):
            slave_port = self._next_slave_port
            self._next_slave_port += 1

            slave_eventlog = tempfile.NamedTemporaryFile().name  # each slave writes to its own file to avoid collision
            self._slave_eventlog_names.append(slave_eventlog)
            # Keep TemporaryDirectory references alive for as long as the slaves run.
            slave_base_app_dir = tempfile.TemporaryDirectory()
            self._slaves_app_base_dirs.append(slave_base_app_dir)
            slave_config_file_path = self._create_test_config_file(slave_base_app_dir.name, **extra_conf_vals)

            slave_cmd = self._app_executable + [
                'slave',
                '--port', str(slave_port),
                '--num-executors', str(num_executors_per_slave),
                '--master-url', '{}:{}'.format(self.master.host, self.master.port),
                '--eventlog-file', slave_eventlog,
                '--config-file', slave_config_file_path,
            ]
            # Don't use shell=True in the Popen here; the kill command may only kill "sh -c", not the actual process.
            new_slaves.append(ClusterController(
                Popen(slave_cmd, **popen_kwargs),
                host=slave_hostname,
                port=slave_port,
            ))

        self.slaves.extend(new_slaves)
        self._block_until_slaves_ready()
        return new_slaves
    def _block_until_slaves_ready(self, timeout=15):
        """
        Blocks until all slaves are ready and responsive. Repeatedly sends a GET request to each slave in turn until
        the slave responds. If all slaves do not become responsive within the timeout, raise an exception.

        :param timeout: Max number of seconds to wait before raising an exception
        :type timeout: int
        """
        slaves_to_check = self.slaves.copy()  # we'll remove slaves from this list as they become ready

        def are_all_slaves_ready():
            for slave in slaves_to_check.copy():  # copy list so we can modify the original list inside the loop
                if self._is_url_responsive(slave.url):
                    slaves_to_check.remove(slave)
                else:
                    # Bail out on the first unresponsive slave; already-ready
                    # slaves stay removed so they are not polled again.
                    return False
            return True

        all_slaves_are_ready = poll.wait_for(are_all_slaves_ready, timeout_seconds=timeout)
        num_slaves = len(self.slaves)
        num_ready_slaves = num_slaves - len(slaves_to_check)
        if not all_slaves_are_ready:
            raise TestClusterTimeoutError('All slaves did not start up before timeout. '
                                          '{} of {} started successfully.'.format(num_ready_slaves, num_slaves))
def _is_url_responsive(self, url):
is_responsive = False
with suppress(requests.ConnectionError):
resp = requests.get(url)
if resp and resp.ok:
is_responsive = True
return is_responsive
    def block_until_build_queue_empty(self, timeout=15):
        """
        This blocks until the master's build queue is empty. This data is exposed via the /queue endpoint and contains
        any jobs that are currently building or not yet started. If the queue is not empty before the timeout, this
        method raises an exception.

        :param timeout: The maximum number of seconds to block before raising an exception.
        :type timeout: int
        """
        if self.master is None:
            return  # nothing to wait on if no master was ever started

        def is_queue_empty():
            nonlocal queue_data  # captured so the last response can be logged on timeout
            queue_resp = requests.get('{}/v1/queue'.format(self.master.url))
            if queue_resp and queue_resp.ok:
                queue_data = queue_resp.json()
                if len(queue_data['queue']) == 0:
                    return True  # queue is empty, so master must be idle
            self._logger.info('Waiting on build queue to become empty.')
            return False

        queue_data = None
        queue_is_empty = poll.wait_for(is_queue_empty, timeout_seconds=timeout, poll_period=1,
                                       exceptions_to_swallow=(requests.ConnectionError, ValueError))
        if not queue_is_empty:
            self._logger.error('Master queue did not become empty before timeout. '
                               'Last queue response: {}'.format(pformat(queue_data)))
            raise TestClusterTimeoutError('Master queue did not become empty before timeout.')
def kill_master(self):
"""
Kill the master process and return an object wrapping the return code, stdout, and stderr.
:return: The killed master service with return code, stdout, and stderr set.
:rtype: ClusterController
"""
if self.master:
self.master.kill()
master, self.master = self.master, None
return master
def kill_slaves(self, kill_gracefully=True):
"""
Kill all the slave processes and return objects wrapping the return code, stdout, and stderr of each process.
:param kill_gracefully: If True do a gracefull kill (sigterm), else do a sigkill
:type kill_gracefully: bool
:return: The killed slave services with return code, stdout, and stderr set.
:rtype: list[ClusterController]
"""
for service in self.slaves:
if service:
service.kill(kill_gracefully)
slaves, self.slaves = self.slaves, []
return slaves
def kill(self):
"""
Kill the master and all the slave subprocesses.
:return: The killed master and killed slave services with return code, stdout, and stderr set.
:rtype: list[ClusterController]
"""
services = [self.kill_master()]
services.extend(self.kill_slaves())
services = [service for service in services if service is not None] # remove `None` values from list
return services
def block_until_n_slaves_marked_dead_in_master(self, num_slaves, timeout):
def are_n_slaves_marked_dead_in_master(n):
slaves_marked_dead = [slave for slave in self.master_api_client.get_slaves().values()
if isinstance(slave, list) and not slave[0].get('is_alive')]
return len(slaves_marked_dead) == n
def are_slaves_marked_dead_in_master():
are_n_slaves_marked_dead_in_master(num_slaves)
slaves_marked_dead_within_timeout = poll.wait_for(are_slaves_marked_dead_in_master, timeout_seconds=timeout)
return slaves_marked_dead_within_timeout
def block_until_n_slaves_dead(self, num_slaves, timeout):
def are_n_slaves_dead(n):
dead_slaves = [slave for slave in self.slaves if not slave.is_alive()]
return len(dead_slaves) == n
def are_slaves_dead():
are_n_slaves_dead(num_slaves)
slaves_died_within_timeout = poll.wait_for(are_slaves_dead, timeout_seconds=timeout)
return slaves_died_within_timeout
class ClusterController(object):
    """
    A data container that wraps a process and holds metadata about that process. This is useful for wrapping up data
    relating to the various services started by the FunctionalTestCluster (master, slaves, etc.).
    """
    def __init__(self, process, host, port):
        """
        :param process: The Popen process instance of the associated service
        :type process: Popen
        :param host: The service host (e.g., 'localhost')
        :type host: str
        :param port: The service port (e.g., 43000)
        :type port: int
        """
        self.process = process
        self.host = host
        self.port = port
        # Populated by kill(); remain None while the process is running.
        self.return_code = None
        self.stdout = None
        self.stderr = None
        self._logger = log.get_logger(__name__)

    def kill(self, kill_gracefully=True):
        """
        Kill the underlying process for this service object and set the return code and output.

        :param kill_gracefully: If True do a graceful kill (SIGTERM), else do a SIGKILL
        :type kill_gracefully: bool
        :return: The return code, stdout, and stderr of the process
        :rtype: (int, str, str)
        """
        if kill_gracefully:
            self._logger.notice('Gracefully killing process with pid {}...'.format(self.process.pid))
            output = process_utils.kill_gracefully(self.process, timeout=15)
        else:
            self._logger.notice('Hard killing process with pid {}...'.format(self.process.pid))
            output = process_utils.kill_hard(self.process)

        self.return_code, self.stdout, self.stderr = output
        return self.return_code, self.stdout, self.stderr

    @property
    def url(self):
        # Scheme (http/https) comes from the global app Configuration.
        return '{}://{}:{}'.format(Configuration['protocol_scheme'], self.host, self.port)

    def is_alive(self):
        # Popen.poll() returns None while the child process is still running.
        return self.process.poll() is None
class TestClusterTimeoutError(Exception):
    """Raised when an operation on the test cluster exceeds its timeout."""
| |
from future import standard_library
standard_library.install_aliases()
from builtins import str
from configparser import ConfigParser
import errno
import logging
import os
import sys
import textwrap
# cryptography is an optional dependency: when it is missing, Fernet stays
# undefined and generate_fernet_key() falls back to a plain-text sentinel.
# BUG FIX: narrowed the bare `except:` (which swallowed every exception,
# including SystemExit/KeyboardInterrupt) to ImportError only.
try:
    from cryptography.fernet import Fernet
except ImportError:
    pass
def generate_fernet_key():
    """
    Return a newly generated Fernet key as a string, or a plain-text
    sentinel value when the cryptography package is not installed
    (in which case the Fernet name is undefined, raising NameError).
    """
    try:
        key = Fernet.generate_key().decode()
    except NameError:
        # cryptography was not importable, so encryption is unavailable.
        key = "cryptography_not_found_storing_passwords_in_plain_text"
    return key
def expand_env_var(env_var):
    """
    Expands (potentially nested) env vars by repeatedly applying
    `expandvars` and `expanduser` until interpolation stops having
    any effect.
    """
    # Coerce to str up front so non-string inputs go through one expansion
    # pass, matching the original fixed-point loop's behavior.
    current = str(env_var)
    while True:
        expanded = os.path.expanduser(os.path.expandvars(current))
        if expanded == current:
            return expanded
        current = expanded
class AirflowConfigException(Exception):
    """Raised for configuration problems: missing keys or unparsable values."""
# Hard-coded fallback values, consulted by ConfigParserWithDefaults.get()
# only when a key is found neither in the environment nor in the parsed
# config file. Keys here are section -> option -> default value.
defaults = {
    'core': {
        'unit_test_mode': False,
        'parallelism': 32,
        'load_examples': True,
        'plugins_folder': None,
    },
    'webserver': {
        'base_url': 'http://localhost:8080',
        'web_server_host': '0.0.0.0',
        'web_server_port': '8080',
        'authenticate': False,
        'filter_by_owner': False,
        'demo_mode': False,
        'secret_key': 'airflowified',
        'expose_config': False,
        'threads': 4,
    },
    'scheduler': {
        'statsd_on': False,
        'statsd_host': 'localhost',
        'statsd_port': 8125,
        'statsd_prefix': 'airflow',
        'job_heartbeat_sec': 5,
        'scheduler_heartbeat_sec': 60,
        'authenticate': False,
    },
    'celery': {
        'default_queue': 'default',
        'flower_port': '5555'
    },
    'smtp': {
        'smtp_starttls': True,
    },
}
# Template written to AIRFLOW_CONFIG on first run. {AIRFLOW_HOME} and
# {FERNET_KEY} are filled in via str.format at write time.
# BUG FIX: the webserver key was written as "thread" while both the defaults
# dict and readers use "threads", so a user editing the generated value was
# silently ignored (lookups always fell through to the hard-coded default).
DEFAULT_CONFIG = """\
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = {AIRFLOW_HOME}
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
dags_folder = {AIRFLOW_HOME}/dags
# The folder where airflow should store its log files
base_log_folder = {AIRFLOW_HOME}/logs
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor
executor = SequentialExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engine, more information
# their website
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True
# Where your Airflow plugins are stored
plugins_folder = {AIRFLOW_HOME}/plugins
# Secret key to save connection passwords in the db
fernet_key = {FERNET_KEY}
[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is use in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# Secret key used to run your flask app
secret_key = temporary_key
# number of threads to run the Gunicorn web server
threads = 4
# Expose the configuration file in the web server
expose_config = true
# Set to true to turn on authentication : http://pythonhosted.org/airflow/installation.html#web-authentication
authenticate = False
# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False
[smtp]
# If you want airflow to send emails on retries, failure, and you want to
# the airflow.utils.send_email function, you have to configure an smtp
# server here
smtp_host = localhost
smtp_starttls = True
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above
# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor
# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16
# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the workers local log files to the airflow main
# web server, who then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and open
# visible from the main web server to connect into the workers.
worker_log_server_port = 8793
# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
# Another key Celery setting
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it `airflow flower`. This defines the port that Celery Flower runs on
flower_port = 5555
# Default queue that tasks get assigned to and that worker listen on.
default_queue = default
[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5
# Statsd (https://github.com/etsy/statsd) integration settings
# statsd_on = False
# statsd_host = localhost
# statsd_port = 8125
# statsd_prefix = airflow
"""
# Minimal config template written to {AIRFLOW_HOME}/unittests.cfg on first
# run and used when the test suite switches into unit-test mode.
TEST_CONFIG = """\
[core]
airflow_home = {AIRFLOW_HOME}
dags_folder = {AIRFLOW_HOME}/dags
base_log_folder = {AIRFLOW_HOME}/logs
executor = SequentialExecutor
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/unittests.db
unit_test_mode = True
load_examples = True
[webserver]
base_url = http://localhost:8080
web_server_host = 0.0.0.0
web_server_port = 8080
[smtp]
smtp_host = localhost
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
celery_app_name = airflow.executors.celery_executor
celeryd_concurrency = 16
worker_log_server_port = 8793
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
flower_port = 5555
default_queue = default
[scheduler]
job_heartbeat_sec = 1
scheduler_heartbeat_sec = 5
authenticate = true
"""
class ConfigParserWithDefaults(ConfigParser):
    """
    ConfigParser that resolves each option against three layers, in order:
    an AIRFLOW__{SECTION}__{KEY} environment variable, the parsed config
    file, and finally a dict of hard-coded defaults.
    """

    def __init__(self, defaults, *args, **kwargs):
        self.defaults = defaults
        ConfigParser.__init__(self, *args, **kwargs)

    def get(self, section, key):
        """Return the option value, applying env-var expansion to it."""
        section = str(section).lower()
        key = str(key).lower()

        # Environment variables get precedence; format is
        # AIRFLOW__{SECTION}__{KEY} (note the double underscore).
        env_var = 'AIRFLOW__{S}__{K}'.format(S=section.upper(), K=key.upper())
        if env_var in os.environ:
            return expand_env_var(os.environ[env_var])

        # ...then whatever was parsed from the config file...
        if self.has_option(section, key):
            return expand_env_var(ConfigParser.get(self, section, key))

        # ...and finally the hard-coded defaults.
        d = self.defaults
        if section in d and key in d[section]:
            return expand_env_var(d[section][key])

        raise AirflowConfigException(
            "section/key [{section}/{key}] not found "
            "in config".format(**locals()))

    def getboolean(self, section, key):
        """Parse the option as a strict true/false boolean."""
        val = str(self.get(section, key)).lower().strip()
        # Tolerate trailing inline comments such as "True  # why".
        if '#' in val:
            val = val.split('#')[0].strip()
        if val == "true":
            return True
        if val == "false":
            return False
        raise AirflowConfigException("Not a boolean.")

    def getint(self, section, key):
        """Parse the option as an integer."""
        return int(self.get(section, key))
def mkdir_p(path):
    """
    Create `path` (including parents), tolerating a directory that already
    exists; any other failure raises AirflowConfigException.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # An existing *directory* is fine; anything else is a real failure.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise AirflowConfigException('Had trouble creating a directory')
"""
Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
"~/airflow" and "~/airflow/airflow.cfg" respectively as defaults.
"""
# Resolve the home directory first, expanding any nested env vars.
if 'AIRFLOW_HOME' not in os.environ:
    AIRFLOW_HOME = expand_env_var('~/airflow')
else:
    AIRFLOW_HOME = expand_env_var(os.environ['AIRFLOW_HOME'])
# Ensure the home directory exists before anything tries to write into it.
mkdir_p(AIRFLOW_HOME)
# The config path falls back to ~/airflow.cfg (legacy location) and then to
# a file inside AIRFLOW_HOME, unless AIRFLOW_CONFIG overrides it.
if 'AIRFLOW_CONFIG' not in os.environ:
    if os.path.isfile(expand_env_var('~/airflow.cfg')):
        AIRFLOW_CONFIG = expand_env_var('~/airflow.cfg')
    else:
        AIRFLOW_CONFIG = AIRFLOW_HOME + '/airflow.cfg'
else:
    AIRFLOW_CONFIG = expand_env_var(os.environ['AIRFLOW_CONFIG'])
# Generate default config files when missing. The right way to change your
# configuration is to alter the generated file, not this code.
# BUG FIX: the file handles were previously opened/written/closed manually,
# leaking the handle if write() raised; `with` guarantees closure.
if not os.path.isfile(AIRFLOW_CONFIG):
    FERNET_KEY = generate_fernet_key()
    logging.info("Creating new config file in: " + AIRFLOW_CONFIG)
    with open(AIRFLOW_CONFIG, 'w') as f:
        # locals() here is the module namespace, supplying AIRFLOW_HOME
        # and FERNET_KEY to the template.
        f.write(DEFAULT_CONFIG.format(**locals()))

TEST_CONFIG_FILE = AIRFLOW_HOME + '/unittests.cfg'
if not os.path.isfile(TEST_CONFIG_FILE):
    logging.info("Creating new config file in: " + TEST_CONFIG_FILE)
    with open(TEST_CONFIG_FILE, 'w') as f:
        f.write(TEST_CONFIG.format(**locals()))

logging.info("Reading the config from " + AIRFLOW_CONFIG)
def test_mode():
    """
    Re-point the module-level ``conf`` at the unit-test configuration file.

    BUG FIX: the previous implementation bound ``conf`` locally (so callers
    saw no effect) and passed the TEST_CONFIG template *string* — not a file
    path — to ConfigParser.read(), which silently reads nothing for a
    nonexistent filename.
    """
    global conf
    conf = ConfigParserWithDefaults(defaults)
    conf.read(TEST_CONFIG_FILE)
# Module-level configuration object, parsed once at import time from
# AIRFLOW_CONFIG and shared by everything that imports this module.
conf = ConfigParserWithDefaults(defaults)
conf.read(AIRFLOW_CONFIG)
# Warn once at import time when encryption is available (cryptography was
# imported successfully) but no fernet_key has been configured.
if 'cryptography' in sys.modules and not conf.has_option('core', 'fernet_key'):
    logging.warning(textwrap.dedent("""
Your system supports encrypted passwords for Airflow connections but is
currently storing them in plaintext! To turn on encryption, add a
"fernet_key" option to the "core" section of your airflow.cfg file,
like this:
[core]
fernet_key = <YOUR FERNET KEY>
Your airflow.cfg file is located at: {cfg}.
If you need to generate a fernet key, you can run this code:
from airflow.configuration import generate_fernet_key
generate_fernet_key()
""".format(cfg=AIRFLOW_CONFIG)))
| |
import urllib2
try:
import xml.etree.cElementTree as xml
except ImportError:
import xml.etree.ElementTree as xml
from datetime import datetime
import logging
import socket
from util import create_xml_from_dict, dict_ignore_nones
from api_exceptions import CommsException, InvalidResponse
import parse
import settings
logger = logging.getLogger(__name__)
filelog = logging.getLogger('filelog.' + __name__)
class CoreAPI(object):
    """
    XML-over-HTTP client for the core ticketing API (Python 2: urllib2).

    Each public method builds an XML document from its keyword arguments,
    POSTs it to the configured endpoint, and parses the XML response via the
    matching function in the ``parse`` module. Transport failures raise
    CommsException; malformed XML responses raise InvalidResponse.
    """

    def __init__(
        self, username, password, url,
        remote_ip, remote_site, accept_language,
        ext_start_session_url, api_request_timeout,
        additional_elements=None
    ):
        """
        :param username: API user id (may be resolved later by start_session)
        :param password: API password; may be empty when sessions are
            resolved externally via ext_start_session_url
        :param url: main API endpoint
        :param remote_ip: end-customer IP forwarded with each request
        :param remote_site: end-customer site forwarded with each request
        :param accept_language: value for the Accept-Language header, or None
        :param ext_start_session_url: endpoint used to resolve the user when
            no credentials are supplied
        :param api_request_timeout: per-request timeout in seconds; falls
            back to settings.API_REQUEST_TIMEOUT when falsy
        :param additional_elements: extra XML elements merged into every
            make_core_request() call
        """
        self.username = username
        self.password = password
        self.url = url
        self.remote_ip = remote_ip
        self.remote_site = remote_site
        self.accept_language = accept_language
        self.ext_start_session_url = ext_start_session_url
        # Updated from the Content-Language header of the last response.
        self.content_language = None
        # Populated by start_session() when the user is resolved server-side.
        self.running_user = None
        if api_request_timeout:
            self.api_request_timeout = api_request_timeout
        else:
            self.api_request_timeout = settings.API_REQUEST_TIMEOUT
        if not additional_elements:
            additional_elements = {}
        self.additional_elements = additional_elements

    def _post(self, method_name, data, url, headers=None):
        """
        POST ``data`` to ``url`` and return the raw response body.

        Every call's duration is logged; each transport failure mode (HTTP
        error, URL error, socket timeout, socket error) is wrapped in a
        CommsException whose description carries whatever detail the
        underlying exception exposed.

        :param method_name: API method name, used for logging only
        :param data: request body (UTF-8 encoded XML string)
        :param url: endpoint to POST to
        :param headers: optional dict of HTTP headers
        :return: response body string, or None if the request failed
        :raises CommsException: on any transport-level failure
        """
        filelog.debug(
            u'URL=%s; API_REQUEST=%s',
            url, unicode(data, 'UTF-8')
        )
        request = urllib2.Request(
            url=url, data=data, headers=headers
        )
        response_string = None
        before = datetime.now()
        after = None
        try:
            response = urllib2.urlopen(
                request, timeout=self.api_request_timeout
            )
        except urllib2.HTTPError as e:
            after = datetime.now()
            raise CommsException(
                underlying_exception=e,
                description=(
                    'HTTPError, error_code={0}'.format(
                        getattr(e, 'code', None),
                    )
                )
            )
        except urllib2.URLError as e:
            after = datetime.now()
            raise CommsException(
                underlying_exception=e,
                description=(
                    'URLError, reason={0}'.format(
                        getattr(e, 'reason', None),
                    )
                )
            )
        except socket.timeout as e:
            after = datetime.now()
            err_string = 'Socket timeout'
            details = False
            if getattr(e, 'message', None):
                err_string = '{0} - {1}'.format(
                    err_string, e.message
                )
                details = True
            if (
                getattr(e, 'errno', None) and
                getattr(e, 'strerror', None)
            ):
                # BUG FIX: the original format string dropped the accumulated
                # err_string prefix and reused e.errno for both placeholders.
                err_string = '{0} - Errno={1}, strerror={2}.'.format(
                    err_string, e.errno, e.strerror
                )
                details = True
            if not details:
                err_string = '{0} - {1}'.format(
                    err_string, str(e)
                )
            raise CommsException(
                underlying_exception=e,
                description=err_string,
            )
        except socket.error as e:
            after = datetime.now()
            err_string = 'Socket error'
            details = False
            if getattr(e, 'message', None):
                err_string = '{0} - {1}'.format(
                    err_string, e.message
                )
                details = True
            if (
                getattr(e, 'errno', None) and
                getattr(e, 'strerror', None)
            ):
                # BUG FIX: same placeholder mix-up as the timeout branch.
                err_string = '{0} - Errno={1}, strerror={2}.'.format(
                    err_string, e.errno, e.strerror
                )
                details = True
            if not details:
                err_string = '{0} - {1}'.format(
                    err_string, str(e)
                )
            raise CommsException(
                underlying_exception=e,
                description=err_string,
            )
        else:
            after = datetime.now()
            response_string = response.read()
            self.content_language = response.info(
            ).getheader('Content-Language')
            filelog.debug(
                u'API_RESPONSE=%s',
                unicode(response_string, 'UTF-8')
            )
        finally:
            # Log timing even on failure; `after` is unset only if something
            # escaped before any handler ran.
            if after:
                time_taken = (after - before).total_seconds()
                logger.debug(
                    'url=%s, api_call=%s, time_taken=%s',
                    url, method_name, time_taken
                )
            else:
                logger.error(
                    (
                        'Response time unknown, url=%s, ' +
                        'api_call=%s, request_time=%s'
                    ),
                    url, method_name, before.strftime('%d/%m/%Y %H:%M:%S')
                )
        return response_string

    def _create_xml_and_post(self, method_name, arg_dict, url=None):
        """
        Serialize arg_dict to an XML request, POST it, and return the parsed
        XML response element.

        :raises CommsException: on transport failure (logged and re-raised)
        :raises InvalidResponse: when the response body is not parsable XML
        """
        data = xml.tostring(
            create_xml_from_dict(method_name, arg_dict),
            encoding='UTF-8'
        )
        if not url:
            url = self.url
        headers = {
            'Content-Type': 'text/xml',
        }
        if self.accept_language:
            headers['Accept-Language'] = self.accept_language
        try:
            response = xml.fromstring(
                self._post(
                    method_name=method_name,
                    data=data,
                    headers=headers,
                    url=url
                )
            )
        except CommsException as e:
            logger.error(e)
            raise e
        except xml.ParseError as e:
            err_string = 'XML parsing error, detail="{0}", arguments="{1}"'
            raise InvalidResponse(
                underlying_exception=e,
                description=err_string.format(
                    str(e), arg_dict
                ),
            )
        return response

    def make_core_request(self, api_call, **kwargs):
        """
        Issue ``api_call`` with the standard identity arguments
        (user/remote ip/remote site) plus any configured additional
        elements merged with the caller's kwargs (None values dropped).
        """
        args = {
            'user_id': self.username,
            'remote_ip': self.remote_ip,
            'remote_site': self.remote_site,
        }
        args.update(self.additional_elements)
        args.update(kwargs)
        return self._create_xml_and_post(
            method_name=api_call,
            arg_dict=dict_ignore_nones(**args),
            url=self.url
        )

    def parse_response(self, parse_function, xml_elem):
        """ Calls the specified parse function
        Calls the specified parse function and logs
        any errors that are raised.
        """
        try:
            result = parse_function(
                parse.script_error(xml_elem)
            )
        except Exception as e:
            logger.error(e)
            raise e
        return result

    def start_session_resolve_user(
        self, user_id=None, remote_site=None, remote_ip=None
    ):
        """Start a session via the external endpoint, letting the server
        resolve the effective user."""
        resp = self._create_xml_and_post(
            method_name='start_session',
            arg_dict=dict_ignore_nones(
                user_id=user_id,
                remote_site=remote_site,
                remote_ip=remote_ip
            ),
            url=self.ext_start_session_url
        )
        return self.parse_response(
            parse.start_session_resolve_user_result, resp
        )

    def start_session(self):
        """
        Start an API session and return its crypto_block.

        Without full credentials, the user is resolved via the external
        endpoint and self.username/self.running_user are updated from the
        response.
        """
        if not self.password or not self.username:
            resp = self.start_session_resolve_user(
                user_id=self.username,
                remote_ip=self.remote_ip,
                remote_site=self.remote_site,
            )
            crypto_block = resp['crypto_block']
            self.username = resp['running_user'].user_id
            self.running_user = resp['running_user']
        else:
            resp = self._create_xml_and_post(
                method_name='start_session',
                arg_dict={
                    'user_id': self.username,
                    'user_passwd': self.password,
                }
            )
            crypto_block = resp.findtext('crypto_block')
        return crypto_block

    def style_map(self, map_key):
        """Fetch the style map for the given key."""
        resp = self.make_core_request(
            'style_map',
            user_passwd=self.password,
            map_key=map_key
        )
        return self.parse_response(parse.style_map_result, resp)

    def event_search(
        self, crypto_block=None, upfront_data_token=None, s_keys=None,
        s_dates=None, s_coco=None, s_city=None, s_geo=None, s_geo_lat=None,
        s_geo_long=None, s_geo_rad_km=None, s_src=None, s_area=None,
        s_ven=None, s_eve=None, s_class=None, event_token_list=None,
        request_source_info=None, request_extra_info=None,
        request_video_iframe=None, request_cost_range=None,
        request_media=None, request_custom_fields=None, request_reviews=None,
        request_avail_details=None,
        s_top=None, s_user_rating=None,
        s_critic_rating=None, s_auto_range=None, page_length=None,
        page_number=None,
        s_cust_fltr=None, s_airport=None,
        mime_text_type=None,
    ):
        """Search for events; authenticates with the password when no
        crypto_block is supplied."""
        if crypto_block is None:
            user_passwd = self.password
        else:
            user_passwd = None
        resp = self.make_core_request(
            'event_search',
            user_passwd=user_passwd, crypto_block=crypto_block,
            upfront_data_token=upfront_data_token, s_keys=s_keys,
            s_dates=s_dates, s_coco=s_coco, s_geo=s_geo, s_geo_lat=s_geo_lat,
            s_geo_long=s_geo_long, s_geo_rad_km=s_geo_rad_km, s_src=s_src,
            s_area=s_area, s_ven=s_ven, s_eve=s_eve, s_class=s_class,
            s_city=s_city, event_token_list=event_token_list,
            request_source_info=request_source_info,
            request_extra_info=request_extra_info,
            request_video_iframe=request_video_iframe,
            request_cost_range=request_cost_range,
            request_media=request_media,
            request_custom_fields=request_custom_fields,
            request_avail_details=request_avail_details,
            s_top=s_top, s_user_rating=s_user_rating,
            s_critic_rating=s_critic_rating,
            s_auto_range=s_auto_range, page_length=page_length,
            page_number=page_number,
            s_cust_fltr=s_cust_fltr,
            request_reviews=request_reviews,
            s_airport=s_airport,
            mime_text_type=mime_text_type,
        )
        return self.parse_response(parse.event_search_result, resp)

    def extra_info(
        self, crypto_block, event_token, upfront_data_token=None,
        source_info=None, request_media=None,
        mime_text_type=None, request_avail_details=None
    ):
        """Fetch extended information for a single event."""
        resp = self.make_core_request(
            'extra_info',
            crypto_block=crypto_block, upfront_data_token=upfront_data_token,
            event_token=event_token, source_info=source_info,
            request_media=request_media, mime_text_type=mime_text_type,
            request_avail_details=request_avail_details,
        )
        return self.parse_response(parse.extra_info_result, resp)

    def date_time_options(
        self, crypto_block, event_token, upfront_data_token=None,
        earliest_date=None, latest_date=None, request_cost_range=None,
        page_length=None, page_number=None
    ):
        """List available performance dates/times for an event."""
        resp = self.make_core_request(
            'date_time_options',
            crypto_block=crypto_block, event_token=event_token,
            upfront_data_token=upfront_data_token, earliest_date=earliest_date,
            latest_date=latest_date, request_cost_range=request_cost_range,
            page_length=page_length, page_number=page_number,
        )
        return self.parse_response(parse.date_time_options_result, resp)

    def month_options(
        self, crypto_block, event_token, upfront_data_token=None,
    ):
        """List months in which the event has performances."""
        resp = self.make_core_request(
            'month_options',
            crypto_block=crypto_block,
            event_token=event_token,
            upfront_data_token=upfront_data_token,
        )
        return self.parse_response(parse.month_options_result, resp)

    def availability_options(
        self, crypto_block, upfront_data_token=None, perf_token=None,
        departure_date=None, usage_date=None, self_print_mode=None,
        trolley_token=None, add_discounts=None, quantity_options_only=None,
        no_of_tickets=None, add_free_seat_blocks=None,
        add_user_commission=None,
    ):
        """Fetch ticket availability for a performance."""
        resp = self.make_core_request(
            'availability_options',
            crypto_block=crypto_block, upfront_data_token=upfront_data_token,
            perf_token=perf_token, departure_date=departure_date,
            usage_date=usage_date, self_print_mode=self_print_mode,
            trolley_token=trolley_token, add_discounts=add_discounts,
            quantity_options_only=quantity_options_only,
            no_of_tickets=no_of_tickets,
            add_free_seat_blocks=add_free_seat_blocks,
            add_user_commission=add_user_commission,
        )
        return self.parse_response(parse.availability_options_result, resp)

    def despatch_options(
        self, crypto_block, upfront_data_token=None, perf_token=None,
        departure_date=None, usage_date=None, self_print_mode=None,
        trolley_token=None,
    ):
        """Fetch the despatch (delivery) options for a performance."""
        resp = self.make_core_request(
            'despatch_options',
            crypto_block=crypto_block, upfront_data_token=upfront_data_token,
            perf_token=perf_token, departure_date=departure_date,
            usage_date=usage_date, self_print_mode=self_print_mode,
            trolley_token=trolley_token,
        )
        return self.parse_response(parse.despatch_options_result, resp)

    def discount_options(
        self, crypto_block, band_token, no_of_tickets,
        upfront_data_token=None, despatch_token=None, trolley_token=None,
        seat_block_token=None, seat_block_offset=None,
        add_user_commission=None,
    ):
        """Fetch discount options for a price band and ticket count."""
        resp = self.make_core_request(
            'discount_options',
            crypto_block=crypto_block,
            band_token=band_token, despatch_token=despatch_token,
            no_of_tickets=no_of_tickets, upfront_data_token=upfront_data_token,
            trolley_token=trolley_token, seat_block_token=seat_block_token,
            seat_block_offset=seat_block_offset,
            add_user_commission=add_user_commission,
        )
        return self.parse_response(parse.discount_options_result, resp)

    def create_order(
        self, crypto_block, upfront_data_token=None, discount_token=None,
        despatch_token=None,
    ):
        """Create an order from the selected discount/despatch tokens."""
        resp = self.make_core_request(
            'create_order',
            crypto_block=crypto_block, upfront_data_token=upfront_data_token,
            discount_token=discount_token, despatch_token=despatch_token,
        )
        return self.parse_response(parse.create_order_result, resp)

    def create_order_and_reserve(
        self, crypto_block, upfront_data_token=None, discount_token=None,
        despatch_token=None,
    ):
        """Create an order and immediately reserve it."""
        resp = self.make_core_request(
            'create_order_and_reserve',
            crypto_block=crypto_block, upfront_data_token=upfront_data_token,
            discount_token=discount_token, despatch_token=despatch_token,
        )
        return self.parse_response(parse.create_order_and_reserve_result, resp)

    def trolley_add_order(
        self, crypto_block, order_token, upfront_data_token=None,
        trolley_token=None, describe_trolley=None
    ):
        """Add an order to a trolley (creating one when no token given)."""
        resp = self.make_core_request(
            'trolley_add_order',
            crypto_block=crypto_block, order_token=order_token,
            upfront_data_token=upfront_data_token, trolley_token=trolley_token,
            describe_trolley=describe_trolley,
        )
        return self.parse_response(parse.trolley_add_order_result, resp)

    def trolley_describe(
        self, crypto_block, trolley_token, upfront_data_token=None,
    ):
        """Describe the contents of a trolley."""
        resp = self.make_core_request(
            'trolley_describe',
            crypto_block=crypto_block, trolley_token=trolley_token,
            upfront_data_token=upfront_data_token,
        )
        return self.parse_response(parse.trolley_describe_result, resp)

    def trolley_remove(
        self, crypto_block, trolley_token, upfront_data_token=None,
        remove_item=None, describe_trolley=None,
    ):
        """Remove an item (or everything) from a trolley."""
        resp = self.make_core_request(
            'trolley_remove',
            crypto_block=crypto_block, trolley_token=trolley_token,
            upfront_data_token=upfront_data_token, remove_item=remove_item,
            describe_trolley=describe_trolley,
        )
        return self.parse_response(parse.trolley_remove_result, resp)

    def make_reservation(
        self, crypto_block, trolley_token, upfront_data_token=None,
        self_print_mode=None, describe_trolley=None,
    ):
        """Reserve the contents of a trolley."""
        resp = self.make_core_request(
            'make_reservation',
            crypto_block=crypto_block, trolley_token=trolley_token,
            upfront_data_token=upfront_data_token,
            self_print_mode=self_print_mode,
            describe_trolley=describe_trolley,
        )
        return self.parse_response(parse.make_reservation_result, resp)

    def get_reservation_link(
        self, crypto_block, trolley_token, upfront_data_token=None,
    ):
        """Fetch a link for an existing reservation."""
        resp = self.make_core_request(
            'get_reservation_link',
            crypto_block=crypto_block, trolley_token=trolley_token,
            upfront_data_token=upfront_data_token,
        )
        return self.parse_response(parse.get_reservation_link_result, resp)

    def release_reservation(
        self, crypto_block, upfront_data_token=None,
    ):
        """Release the reservation attached to the session."""
        resp = self.make_core_request(
            'release_reservation',
            crypto_block=crypto_block, upfront_data_token=upfront_data_token,
        )
        return self.parse_response(parse.release_reservation_result, resp)

    def purchase_reservation_part_one(
        self, crypto_block, customer_data, return_token, return_domain,
        return_path, return_with_https, encryption_key, card_data=None,
        user_can_use_data=None, supplier_can_use_data=None,
        world_can_use_data=None, upfront_data_token=None,
    ):
        """First leg of the redirect-based purchase flow."""
        resp = self.make_core_request(
            'purchase_reservation_part_one',
            crypto_block=crypto_block, customer_data=customer_data,
            return_token=return_token, return_domain=return_domain,
            return_path=return_path, return_with_https=return_with_https,
            encryption_key=encryption_key, card_data=card_data,
            user_can_use_data=user_can_use_data,
            supplier_can_use_data=supplier_can_use_data,
            world_can_use_data=world_can_use_data,
            upfront_data_token=upfront_data_token,
        )
        return self.parse_response(
            parse.purchase_reservation_part_one_result, resp
        )

    def purchase_reservation_part_two(
        self, returning_token, new_return_token, new_return_path, http_referer,
        http_accept, http_user_agent, callback_data, encryption_key,
        crypto_block=None, send_confirmation_email=None, results_url=None,
        upfront_data_token=None,
    ):
        """Second leg of the redirect-based purchase flow; authenticates
        with the password when no crypto_block is supplied."""
        if crypto_block is None:
            user_passwd = self.password
        else:
            user_passwd = None
        resp = self.make_core_request(
            'purchase_reservation_part_two',
            user_passwd=user_passwd,
            crypto_block=crypto_block,
            upfront_data_token=upfront_data_token,
            returning_token=returning_token,
            new_return_token=new_return_token,
            new_return_path=new_return_path,
            http_referer=http_referer, http_accept=http_accept,
            http_user_agent=http_user_agent, callback_data=callback_data,
            encryption_key=encryption_key,
            send_confirmation_email=send_confirmation_email,
            results_url=results_url,
        )
        return self.parse_response(
            parse.purchase_reservation_part_two_result, resp
        )

    def purchase_reservation(
        self, crypto_block, customer_data, card_data=None,
        send_confirmation_email=None,
        upfront_data_token=None,
    ):
        """Single-step purchase of the current reservation."""
        resp = self.make_core_request(
            'purchase_reservation',
            crypto_block=crypto_block, customer_data=customer_data,
            card_data=card_data,
            send_confirmation_email=send_confirmation_email,
            upfront_data_token=upfront_data_token,
        )
        # NOTE(review): other methods pass parse.<name>_result; this one
        # passes parse.purchase_reservation — confirm that is intentional.
        return self.parse_response(
            parse.purchase_reservation, resp
        )

    def transaction_info(
        self, transaction_id, describe_trolley=None, describe_customer=None,
        describe_external_sale_page=None, crypto_block=None,
        upfront_data_token=None,
    ):
        """Fetch details of a completed transaction; authenticates with the
        password when no crypto_block is supplied."""
        if crypto_block is None:
            user_passwd = self.password
        else:
            user_passwd = None
        resp = self.make_core_request(
            'transaction_info',
            user_passwd=user_passwd,
            transaction_id=transaction_id,
            describe_trolley=describe_trolley,
            describe_customer=describe_customer,
            describe_external_sale_page=describe_external_sale_page,
            crypto_block=crypto_block, upfront_data_token=upfront_data_token,
        )
        return self.parse_response(parse.transaction_info_result, resp)

    def save_external_sale_page(
        self, transaction_id, sale_page_type, sale_page_subtype, sale_page,
        crypto_block=None, upfront_data_token=None,
    ):
        """Attach an external sale page to a transaction; authenticates with
        the password when no crypto_block is supplied."""
        if crypto_block is None:
            user_passwd = self.password
        else:
            user_passwd = None
        resp = self.make_core_request(
            'save_external_sale_page',
            user_passwd=user_passwd,
            crypto_block=crypto_block,
            upfront_data_token=upfront_data_token,
            transaction_id=transaction_id,
            sale_page_type=sale_page_type,
            sale_page_subtype=sale_page_subtype,
            sale_page=sale_page,
        )
        return self.parse_response(parse.save_external_sale_page_result, resp)
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines dataloader functionalities."""
import re
from typing import Any, Callable, Optional, Tuple
from clu import deterministic_data
import jax
from lib.datasets import billiard
from lib.datasets import trafficsigns
from lib.preprocess import image_ops
from lib.preprocess import preprocess_spec
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
def get_dataset(
    dataset: str,
    global_batch_size: int,
    rng: np.ndarray,
    train_preprocessing_fn: Optional[Callable[[Any], Any]] = None,
    eval_preprocessing_fn: Optional[Callable[[Any], Any]] = None,
    num_epochs: Optional[int] = None,
    filter_fn: Optional[Callable[[Any], Any]] = None,
    **kwargs,
) -> Tuple[tf.data.Dataset, tf.data.Dataset, int]:
  """Creates training and eval datasets.

  The train dataset will be shuffled, while the eval dataset won't be.

  Args:
    dataset: Name of the dataset.
    global_batch_size: Global batch size to use.
    rng: PRNG seed used for shuffling.
    train_preprocessing_fn: Function that will be applied on each single sample.
    eval_preprocessing_fn: Optional preprocessing function specifically for eval
      samples (if None, use train_preprocessing_fn for eval samples as well).
    num_epochs: Number of epochs to repeat dataset (default=None, optional).
    filter_fn: Function that filters samples according to some criteria.
    **kwargs: Optional keyword arguments for specific datasets.

  Returns:
    A tuple consisting of a train dataset, an eval dataset as well as the
    number of classes.
  """
  del kwargs
  rng, preprocess_rng = jax.random.split(rng)
  if dataset.startswith("test_image_classification"):
    # The shape of the dataset can be provided as suffix of the name of the
    # dataset test_image_classification_batch_height_width.
    match = re.search(r"test_image_classification_(\d+)_(\d+)_(\d+)_(\d+)",
                      dataset)
    if match:
      shape = tuple(int(match.group(i)) for i in [1, 2, 3, 4])
    else:
      shape = (13, 32, 32, 3)
    # Fixed seeds keep this synthetic smoke-test dataset deterministic.
    images_tensor = tf.random.uniform(
        shape, minval=0, maxval=256, dtype=tf.int32, seed=22432)
    images_tensor = tf.cast(images_tensor, tf.uint8)
    labels_tensor = tf.random.uniform((shape[0],),
                                      minval=0,
                                      maxval=10,
                                      dtype=tf.int32,
                                      seed=4202)
    ds_image = tf.data.Dataset.from_tensor_slices(images_tensor)
    ds_label = tf.data.Dataset.from_tensor_slices(labels_tensor)
    ds = tf.data.Dataset.zip({"image": ds_image, "label": ds_label})
    train_ds = ds
    eval_ds = ds
    num_classes = 10
  elif dataset == "trafficsigns":
    train_ds = trafficsigns.load("train")
    eval_ds = trafficsigns.load("test")
    num_classes = 4
  elif dataset.startswith("billiard"):
    # The format of the dataset string is "billiard-label_fn_str-{valid,test}"
    # where label_fn_str options are specified in data/billiard.py
    # Example: billiard-left-color-min-max-valid
    parts = dataset.split("-")
    label_fn_str = "-".join(parts[1:-1])
    evaluation_split = parts[-1]
    train_ds, num_classes = billiard.load_billiard("train", label_fn_str)
    eval_ds, _ = billiard.load_billiard(evaluation_split, label_fn_str)
  elif dataset.startswith("caltech_birds2011"):
    mode = dataset[len("caltech_birds2011") + 1:]
    train_ds, eval_ds, num_classes = _get_birds200_dataset(mode, rng)
  # BUG FIX: removed a second, unreachable
  # `elif dataset.startswith("test_image_classification")` branch here —
  # the identical condition is already handled by the first `if` above, so
  # that duplicated block (including its tf.device("/CPU:0") wrapper) was
  # dead code.
  else:  # Should be a TFDS dataset.
    train_ds, eval_ds, num_classes = _get_tfds_dataset(dataset, rng)

  # Set up a preprocessing function.
  if train_preprocessing_fn is None:

    @tf.autograph.experimental.do_not_convert  # Usually fails anyway.
    def _image_preprocess_fn(features):
      if "image" in features:
        features["image"] = tf.cast(features["image"], tf.float32) / 255.0
      if "id" in features:  # Included in some TFDS datasets, breaks JAX.
        del features["id"]
      return features

    train_preprocessing_fn = _image_preprocess_fn
  if eval_preprocessing_fn is None:
    eval_preprocessing_fn = train_preprocessing_fn

  rng_train, rng_eval = jax.random.split(preprocess_rng)

  train_ds = _prepare_dataset(
      train_ds,
      global_batch_size,
      True,
      rng_train,
      train_preprocessing_fn,
      num_epochs=num_epochs,
      filter_fn=filter_fn)
  eval_ds = _prepare_dataset(
      eval_ds,
      global_batch_size,
      False,
      rng_eval,
      eval_preprocessing_fn,
      num_epochs=1,
      filter_fn=filter_fn)
  return train_ds, eval_ds, num_classes
def _get_birds200_dataset(
    mode: str,
    rng: np.ndarray) -> Tuple[tf.data.Dataset, tf.data.Dataset, int]:
  """Loads caltech_birds2011 (200 classes) as (train_ds, eval_ds, classes)."""
  assert jax.host_count() == 1, (
      "caltech_birds2011 dataset does not support multihost training. "
      "Found {} hosts.".format(jax.host_count()))
  builder = tfds.builder("caltech_birds2011")
  num_classes = 200
  # Derive a per-host RNG so that each host shuffles training data
  # differently, then split off a seed stream for the readers.
  rng, data_rng = jax.random.split(rng)
  data_rng = jax.random.fold_in(data_rng, jax.host_id())
  data_rng, shuffle_rng = jax.random.split(data_rng)
  if mode == "train-val":
    # Carve a validation set out of the (unshuffled) train split:
    # first 5000 examples for training, the remainder for evaluation.
    cfg = tfds.ReadConfig(shuffle_seed=shuffle_rng[0])
    full_ds = builder.as_dataset(
        split="train", shuffle_files=False, read_config=cfg)
    train_ds = full_ds.take(5000).shuffle(5000, seed=shuffle_rng[0])
    eval_ds = full_ds.skip(5000)
    return train_ds, eval_ds, num_classes
  if mode == "train-test":
    # Use the official train/test split; only training files are shuffled.
    train_cfg = tfds.ReadConfig(shuffle_seed=shuffle_rng[0])
    train_ds = builder.as_dataset(
        split="train", shuffle_files=True, read_config=train_cfg)
    eval_cfg = tfds.ReadConfig(shuffle_seed=shuffle_rng[1])
    eval_ds = builder.as_dataset(
        split="test", shuffle_files=False, read_config=eval_cfg)
    return train_ds, eval_ds, num_classes
  raise ValueError(f"Unknown mode: {mode}.")
def _get_tfds_dataset(
    dataset: str,
    rng: np.ndarray) -> Tuple[tf.data.Dataset, tf.data.Dataset, int]:
  """Loads a TFDS dataset as (train_ds, eval_ds, num_classes)."""
  builder = tfds.builder(dataset)
  info = builder.info
  # Datasets without a "label" feature report zero classes.
  num_classes = (info.features["label"].num_classes
                 if "label" in info.features else 0)
  # Derive a per-host RNG so each host reads/shuffles training data
  # differently.
  rng, data_rng = jax.random.split(rng)
  data_rng = jax.random.fold_in(data_rng, jax.host_id())
  data_rng, shuffle_rng = jax.random.split(data_rng)
  train_split = deterministic_data.get_read_instruction_for_host(
      "train", info.splits["train"].num_examples)
  train_ds = builder.as_dataset(
      split=train_split,
      shuffle_files=True,
      read_config=tfds.ReadConfig(shuffle_seed=shuffle_rng[0]))
  # imagenet2012 calls its held-out split "validation"; everything else
  # (including cifar10) uses "test".
  eval_split_name = "validation" if dataset == "imagenet2012" else "test"
  eval_split = deterministic_data.get_read_instruction_for_host(
      eval_split_name, info.splits[eval_split_name].num_examples)
  eval_ds = builder.as_dataset(
      split=eval_split,
      shuffle_files=False,
      read_config=tfds.ReadConfig(shuffle_seed=shuffle_rng[1]))
  return train_ds, eval_ds, num_classes
def _prepare_dataset(
    dataset: tf.data.Dataset,
    global_batch_size: int,
    shuffle: bool,
    rng: np.ndarray,
    preprocess_fn: Optional[Callable[[Any], Any]] = None,
    num_epochs: Optional[int] = None,
    filter_fn: Optional[Callable[[Any], Any]] = None) -> tf.data.Dataset:
  """Repeats, shuffles, preprocesses, filters, batches and prefetches.

  Args:
    dataset: The dataset to prepare.
    global_batch_size: The global batch size to use.
    shuffle: Whether the shuffle the data on example level.
    rng: PRNG for seeding the shuffle operations.
    preprocess_fn: Preprocessing function that will be applied to every
      example.
    num_epochs: Number of epochs to repeat the dataset.
    filter_fn: Function that filters samples according to some criteria.

  Returns:
    The dataset, batched as [local_device_count, per_host_batch, ...].
  """
  if shuffle and rng is None:
    raise ValueError("Shuffling without RNG is not supported.")
  if global_batch_size % jax.host_count() != 0:
    raise ValueError(f"Batch size {global_batch_size} not divisible by number "
                     f"of hosts ({jax.host_count()}).")
  per_host_batch_size = global_batch_size // jax.host_count()
  # tf.data uses single integers as seed.
  seed = rng[0] if rng is not None else None
  ds = dataset.repeat(num_epochs)
  if shuffle:
    ds = ds.shuffle(1024, seed=seed)
  if preprocess_fn is not None:
    ds = ds.map(preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  if filter_fn is not None:
    ds = ds.filter(filter_fn)
  # Inner batch is the per-host batch, outer batch is the device axis.
  ds = ds.batch(per_host_batch_size, drop_remainder=True)
  ds = ds.batch(jax.local_device_count(), drop_remainder=True)
  return ds.prefetch(tf.data.experimental.AUTOTUNE)
def parse_preprocessing_strings(training_string, eval_string):
  """Parses conjurer preprocessing strings into preprocessing functions.

  Args:
    training_string: Preprocessing spec string for the training set.
    eval_string: Preprocessing spec string for the evaluation set.

  Returns:
    Tuple (train_preprocessing_fn, eval_preprocessing_fn).
  """
  # Dropped the leftover debug print() calls so every parse no longer
  # dumps the spec string and the full op registry to stdout.
  train_preprocessing_fn = preprocess_spec.parse(training_string,
                                                 image_ops.all_ops())
  eval_preprocessing_fn = preprocess_spec.parse(eval_string,
                                                image_ops.all_ops())
  return train_preprocessing_fn, eval_preprocessing_fn
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module manages a distributed RAM cache as a global python dictionary in
each AppEngine instance. AppEngine can spin up new instances or kill old ones
at any time. Each instance's RAM cache is independent and might not have the
same entries as found in the RAM caches of other instances.
Each instance will do the work needed to compute a given RAM cache entry
itself. The values computed in a given instance will speed up future requests
made to that instance only.
When the user edits something in the app, the updated entity is stored in
datastore. Also, the singleton SharedInvalidate entity is updated with the
timestamp of the change. Every request handler must start processing a request
by first calling SharedInvalidate.check_for_distributed_invalidation() which
checks for any needed invalidations and clears RAM cache entries in
that instance if needed.
For now, there is only a single RAM cache per instance and when anything is
invalidated, that entire RAM cache is completely cleared. In the future,
invalidations could be compartmentalized by RAM cache type, or even specific
entity IDs. Monorail uses that approach, but existing ChromeStatus code does
not need it.
Calling code must not mutate any value that is passed into set() or returned
from get(). If calling code needs to mutate such objects, it should call
copy.copy() or copy.deepcopy() to avoid unintentional cumulative mutations.
Unlike memcache, this RAM cache has no concept of expiration time. So,
whenever a cached value would become invalid, it must be invalidated.
"""
import logging
import time as time_module
from google.cloud import ndb
client = ndb.Client()
global_cache = {}
expires = {}
# Whenever the cache would have more than this many items, some
# random item is dropped, or the entire cache is cleared.
# If our instances are killed by appengine for exceeding memory limits,
# we can configure larger instances and/or reduce this value.
MAX_CACHE_SIZE = 10000
total_num_hits = 0
total_num_misses = 0
def set(key, value, time=None):
  """Emulate the memcache.set() method using a RAM cache.

  Args:
    key: Cache key to store.
    value: Value to store.  Callers must not mutate it afterwards.
    time: Optional lifetime in seconds; falsy means "never expires".
  """
  # Only make room when inserting a genuinely new key.  Overwriting an
  # existing key does not grow the cache, so the old code's unconditional
  # popitem() needlessly evicted an unrelated entry in that case.
  if key not in global_cache and len(global_cache) + 1 > MAX_CACHE_SIZE:
    popped_item = global_cache.popitem()
    if popped_item[0] in expires:
      del expires[popped_item[0]]
  global_cache[key] = value
  if time:
    expires[key] = int(time_module.time()) + time
  else:
    # Overwriting without a lifetime must clear any previous expiration,
    # otherwise a stale expires entry would silently expire the new value.
    expires.pop(key, None)
def _check_expired(keys):
  """Remove any of the given keys whose lifetime has passed."""
  now = int(time_module.time())
  for key in keys:
    if key in expires and expires[key] < now:
      del expires[key]
      # Use pop() instead of del: other code paths (e.g. delete()) can
      # remove a cache entry without clearing its expires record, and a
      # plain del would then raise KeyError here.
      global_cache.pop(key, None)
def _count_hit_or_miss(keys):
  """Update the global hit/miss counters and log the running hit ratio."""
  global total_num_hits, total_num_misses
  for key in keys:
    was_hit = key in global_cache
    if was_hit:
      total_num_hits += 1
    else:
      total_num_misses += 1
    verb = 'hit' if was_hit else 'miss'
    # TODO(jrobbins): Replace this with proper monitoring variables
    logging.info('cache %s for %r. Hit ratio: %5.2f%%.', verb, key,
                 total_num_hits / (total_num_hits + total_num_misses) * 100)
def get(key):
  """Emulate the memcache.get() method using a RAM cache."""
  wanted = [key]
  _check_expired(wanted)
  _count_hit_or_miss(wanted)
  return global_cache.get(key)
def get_multi(keys):
  """Emulate the memcache.get_multi() method using a RAM cache."""
  _check_expired(keys)
  _count_hit_or_miss(keys)
  found = {}
  for key in keys:
    if key in global_cache:
      found[key] = global_cache[key]
  return found
def set_multi(entries, time=None):
  """Emulate the memcache.set_multi() method using a RAM cache."""
  # When the whole batch would not fit, clear everything rather than
  # evicting item by item.
  if len(global_cache) + len(entries) > MAX_CACHE_SIZE:
    global_cache.clear()
    expires.clear()
  global_cache.update(entries)
  if not time:
    return
  expire_time = int(time_module.time()) + time
  for key in entries:
    expires[key] = expire_time
def delete(key):
  """Emulate the memcache.delete() method using a RAM cache."""
  if key in global_cache:
    del global_cache[key]
    # Also drop the expiration record; leaving it behind made a later
    # _check_expired() hit a stale expires entry for a missing cache key.
    expires.pop(key, None)
    flush_all()  # Note: this is wasteful but infrequent in our app.
def flush_all():
  """Emulate the memcache.flush_all() method using a RAM cache.

  This does not clear the RAM cache in this instance.  That happens
  at the start of the next request when the request handler calls
  SharedInvalidate.check_for_distributed_invalidation().
  """
  # Bump the shared datastore timestamp; every instance (including this
  # one) notices it on its next request and clears its own RAM cache.
  SharedInvalidate.invalidate()
class SharedInvalidateParent(ndb.Model):
  """Empty entity kind used only as the datastore ancestor of SharedInvalidate."""
  pass
class SharedInvalidate(ndb.Model):
  """Singleton datastore entity whose ``updated`` time signals invalidation."""

  # Fixed IDs: there is exactly one parent and one singleton entity.
  PARENT_ENTITY_ID = 1234
  SINGLETON_ENTITY_ID = 5678
  # Keys are built at class-definition time; ndb.Key needs an active
  # client context even outside of a request.
  with client.context():
    PARENT_KEY = ndb.Key('SharedInvalidateParent', PARENT_ENTITY_ID)
    SINGLETON_KEY = ndb.Key(
        'SharedInvalidateParent', PARENT_ENTITY_ID,
        'SharedInvalidate', SINGLETON_ENTITY_ID)
  # Timestamp of the last invalidation processed by this AppEngine
  # instance (class-level, i.e. per-instance state, not stored in ndb).
  last_processed_timestamp = None
  # Set to the current time automatically on every put().
  updated = ndb.DateTimeProperty(auto_now=True)
  @classmethod
  def invalidate(cls):
    """Tell this and other appengine instances to invalidate their caches."""
    singleton = None
    entities = cls.query(ancestor=cls.PARENT_KEY).fetch(1)
    if entities:
      singleton = entities[0]
    if not singleton:
      singleton = SharedInvalidate(key=cls.SINGLETON_KEY)
    singleton.put()  # automatically sets singleton.updated to now.
    # The cache in each instance (including this one) will be
    # cleared on the next call to check_for_distributed_invalidation()
    # which should happen at the start of request processing.
  @classmethod
  def check_for_distributed_invalidation(cls):
    """Check if any appengine instance has invalidated the cache."""
    singleton = None
    entities = cls.query(ancestor=cls.PARENT_KEY).fetch(1)
    if entities:
      singleton = entities[0]
    if not singleton:
      return  # No news is good news
    # Clear the whole RAM cache if anything changed since we last looked.
    if (cls.last_processed_timestamp is None or
        singleton.updated > cls.last_processed_timestamp):
      global_cache.clear()
      expires.clear()
      cls.last_processed_timestamp = singleton.updated
def check_for_distributed_invalidation():
  """Just a shorthand way to call the class method."""
  # Request handlers call this at the start of every request.
  SharedInvalidate.check_for_distributed_invalidation()
| |
# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ovs_bridge_test_base
call = mock.call # short hand
class OVSIntegrationBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase):
    """Tests for the native (OpenFlow) br-int bridge implementation.

    Each test invokes one br-int method and compares the exact sequence of
    OFPFlowMod / uninstall_flows calls recorded on the bridge mock against a
    hand-written expectation list, so the expectations below must mirror the
    implementation literally (cookies, priorities, table ids, match fields).
    """
    def setUp(self):
        super(OVSIntegrationBridgeTest, self).setUp()
        self.setup_bridge_mock('br-int', self.br_int_cls)
        # Default cookie that the bridge stamps onto every flow it installs.
        self.stamp = self.br.default_cookie
    def test_setup_default_table(self):
        self.br.setup_default_table()
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[],
                match=ofpp.OFPMatch(),
                priority=0,
                table_id=23)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=60),
                ],
                match=ofpp.OFPMatch(),
                priority=0,
                table_id=0)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionActions(
                        ofp.OFPIT_APPLY_ACTIONS, [
                            ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)
                        ]),
                ],
                match=ofpp.OFPMatch(),
                priority=3,
                table_id=60)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[],
                match=ofpp.OFPMatch(),
                priority=0,
                table_id=24)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_provision_local_vlan(self):
        port = 999
        lvid = 888
        segmentation_id = 777
        self.br.provision_local_vlan(port=port, lvid=lvid,
                                     segmentation_id=segmentation_id)
        (dp, ofp, ofpp) = self._get_dp()
        # Tagged traffic: rewrite the segmentation VLAN to the local VLAN.
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionSetField(
                            vlan_vid=lvid | ofp.OFPVID_PRESENT),
                    ]),
                    ofpp.OFPInstructionGotoTable(table_id=60),
                ],
                match=ofpp.OFPMatch(
                    in_port=port,
                    vlan_vid=segmentation_id | ofp.OFPVID_PRESENT),
                priority=3,
                table_id=0)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_provision_local_vlan_novlan(self):
        port = 999
        lvid = 888
        segmentation_id = None
        self.br.provision_local_vlan(port=port, lvid=lvid,
                                     segmentation_id=segmentation_id)
        (dp, ofp, ofpp) = self._get_dp()
        # Untagged traffic: push a VLAN header first, then set the local VLAN.
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionPushVlan(),
                        ofpp.OFPActionSetField(
                            vlan_vid=lvid | ofp.OFPVID_PRESENT),
                    ]),
                    ofpp.OFPInstructionGotoTable(table_id=60),
                ],
                match=ofpp.OFPMatch(
                    in_port=port,
                    vlan_vid=ofp.OFPVID_NONE),
                priority=3,
                table_id=0)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_reclaim_local_vlan(self):
        port = 999
        segmentation_id = 777
        self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.uninstall_flows(
                match=ofpp.OFPMatch(
                    in_port=port,
                    vlan_vid=segmentation_id | ofp.OFPVID_PRESENT)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_reclaim_local_vlan_novlan(self):
        port = 999
        segmentation_id = None
        self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.uninstall_flows(
                match=ofpp.OFPMatch(
                    in_port=port,
                    vlan_vid=ofp.OFPVID_NONE)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_install_dvr_to_src_mac(self):
        network_type = 'vxlan'
        vlan_tag = 1111
        gateway_mac = '08:60:6e:7f:74:e7'
        dst_mac = '00:02:b3:13:fe:3d'
        dst_port = 6666
        self.br.install_dvr_to_src_mac(network_type=network_type,
                                       vlan_tag=vlan_tag,
                                       gateway_mac=gateway_mac,
                                       dst_mac=dst_mac,
                                       dst_port=dst_port)
        (dp, ofp, ofpp) = self._get_dp()
        # Tunnel types use table 1 for the source-mac rewrite stage.
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionSetField(eth_src=gateway_mac),
                    ]),
                    ofpp.OFPInstructionGotoTable(table_id=60),
                ],
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
                priority=4,
                table_id=1)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionPopVlan(),
                        # NOTE(review): literal 6666 == dst_port above; the
                        # vlan variant of this test uses the variable.
                        ofpp.OFPActionOutput(6666, 0),
                    ]),
                ],
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
                priority=4,
                table_id=60)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_delete_dvr_to_src_mac(self):
        network_type = 'vxlan'
        vlan_tag = 1111
        dst_mac = '00:02:b3:13:fe:3d'
        self.br.delete_dvr_to_src_mac(network_type=network_type,
                                      vlan_tag=vlan_tag,
                                      dst_mac=dst_mac)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.uninstall_flows(
                strict=True,
                priority=4,
                table_id=1,
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
            call.uninstall_flows(
                strict=True,
                priority=4,
                table_id=60,
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_install_dvr_to_src_mac_vlan(self):
        network_type = 'vlan'
        vlan_tag = 1111
        gateway_mac = '08:60:6e:7f:74:e7'
        dst_mac = '00:02:b3:13:fe:3d'
        dst_port = 6666
        self.br.install_dvr_to_src_mac(network_type=network_type,
                                       vlan_tag=vlan_tag,
                                       gateway_mac=gateway_mac,
                                       dst_mac=dst_mac,
                                       dst_port=dst_port)
        (dp, ofp, ofpp) = self._get_dp()
        # VLAN networks use table 2 for the source-mac rewrite stage.
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionSetField(eth_src=gateway_mac),
                    ]),
                    ofpp.OFPInstructionGotoTable(table_id=60),
                ],
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
                priority=4,
                table_id=2)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionPopVlan(),
                        ofpp.OFPActionOutput(dst_port, 0),
                    ]),
                ],
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
                priority=4,
                table_id=60)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_delete_dvr_to_src_mac_vlan(self):
        network_type = 'vlan'
        vlan_tag = 1111
        dst_mac = '00:02:b3:13:fe:3d'
        self.br.delete_dvr_to_src_mac(network_type=network_type,
                                      vlan_tag=vlan_tag,
                                      dst_mac=dst_mac)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.uninstall_flows(
                strict=True,
                priority=4,
                table_id=2,
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
            call.uninstall_flows(
                strict=True,
                priority=4,
                table_id=60,
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_add_dvr_mac_vlan(self):
        mac = '00:02:b3:13:fe:3d'
        port = 8888
        self.br.add_dvr_mac_vlan(mac=mac, port=port)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=2),
                ],
                match=ofpp.OFPMatch(
                    eth_src=mac,
                    in_port=port),
                priority=4,
                table_id=0))
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_remove_dvr_mac_vlan(self):
        mac = '00:02:b3:13:fe:3d'
        self.br.remove_dvr_mac_vlan(mac=mac)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.uninstall_flows(eth_src=mac, table_id=0),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_add_dvr_mac_tun(self):
        mac = '00:02:b3:13:fe:3d'
        port = 8888
        self.br.add_dvr_mac_tun(mac=mac, port=port)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=1),
                ],
                match=ofpp.OFPMatch(
                    eth_src=mac,
                    in_port=port),
                priority=2,
                table_id=0))
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_remove_dvr_mac_tun(self):
        mac = '00:02:b3:13:fe:3d'
        port = 8888
        self.br.remove_dvr_mac_tun(mac=mac, port=port)
        expected = [
            call.uninstall_flows(eth_src=mac, in_port=port, table_id=0),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_install_icmpv6_na_spoofing_protection(self):
        port = 8888
        ip_addresses = ['2001:db8::1', 'fdf8:f53b:82e4::1/128']
        self.br.install_icmpv6_na_spoofing_protection(port, ip_addresses)
        (dp, ofp, ofpp) = self._get_dp()
        # One allow-flow per address (CIDR suffix is stripped), plus the
        # catch-all redirect of neighbour advertisements into table 24.
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=60),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_IPV6,
                    icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
                    ipv6_nd_target='2001:db8::1',
                    in_port=8888,
                ),
                priority=2,
                table_id=24)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=60),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_IPV6,
                    icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
                    ipv6_nd_target='fdf8:f53b:82e4::1',
                    in_port=8888,
                ),
                priority=2,
                table_id=24)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=24),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_IPV6,
                    icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
                    in_port=8888,
                ),
                priority=10,
                table_id=0)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_install_arp_spoofing_protection(self):
        port = 8888
        ip_addresses = ['192.0.2.1', '192.0.2.2/32']
        self.br.install_arp_spoofing_protection(port, ip_addresses)
        (dp, ofp, ofpp) = self._get_dp()
        # One allow-flow per address, plus the catch-all redirect of ARP
        # from this port into the spoofing-check table 24.
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=25),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_ARP,
                    arp_spa='192.0.2.1',
                    in_port=8888,
                ),
                priority=2,
                table_id=24)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=25),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_ARP,
                    arp_spa='192.0.2.2',
                    in_port=8888
                ),
                priority=2,
                table_id=24)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=self.stamp,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=24),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_ARP,
                    in_port=8888,
                ),
                priority=10,
                table_id=0)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_delete_arp_spoofing_protection(self):
        port = 8888
        self.br.delete_arp_spoofing_protection(port)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.uninstall_flows(table_id=0, match=ofpp.OFPMatch(
                eth_type=self.ether_types.ETH_TYPE_ARP,
                in_port=8888)),
            call.uninstall_flows(table_id=0, match=ofpp.OFPMatch(
                eth_type=self.ether_types.ETH_TYPE_IPV6,
                icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
                in_port=8888,
                ip_proto=self.in_proto.IPPROTO_ICMPV6)),
            call.uninstall_flows(table_id=24, in_port=port),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
| |
"""
This module (mostly) uses the XenAPI to manage Xen virtual machines.
Big fat warning: the XenAPI used in this file is the one bundled with
Xen Source, NOT XenServer nor Xen Cloud Platform. As a matter of fact it
*will* fail under those platforms. From what I've read, little work is needed
to adapt this code to XS/XCP, mostly playing with XenAPI version, but as
XCP is not taking precedence on Xen Source on many platforms, please keep
compatibility in mind.
Useful documentation:
. http://downloads.xen.org/Wiki/XenAPI/xenapi-1.0.6.pdf
. http://docs.vmd.citrix.com/XenServer/6.0.0/1.0/en_gb/api/
. https://github.com/xapi-project/xen-api/tree/master/scripts/examples/python
. http://xenbits.xen.org/gitweb/?p=xen.git;a=tree;f=tools/python/xen/xm;hb=HEAD
"""
import contextlib
import os
import sys
import salt.modules.cmdmod
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
try:
import importlib # pylint: disable=minimum-python-version
HAS_IMPORTLIB = True
except ImportError:
# Python < 2.7 does not have importlib
HAS_IMPORTLIB = False
# Define the module's virtual name
__virtualname__ = "virt"
# This module has only been tested on Debian GNU/Linux and NetBSD, it
# probably needs more path appending for other distributions.
# The path to append is the path to python Xen libraries, where resides
# XenAPI.
def _check_xenapi():
    """Locate and import the Xen Source XenAPI bindings.

    Returns the imported ``xen.xm.XenAPI`` module, or ``False`` when the
    bindings cannot be imported.
    """
    # Debian ships the Xen python bindings under a version-specific
    # directory, so resolve the running Xen version and extend sys.path
    # before attempting the import.
    if __grains__["os"] == "Debian":
        debian_xen_version = "/usr/lib/xen-common/bin/xen-version"
        if os.path.isfile(debian_xen_version):
            # __salt__ is not available in __virtual__
            xenversion = salt.modules.cmdmod._run_quiet(debian_xen_version)
            xapipath = "/usr/lib/xen-{}/lib/python".format(xenversion)
            if os.path.isdir(xapipath):
                sys.path.append(xapipath)
    try:
        if HAS_IMPORTLIB:
            return importlib.import_module("xen.xm.XenAPI")
        # Pre-importlib fallback: __import__ returns the top package, so
        # walk down to the XenAPI submodule explicitly.
        return __import__("xen.xm.XenAPI").xm.XenAPI
    except (ImportError, AttributeError):
        return False
def __virtual__():
    """Only expose this module as ``virt`` when the XenAPI bindings import."""
    if _check_xenapi() is not False:
        return __virtualname__
    return (False, "Module xapi: xenapi check failed")
@contextlib.contextmanager
def _get_xapi_session():
    """
    Get a session to XenAPI. By default, use the local UNIX socket.

    Yields the authenticated ``session.xenapi`` proxy and always logs the
    session out on exit.  Raises CommandExecutionError (chained to the
    underlying error) when the connection or login fails.
    """
    _xenapi = _check_xenapi()

    xapi_uri = __salt__["config.option"]("xapi.uri")
    xapi_login = __salt__["config.option"]("xapi.login")
    xapi_password = __salt__["config.option"]("xapi.password")

    if not xapi_uri:
        # xend local UNIX socket
        xapi_uri = "httpu:///var/run/xend/xen-api.sock"
    if not xapi_login:
        xapi_login = ""
    if not xapi_password:
        xapi_password = ""

    # Bug fix: ``session`` used to be referenced in ``finally`` even when
    # Session() itself raised, so the real error surfaced as a NameError.
    session = None
    try:
        session = _xenapi.Session(xapi_uri)
        session.xenapi.login_with_password(xapi_login, xapi_password)
        yield session.xenapi
    except Exception as err:  # pylint: disable=broad-except
        # Chain the original exception so the root cause stays visible.
        raise CommandExecutionError(
            "Failed to connect to XenAPI socket.") from err
    finally:
        if session is not None:
            session.xenapi.session.logout()
# Used rectypes (Record types):
#
# host
# host_cpu
# VM
# VIF
# VBD
def _get_xtool():
    """
    Internal, returns the path of the ``xl`` or ``xm`` command line tool
    (``xl`` preferred), or None when neither is installed.
    """
    for candidate in ("xl", "xm"):
        located = salt.utils.path.which(candidate)
        if located is not None:
            return located
def _get_all(xapi, rectype):
"""
Internal, returns all members of rectype
"""
return getattr(xapi, rectype).get_all()
def _get_label_uuid(xapi, rectype, label):
"""
Internal, returns label's uuid
"""
try:
return getattr(xapi, rectype).get_by_name_label(label)[0]
except Exception: # pylint: disable=broad-except
return False
def _get_record(xapi, rectype, uuid):
"""
Internal, returns a full record for uuid
"""
return getattr(xapi, rectype).get_record(uuid)
def _get_record_by_label(xapi, rectype, label):
    """
    Internal, returns the full record whose name label matches, or False.
    """
    uuid = _get_label_uuid(xapi, rectype, label)
    if uuid is False:
        return False
    handle = getattr(xapi, rectype)
    return handle.get_record(uuid)
def _get_metrics_record(xapi, rectype, record):
"""
Internal, returns metrics record for a rectype
"""
metrics_id = record["metrics"]
return getattr(xapi, "{}_metrics".format(rectype)).get_record(metrics_id)
def _get_val(record, keys):
"""
Internal, get value from record
"""
data = record
for key in keys:
if key in data:
data = data[key]
else:
return None
return data
def list_domains():
    """
    Return a list of virtual machine names on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_domains
    """
    with _get_xapi_session() as xapi:
        names = []
        # Skip dom0: the control domain is not a guest VM.
        for vm_ref in xapi.VM.get_all():
            record = xapi.VM.get_record(vm_ref)
            if record["is_control_domain"] is False:
                names.append(xapi.VM.get_name_label(vm_ref))
        return names
def vm_info(vm_=None):
    """
    Return detailed information about the vms.

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_info
    """
    with _get_xapi_session() as xapi:

        def _info(vm_):
            # Full record lookup; returns False when the VM label is unknown.
            vm_rec = _get_record_by_label(xapi, "VM", vm_)
            if vm_rec is False:
                return False
            vm_metrics_rec = _get_metrics_record(xapi, "VM", vm_rec)
            return {
                "cpu": vm_metrics_rec["VCPUs_number"],
                "maxCPU": _get_val(vm_rec, ["VCPUs_max"]),
                "cputime": vm_metrics_rec["VCPUs_utilisation"],
                "disks": get_disks(vm_),
                "nics": get_nics(vm_),
                "maxMem": int(_get_val(vm_rec, ["memory_dynamic_max"])),
                "mem": int(vm_metrics_rec["memory_actual"]),
                "state": _get_val(vm_rec, ["power_state"]),
            }

        info = {}
        if vm_:
            ret = _info(vm_)
            if ret is not None:
                info[vm_] = ret
        else:
            for vm_ in list_domains():
                ret = _info(vm_)
                if ret is not None:
                    # Bug fix: store the already-computed result instead of
                    # calling _info() a second time, which repeated several
                    # XenAPI round-trips per VM.
                    info[vm_] = ret
        return info
def vm_state(vm_=None):
    """
    Return list of all the vms and their state.

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_state <vm name>
    """
    with _get_xapi_session() as xapi:
        targets = [vm_] if vm_ else list_domains()
        return {
            name: _get_record_by_label(xapi, "VM", name)["power_state"]
            for name in targets
        }
def node_info():
    """
    Return a dict with information about this node

    CLI Example:

    .. code-block:: bash

        salt '*' virt.node_info
    """
    with _get_xapi_session() as xapi:
        # get node uuid
        host_rec = _get_record(xapi, "host", _get_all(xapi, "host")[0])
        # get first CPU (likely to be a core) uuid
        host_cpu_rec = _get_record(xapi, "host_cpu", host_rec["host_CPUs"][0])
        # get related metrics
        host_metrics_rec = _get_metrics_record(xapi, "host", host_rec)
        # adapted / cleaned up from Xen's xm
        def getCpuMhz():
            # NOTE(review): host_cpu_rec is a single record (dict), so this
            # loop iterates its *keys* and the "speed" membership test is a
            # substring check on each key name; upstream xm iterated a list
            # of CPU records. Behavior preserved as-is — confirm intent.
            cpu_speeds = [
                int(host_cpu_rec["speed"])
                for host_cpu_it in host_cpu_rec
                if "speed" in host_cpu_it
            ]
            if cpu_speeds:
                return sum(cpu_speeds) / len(cpu_speeds)
            else:
                return 0
        def getCpuFeatures():
            # Feature string of the first host CPU.
            if host_cpu_rec:
                return host_cpu_rec["features"]
        def getFreeCpuCount():
            # NOTE(review): same dict-keys iteration caveat as getCpuMhz();
            # the condition does not depend on the loop variable.
            cnt = 0
            for host_cpu_it in host_cpu_rec:
                if len(host_cpu_rec["cpu_pool"]) == 0:
                    cnt += 1
            return cnt
        info = {
            "cpucores": _get_val(host_rec, ["cpu_configuration", "nr_cpus"]),
            "cpufeatures": getCpuFeatures(),
            "cpumhz": getCpuMhz(),
            "cpuarch": _get_val(host_rec, ["software_version", "machine"]),
            "cputhreads": _get_val(host_rec, ["cpu_configuration", "threads_per_core"]),
            "phymemory": int(host_metrics_rec["memory_total"]) / 1024 / 1024,
            "cores_per_sockets": _get_val(
                host_rec, ["cpu_configuration", "cores_per_socket"]
            ),
            "free_cpus": getFreeCpuCount(),
            "free_memory": int(host_metrics_rec["memory_free"]) / 1024 / 1024,
            "xen_major": _get_val(host_rec, ["software_version", "xen_major"]),
            "xen_minor": _get_val(host_rec, ["software_version", "xen_minor"]),
            "xen_extra": _get_val(host_rec, ["software_version", "xen_extra"]),
            "xen_caps": " ".join(_get_val(host_rec, ["capabilities"])),
            "xen_scheduler": _get_val(host_rec, ["sched_policy"]),
            "xen_pagesize": _get_val(host_rec, ["other_config", "xen_pagesize"]),
            "platform_params": _get_val(host_rec, ["other_config", "platform_params"]),
            "xen_commandline": _get_val(host_rec, ["other_config", "xen_commandline"]),
            "xen_changeset": _get_val(host_rec, ["software_version", "xen_changeset"]),
            "cc_compiler": _get_val(host_rec, ["software_version", "cc_compiler"]),
            "cc_compile_by": _get_val(host_rec, ["software_version", "cc_compile_by"]),
            "cc_compile_domain": _get_val(
                host_rec, ["software_version", "cc_compile_domain"]
            ),
            "cc_compile_date": _get_val(
                host_rec, ["software_version", "cc_compile_date"]
            ),
            "xend_config_format": _get_val(
                host_rec, ["software_version", "xend_config_format"]
            ),
        }
        return info
def get_nics(vm_):
    """
    Return info about the network interfaces of a named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_nics <vm name>
    """
    with _get_xapi_session() as xapi:
        vm_rec = _get_record_by_label(xapi, "VM", vm_)
        if vm_rec is False:
            return False
        nics = {}
        # Key the result by MAC address, one entry per virtual interface.
        for vif_ref in vm_rec["VIFs"]:
            vif_rec = _get_record(xapi, "VIF", vif_ref)
            nics[vif_rec["MAC"]] = {
                "mac": vif_rec["MAC"],
                "device": vif_rec["device"],
                "mtu": vif_rec["MTU"],
            }
        return nics
def get_macs(vm_):
    """
    Return a list of MAC addresses from the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_macs <vm name>
    """
    nics = get_nics(vm_)
    # Bug fix: get_nics() returns False (not None) when the VM does not
    # exist; the old ``is None`` check let False through and the loop then
    # raised TypeError. An empty nic dict still yields [].
    if nics is None or nics is False:
        return None
    return list(nics)
def get_disks(vm_):
    """
    Return the disks of a named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_disks <vm name>
    """
    with _get_xapi_session() as xapi:
        vm_uuid = _get_label_uuid(xapi, "VM", vm_)
        if vm_uuid is False:
            return False
        disks = {}
        for vbd_ref in xapi.VM.get_VBDs(vm_uuid):
            device = xapi.VBD.get_device(vbd_ref)
            # Skip block devices with no guest device name (e.g. empty CD).
            if not device:
                continue
            props = xapi.VBD.get_runtime_properties(vbd_ref)
            disks[device] = {
                "backend": props["backend"],
                "type": props["device-type"],
                "protocol": props["protocol"],
            }
        return disks
def setmem(vm_, memory):
    """
    Changes the amount of memory allocated to VM.
    Memory is to be specified in MB

    CLI Example:
    .. code-block:: bash
        salt '*' virt.setmem myvm 768
    """
    with _get_xapi_session() as xapi:
        # XenAPI expects the dynamic memory target in bytes.
        target_bytes = int(memory) * 1024 * 1024
        uuid_ = _get_label_uuid(xapi, "VM", vm_)
        if uuid_ is False:
            return False
        try:
            xapi.VM.set_memory_dynamic_max_live(uuid_, target_bytes)
            xapi.VM.set_memory_dynamic_min_live(uuid_, target_bytes)
        except Exception:  # pylint: disable=broad-except
            return False
        return True
def setvcpus(vm_, vcpus):
    """
    Changes the amount of vcpus allocated to VM.
    vcpus is an int representing the number to be assigned

    CLI Example:
    .. code-block:: bash
        salt '*' virt.setvcpus myvm 2
    """
    with _get_xapi_session() as xapi:
        uuid_ = _get_label_uuid(xapi, "VM", vm_)
        if uuid_ is False:
            return False
        try:
            xapi.VM.set_VCPUs_number_live(uuid_, vcpus)
        except Exception:  # pylint: disable=broad-except
            return False
        return True
def vcpu_pin(vm_, vcpu, cpus):
    """
    Set which CPUs a VCPU can use.

    :param vm_: name label of the VM
    :param vcpu: index of the VCPU to pin
    :param cpus: ``"all"``, or an xm-style CPU list string such as ``"1"``,
        ``"2-6"`` or ``"0-3,^2"`` (``^N`` excludes CPU N)

    CLI Example:
    .. code-block:: bash
        salt 'foo' virt.vcpu_pin domU-id 2 1
        salt 'foo' virt.vcpu_pin domU-id 2 2-6
    """
    with _get_xapi_session() as xapi:
        vm_uuid = _get_label_uuid(xapi, "VM", vm_)
        if vm_uuid is False:
            return False
        # from xm's main
        def cpu_make_map(cpulist):
            # Expand an xm CPU-list string into a sorted comma-separated
            # string of CPU numbers, e.g. "0-2,^1" -> "0,2".
            cpus = []
            for c in cpulist.split(","):
                if c == "":
                    continue
                if "-" in c:
                    # A range like "2-6": include both endpoints.
                    (x, y) = c.split("-")
                    for i in range(int(x), int(y) + 1):
                        cpus.append(int(i))
                else:
                    # remove this element from the list
                    if c[0] == "^":
                        cpus = [x for x in cpus if x != int(c[1:])]
                    else:
                        cpus.append(int(c))
            cpus.sort()
            return ",".join(map(str, cpus))
        if cpus == "all":
            # "all" is expanded to 0-63 here -- presumably the historical xm
            # upper bound; CPUs beyond 63 would not be covered (TODO confirm).
            cpumap = cpu_make_map("0-63")
        else:
            cpumap = cpu_make_map("{}".format(cpus))
        try:
            xapi.VM.add_to_VCPUs_params_live(vm_uuid, "cpumap{}".format(vcpu), cpumap)
            return True
        # VM.add_to_VCPUs_params_live() implementation in xend 4.1+ has
        # a bug which makes the client call fail.
        # That code is accurate for all others XenAPI implementations, but
        # for that particular one, fallback to xm / xl instead.
        except Exception:  # pylint: disable=broad-except
            return __salt__["cmd.run"](
                "{} vcpu-pin {} {} {}".format(_get_xtool(), vm_, vcpu, cpus),
                python_shell=False,
            )
def freemem():
    """
    Return an int representing the amount of memory that has not been given
    to virtual machines on this node

    CLI Example:
    .. code-block:: bash
        salt '*' virt.freemem
    """
    stats = node_info()
    return stats["free_memory"]
def freecpu():
    """
    Return an int representing the number of unallocated cpus on this
    hypervisor

    CLI Example:
    .. code-block:: bash
        salt '*' virt.freecpu
    """
    stats = node_info()
    return stats["free_cpus"]
def full_info():
    """
    Return a dict with the node_info and vm_info for this hypervisor.

    NOTE(review): an older version of this docstring also promised freemem,
    but only ``node_info`` and ``vm_info`` are actually returned (free
    memory is available inside ``node_info``).

    CLI Example:
    .. code-block:: bash
        salt '*' virt.full_info
    """
    return {"node_info": node_info(), "vm_info": vm_info()}
def shutdown(vm_):
    """
    Send a soft shutdown signal to the named vm

    CLI Example:
    .. code-block:: bash
        salt '*' virt.shutdown <vm name>
    """
    with _get_xapi_session() as xapi:
        uuid_ = _get_label_uuid(xapi, "VM", vm_)
        if uuid_ is False:
            return False
        try:
            xapi.VM.clean_shutdown(uuid_)
        except Exception:  # pylint: disable=broad-except
            return False
        return True
def pause(vm_):
    """
    Pause the named vm

    CLI Example:
    .. code-block:: bash
        salt '*' virt.pause <vm name>
    """
    with _get_xapi_session() as xapi:
        uuid_ = _get_label_uuid(xapi, "VM", vm_)
        if uuid_ is False:
            return False
        try:
            xapi.VM.pause(uuid_)
        except Exception:  # pylint: disable=broad-except
            return False
        return True
def resume(vm_):
    """
    Resume the named vm

    CLI Example:
    .. code-block:: bash
        salt '*' virt.resume <vm name>
    """
    with _get_xapi_session() as xapi:
        uuid_ = _get_label_uuid(xapi, "VM", vm_)
        if uuid_ is False:
            return False
        try:
            # XenAPI calls resuming from pause "unpause".
            xapi.VM.unpause(uuid_)
        except Exception:  # pylint: disable=broad-except
            return False
        return True
def start(config_):
    """
    Start a defined domain

    CLI Example:
    .. code-block:: bash
        salt '*' virt.start <path to Xen cfg file>
    """
    # FIXME / TODO
    # This function does NOT use the XenAPI. Instead, it use good old xm / xl.
    # On Xen Source, creating a virtual machine using XenAPI is really painful.
    # XCP / XS make it really easy using xapi.Async.VM.start instead. Anyone?
    command = "{} create {}".format(_get_xtool(), config_)
    return __salt__["cmd.run"](command, python_shell=False)
def reboot(vm_):
    """
    Reboot a domain via ACPI request

    CLI Example:
    .. code-block:: bash
        salt '*' virt.reboot <vm name>
    """
    with _get_xapi_session() as xapi:
        uuid_ = _get_label_uuid(xapi, "VM", vm_)
        if uuid_ is False:
            return False
        try:
            xapi.VM.clean_reboot(uuid_)
        except Exception:  # pylint: disable=broad-except
            return False
        return True
def reset(vm_):
    """
    Reset a VM by emulating the reset button on a physical machine

    CLI Example:
    .. code-block:: bash
        salt '*' virt.reset <vm name>
    """
    with _get_xapi_session() as xapi:
        uuid_ = _get_label_uuid(xapi, "VM", vm_)
        if uuid_ is False:
            return False
        try:
            # hard_reboot is the XenAPI equivalent of a physical reset.
            xapi.VM.hard_reboot(uuid_)
        except Exception:  # pylint: disable=broad-except
            return False
        return True
def migrate(vm_, target, live=1, port=0, node=-1, ssl=None, change_home_server=0):
    """
    Migrates the virtual machine to another hypervisor

    CLI Example:
    .. code-block:: bash
        salt '*' virt.migrate <vm name> <target hypervisor> [live] [port] [node] [ssl] [change_home_server]

    Optional values:
    live
        Use live migration
    port
        Use a specified port
    node
        Use specified NUMA node on target
    ssl
        use ssl connection for migration
    change_home_server
        change home server for managed domains
    """
    with _get_xapi_session() as xapi:
        uuid_ = _get_label_uuid(xapi, "VM", vm_)
        if uuid_ is False:
            return False
        # Extra migration settings are passed via the other_config mapping.
        options = dict(
            port=port,
            node=node,
            ssl=ssl,
            change_home_server=change_home_server,
        )
        try:
            xapi.VM.migrate(uuid_, target, bool(live), options)
        except Exception:  # pylint: disable=broad-except
            return False
        return True
def stop(vm_):
    """
    Hard power down the virtual machine, this is equivalent to pulling the
    power

    CLI Example:
    .. code-block:: bash
        salt '*' virt.stop <vm name>
    """
    with _get_xapi_session() as xapi:
        uuid_ = _get_label_uuid(xapi, "VM", vm_)
        if uuid_ is False:
            return False
        try:
            xapi.VM.hard_shutdown(uuid_)
        except Exception:  # pylint: disable=broad-except
            return False
        return True
def is_hyper():
    """
    Returns a bool whether or not this node is a hypervisor of any kind

    CLI Example:
    .. code-block:: bash
        salt '*' virt.is_hyper
    """
    # virtual_subtype isn't set everywhere; treat a missing grain as "no".
    if __grains__.get("virtual_subtype") != "Xen Dom0":
        return False
    try:
        with salt.utils.files.fopen("/proc/modules") as fp_:
            modules = salt.utils.stringutils.to_unicode(fp_.read())
    except OSError:
        return False
    if "xen_" not in modules:
        return False
    # there must be a smarter way...
    return "xenstore" in __salt__["cmd.run"](__grains__["ps"])
def vm_cputime(vm_=None):
    """
    Return cputime used by the vms on this hyper in a
    list of dicts:
    .. code-block:: python
        [
            'your-vm': {
                'cputime' <int>
                'cputime_percent' <int>
                },
            ...
            ]
    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:
    .. code-block:: bash
        salt '*' virt.vm_cputime
    """
    with _get_xapi_session() as xapi:

        def _info(vm_):
            # Per-VM cputime figures; returns False for an unknown VM label.
            host_rec = _get_record_by_label(xapi, "VM", vm_)
            if host_rec is False:
                return False
            # Bug fix: this len() used to run before the `is False` check,
            # raising TypeError (False is not subscriptable) for unknown VMs.
            host_cpus = len(host_rec["host_CPUs"])
            host_metrics = _get_metrics_record(xapi, "VM", host_rec)
            vcpus = int(host_metrics["VCPUs_number"])
            cputime = int(host_metrics["VCPUs_utilisation"]["0"])
            cputime_percent = 0
            if cputime:
                # Divide by vcpus to always return a number between 0 and 100
                cputime_percent = (1.0e-7 * cputime / host_cpus) / vcpus
            return {
                "cputime": int(cputime),
                "cputime_percent": int("{:.0f}".format(cputime_percent)),
            }

        info = {}
        if vm_:
            info[vm_] = _info(vm_)
            return info
        for vm_ in list_domains():
            info[vm_] = _info(vm_)
        return info
def vm_netstats(vm_=None):
    """
    Return combined network counters used by the vms on this hyper in a
    list of dicts:
    .. code-block:: python
        [
            'your-vm': {
                'io_read_kbs'           : 0,
                'io_total_read_kbs'     : 0,
                'io_total_write_kbs'    : 0,
                'io_write_kbs'          : 0
                },
            ...
            ]
    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:
    .. code-block:: bash
        salt '*' virt.vm_netstats
    """
    with _get_xapi_session() as xapi:

        def _info(name):
            # Gather VIF metrics for one VM, keyed by device name.
            vm_rec = _get_record_by_label(xapi, "VM", name)
            if vm_rec is False:
                return False
            stats = {}
            for vif_ref in vm_rec["VIFs"]:
                vif_rec = _get_record(xapi, "VIF", vif_ref)
                metrics = _get_metrics_record(xapi, "VIF", vif_rec)
                # Timestamps are noise for counter consumers.
                del metrics["last_updated"]
                stats[vif_rec["device"]] = metrics
            return stats

        if vm_:
            return {vm_: _info(vm_)}
        return {name: _info(name) for name in list_domains()}
def vm_diskstats(vm_=None):
    """
    Return disk usage counters used by the vms on this hyper in a
    list of dicts:
    .. code-block:: python
        [
            'your-vm': {
                'io_read_kbs'   : 0,
                'io_write_kbs'  : 0
                },
            ...
            ]
    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:
    .. code-block:: bash
        salt '*' virt.vm_diskstats
    """
    with _get_xapi_session() as xapi:

        def _info(name):
            # Gather VBD metrics for one VM, keyed by device name.
            uuid_ = _get_label_uuid(xapi, "VM", name)
            if uuid_ is False:
                return False
            stats = {}
            for vbd_ref in xapi.VM.get_VBDs(uuid_):
                vbd_rec = _get_record(xapi, "VBD", vbd_ref)
                metrics = _get_metrics_record(xapi, "VBD", vbd_rec)
                # Timestamps are noise for counter consumers.
                del metrics["last_updated"]
                stats[vbd_rec["device"]] = metrics
            return stats

        if vm_:
            return {vm_: _info(vm_)}
        return {name: _info(name) for name in list_domains()}
| |
import py, pytest
import os
from _pytest.resultlog import generic_path, ResultLog, \
pytest_configure, pytest_unconfigure
from _pytest.main import Node, Item, FSCollector
def test_generic_path(testdir):
    """generic_path() renders a node chain as a dotted / colon-separated id."""
    from _pytest.main import Session
    config = testdir.parseconfig()
    session = Session(config)
    # Pure Node chain: rendered with dots only.
    parent = Node('a', config=config, session=session)
    #assert parent.fspath is None
    child = Node('B', parent=parent)
    instance = Node('()', parent=child)
    item = Item('c', parent=instance)
    assert generic_path(item) == 'a.B().c'
    # Filesystem collectors switch the separator to ':' after the path part.
    fs_root = FSCollector('proj/test', config=config, session=session)
    fs_child = FSCollector('proj/test/a', parent=fs_root)
    cls_node = Node('B', parent=fs_child)
    cls_instance = Node('()', parent=cls_node)
    func_node = Node('c', parent=cls_instance)
    item = Item('[1]', parent=func_node)
    assert generic_path(item) == 'test/a:B().c[1]'
def test_write_log_entry():
    """write_log_entry emits '<code> <name>' then one indented line per longrepr line."""
    def written_lines(name, code, longrepr):
        # Run one entry through a fresh in-memory log and return its lines.
        reslog = ResultLog(None, None)
        reslog.logfile = py.io.TextIO()
        reslog.write_log_entry(name, code, longrepr)
        entry = reslog.logfile.getvalue()
        assert entry[-1] == '\n'
        return entry.splitlines()

    # Empty longrepr: only the status line.
    assert written_lines('name', '.', '') == ['. name']
    # Single-line longrepr is indented by one space.
    assert written_lines('name', 's', 'Skipped') == ['s name', ' Skipped']
    # A trailing newline in longrepr does not add an extra line.
    assert written_lines('name', 's', 'Skipped\n') == ['s name', ' Skipped']
    # Multi-line longrepr: every line indented, order preserved.
    longrepr = ' tb1\n tb 2\nE tb3\nSome Error'
    lines = written_lines('name', 'F', longrepr)
    assert len(lines) == 5
    assert lines[0] == 'F name'
    assert lines[1:] == [' ' + line for line in longrepr.splitlines()]
class TestWithFunctionIntegration:
    # XXX (hpk) i think that the resultlog plugin should
    # provide a Parser object so that one can remain
    # ignorant regarding formatting details.
    def getresultlog(self, testdir, arg):
        """Run pytest with --resultlog on `arg` and return the non-empty log lines."""
        resultlog = testdir.tmpdir.join("resultlog")
        testdir.plugins.append("resultlog")
        args = ["--resultlog=%s" % resultlog] + [arg]
        testdir.runpytest(*args)
        # cr=0 strips line endings; drop blank lines.
        return [x for x in resultlog.readlines(cr=0) if x]
    def test_collection_report(self, testdir):
        """Collection outcomes: passing files log nothing, skip logs 'S', error logs 'F'."""
        ok = testdir.makepyfile(test_collection_ok="")
        skip = testdir.makepyfile(test_collection_skip=
            "import pytest ; pytest.skip('hello')")
        fail = testdir.makepyfile(test_collection_fail="XXX")
        lines = self.getresultlog(testdir, ok)
        assert not lines
        lines = self.getresultlog(testdir, skip)
        assert len(lines) == 2
        assert lines[0].startswith("S ")
        assert lines[0].endswith("test_collection_skip.py")
        assert lines[1].startswith(" ")
        assert lines[1].endswith("test_collection_skip.py:1: Skipped: hello")
        lines = self.getresultlog(testdir, fail)
        assert lines
        assert lines[0].startswith("F ")
        assert lines[0].endswith("test_collection_fail.py"), lines[0]
        # Continuation (traceback) lines are indented under the status line.
        for x in lines[1:]:
            assert x.startswith(" ")
        assert "XXX" in "".join(lines[1:])
    def test_log_test_outcomes(self, testdir):
        """Each test outcome maps to a status letter: . s F x X, with tracebacks inline."""
        mod = testdir.makepyfile(test_mod="""
            import pytest
            def test_pass(): pass
            def test_skip(): pytest.skip("hello")
            def test_fail(): raise ValueError("FAIL")
            @pytest.mark.xfail
            def test_xfail(): raise ValueError("XFAIL")
            @pytest.mark.xfail
            def test_xpass(): pass
        """)
        lines = self.getresultlog(testdir, mod)
        assert len(lines) >= 3
        assert lines[0].startswith(". ")
        assert lines[0].endswith("test_pass")
        assert lines[1].startswith("s "), lines[1]
        assert lines[1].endswith("test_skip")
        assert lines[2].find("hello") != -1
        assert lines[3].startswith("F ")
        assert lines[3].endswith("test_fail")
        # NOTE: the line index ranges below assume the exact traceback
        # lengths produced for the module above.
        tb = "".join(lines[4:8])
        assert tb.find('raise ValueError("FAIL")') != -1
        assert lines[8].startswith('x ')
        tb = "".join(lines[8:14])
        assert tb.find('raise ValueError("XFAIL")') != -1
        assert lines[14].startswith('X ')
        assert len(lines) == 15
    @pytest.mark.parametrize("style", ("native", "long", "short"))
    def test_internal_exception(self, style):
        """Internal errors are logged with a '! ' header for every repr style."""
        # they are produced for example by a teardown failing
        # at the end of the run or a failing hook invocation
        try:
            raise ValueError
        except ValueError:
            excinfo = py.code.ExceptionInfo()
        reslog = ResultLog(None, py.io.TextIO())
        reslog.pytest_internalerror(excinfo.getrepr(style=style))
        entry = reslog.logfile.getvalue()
        entry_lines = entry.splitlines()
        assert entry_lines[0].startswith('! ')
        if style != "native":
            assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class
        assert entry_lines[-1][0] == ' '
        assert 'ValueError' in entry
def test_generic(testdir, LineMatcher):
    """End-to-end: the resultlog file maps each outcome to its status letter."""
    testdir.plugins.append("resultlog")
    testdir.makepyfile("""
        import pytest
        def test_pass():
            pass
        def test_fail():
            assert 0
        def test_skip():
            pytest.skip("")
        @pytest.mark.xfail
        def test_xfail():
            assert 0
        @pytest.mark.xfail(run=False)
        def test_xfail_norun():
            assert 0
    """)
    testdir.runpytest("--resultlog=result.log")
    lines = testdir.tmpdir.join("result.log").readlines(cr=0)
    # One status line per test: pass '.', fail 'F', skip 's', xfail 'x'
    # (xfail with run=False is still logged as 'x').
    LineMatcher(lines).fnmatch_lines([
        ". *:test_pass",
        "F *:test_fail",
        "s *:test_skip",
        "x *:test_xfail",
        "x *:test_xfail_norun",
    ])
def test_no_resultlog_on_slaves(testdir):
    """The resultlog is only attached on the master, never on xdist slaves."""
    config = testdir.parseconfig("-p", "resultlog", "--resultlog=resultlog")
    # Master process: configure attaches _resultlog, unconfigure removes it.
    assert not hasattr(config, '_resultlog')
    pytest_configure(config)
    assert hasattr(config, '_resultlog')
    pytest_unconfigure(config)
    assert not hasattr(config, '_resultlog')
    # The presence of slaveinput marks an xdist slave: no _resultlog at all.
    config.slaveinput = {}
    pytest_configure(config)
    assert not hasattr(config, '_resultlog')
    pytest_unconfigure(config)
    assert not hasattr(config, '_resultlog')
def test_failure_issue380(testdir):
    """Regression test (#380): a collector whose repr_failure returns a plain
    string must not crash the resultlog plugin."""
    testdir.makeconftest("""
        import pytest
        class MyCollector(pytest.File):
            def collect(self):
                raise ValueError()
            def repr_failure(self, excinfo):
                return "somestring"
        def pytest_collect_file(path, parent):
            return MyCollector(parent=parent, fspath=path)
    """)
    testdir.makepyfile("""
        def test_func():
            pass
    """)
    result = testdir.runpytest("--resultlog=log")
    # The run fails (collection error) but exits cleanly with code 1.
    assert result.ret == 1
| |
#!/usr/bin/python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Software License Agreement (BSD License)
#
# Copyright (C) 2012, Austin Robot Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Austin Robot Technology, Inc. nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""
Generate YAML calibration file from Velodyne db.xml.
The input data provided by the manufacturer are in degrees and
centimeters. The YAML file uses radians and meters, following ROS
standards [REP-0103].
"""
from __future__ import print_function
import math
import optparse
import os
import sys
from xml.etree import ElementTree
import yaml
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# parse the command line
usage = """usage: %prog infile.xml [outfile.yaml]
Default output file is input file with .yaml suffix."""
parser = optparse.OptionParser(usage=usage)
options, args = parser.parse_args()
if len(args) < 1:
    parser.error('XML file name missing')
    # NOTE(review): parser.error() already exits the process, so this
    # sys.exit(9) is effectively unreachable -- confirm before relying on 9.
    sys.exit(9)
xmlFile = args[0]
if len(args) >= 2:
    yamlFile = args[1]
else:
    # Default output: same basename as the input with a .yaml suffix.
    yamlFile, ext = os.path.splitext(xmlFile)
    yamlFile += '.yaml'
print('converting "' + xmlFile + '" to "' + yamlFile + '"')
# Global flag: flipped to False by xmlError() on any calibration problem.
calibrationGood = True

def xmlError(msg):
    """Report a calibration error and remember that calibration failed."""
    global calibrationGood
    calibrationGood = False
    print('gen_calibration.py: ' + msg)
db = None
try:
    db = ElementTree.parse(xmlFile)
except IOError:
    xmlError('unable to read ' + xmlFile)
except ElementTree.ParseError:
    xmlError('XML parse failed for ' + xmlFile)
# Abort early if the XML could not be read or parsed (exit code 2).
if not calibrationGood:
    sys.exit(2)
# create a dictionary to hold all relevant calibration values
calibration = {'num_lasers': 0, 'lasers': []}
cm2meters = 0.01                        # convert centimeters to meters

def addLaserCalibration(laser_num, key, val):
    """Define key and corresponding value for laser_num.

    Lasers are normally filled in ascending order; if an index is ever
    skipped, empty placeholder entries are appended so that the list
    position always matches the laser number. (The original code would
    silently store the value under the wrong index in that case.)
    """
    global calibration
    lasers = calibration['lasers']
    # Pad with empty dicts so lasers[laser_num] is always a valid slot.
    while len(lasers) < laser_num:
        lasers.append({})
    if laser_num < len(lasers):
        lasers[laser_num][key] = val
    else:
        lasers.append({key: val})
# add enabled flags
num_enabled = 0
enabled_lasers = []
enabled = db.find('DB/enabled_')
if enabled is None:
    # Older db.xml files have no enabled_ section at all.
    print('no enabled tags found: assuming all 64 enabled')
    num_enabled = 64
    enabled_lasers = [True for i in xrange(num_enabled)]
else:
    index = 0
    for el in enabled:
        if el.tag == 'item':
            # Non-zero text means this laser index is enabled.
            this_enabled = int(el.text) != 0
            enabled_lasers.append(this_enabled)
            index += 1
            if this_enabled:
                num_enabled += 1
calibration['num_lasers'] = num_enabled
print(str(num_enabled) + ' lasers')
# add minimum laser intensities
minIntensities = db.find('DB/minIntensity_')
if minIntensities is not None:
    index = 0
    for el in minIntensities:
        if el.tag == 'item':
            if enabled_lasers[index]:
                value = int(el.text)
                # 256 is presumably the "unset" sentinel in db.xml -- such
                # entries are skipped (TODO confirm against vendor docs).
                if value != 256:
                    addLaserCalibration(index, 'min_intensity', value)
            index += 1
# add maximum laser intensities
maxIntensities = db.find('DB/maxIntensity_')
if maxIntensities is not None:
    index = 0
    for el in maxIntensities:
        if el.tag == 'item':
            if enabled_lasers[index]:
                value = int(el.text)
                if value != 256:
                    addLaserCalibration(index, 'max_intensity', value)
            index += 1
# add calibration information for each laser
for el in db.find('DB/points_'):
    if el.tag == 'item':
        for px in el:
            for field in px:
                # NOTE(review): this loop relies on the id_ field appearing
                # before the correction fields within each point element;
                # `index` carries over from the id_ iteration.
                if field.tag == 'id_':
                    index = int(field.text)
                    if not enabled_lasers[index]:
                        break   # skip this laser, it is not enabled
                    addLaserCalibration(index, 'laser_id', index)
                # Angles are converted from degrees to radians, distances
                # from centimeters to meters (REP-0103 units).
                if field.tag == 'rotCorrection_':
                    addLaserCalibration(index, 'rot_correction',
                                        math.radians(float(field.text)))
                elif field.tag == 'vertCorrection_':
                    addLaserCalibration(index, 'vert_correction',
                                        math.radians(float(field.text)))
                elif field.tag == 'distCorrection_':
                    addLaserCalibration(index, 'dist_correction',
                                        float(field.text) * cm2meters)
                elif field.tag == 'distCorrectionX_':
                    addLaserCalibration(index, 'dist_correction_x',
                                        float(field.text) * cm2meters)
                elif field.tag == 'distCorrectionY_':
                    addLaserCalibration(index, 'dist_correction_y',
                                        float(field.text) * cm2meters)
                elif field.tag == 'vertOffsetCorrection_':
                    addLaserCalibration(index, 'vert_offset_correction',
                                        float(field.text) * cm2meters)
                elif field.tag == 'horizOffsetCorrection_':
                    addLaserCalibration(index, 'horiz_offset_correction',
                                        float(field.text) * cm2meters)
                elif field.tag == 'focalDistance_':
                    addLaserCalibration(index, 'focal_distance',
                                        float(field.text) * cm2meters)
                elif field.tag == 'focalSlope_':
                    addLaserCalibration(index, 'focal_slope', float(field.text))
# validate input data
if calibration['num_lasers'] <= 0:
    xmlError('no lasers defined')
elif calibration['num_lasers'] != num_enabled:
    xmlError('inconsistent number of lasers defined')
# TODO: make sure all required fields are present.
# (Which ones are required?)
if calibrationGood:
    # write calibration data to YAML file
    with open(yamlFile, 'w') as f:
        yaml.dump(calibration, f)
| |
import sys
import os
import gzip
import zipfile
from optparse import make_option
from django.conf import settings
from django.core import serializers
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import connections, router, transaction, DEFAULT_DB_ALIAS
from django.db.models import get_apps
from django.utils.itercompat import product
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
    """Management command that installs serialized fixture files.

    NOTE(review): legacy Python-2-era Django code (``except Exception, e``,
    ``sys.exc_type``); documented as-is, not modernized.
    """
    help = 'Installs the named fixture(s) in the database.'
    args = "fixture [fixture ...]"
    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
                'fixtures into. Defaults to the "default" database.'),
    )
    def handle(self, *fixture_labels, **options):
        """Locate, deserialize and save every requested fixture.

        A fixture label may embed a serialization format and/or a
        compression suffix (e.g. ``mydata.json.gz``). Each label is searched
        in app fixture dirs, settings.FIXTURE_DIRS and the current
        directory, trying every matching format/compression/database-suffix
        combination. All fixtures load inside one transaction.
        """
        using = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[using]
        self.style = no_style()
        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', False)
        # commit is a stealth option - it isn't really useful as
        # a command line option, but it can be useful when invoking
        # loaddata from within another script.
        # If commit=True, loaddata will use its own transaction;
        # if commit=False, the data load SQL will become part of
        # the transaction in place when loaddata was invoked.
        commit = options.get('commit', True)
        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        object_count = 0
        models = set()
        # Pretty-print a fixture directory for log messages.
        humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'
        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()
        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        if commit:
            transaction.commit_unless_managed(using=using)
            transaction.enter_transaction_management(using=using)
            transaction.managed(True, using=using)
        # Wraps a one-file zip archive so it can be read like a plain file.
        class SingleZipReader(zipfile.ZipFile):
            def __init__(self, *args, **kwargs):
                zipfile.ZipFile.__init__(self, *args, **kwargs)
                if settings.DEBUG:
                    assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
            def read(self):
                return zipfile.ZipFile.read(self, self.namelist()[0])
        # Map compression suffix -> file-like opener (None = uncompressed).
        compression_types = {
            None:   file,
            'gz':   gzip.GzipFile,
            'zip':  SingleZipReader
        }
        if has_bz2:
            compression_types['bz2'] = bz2.BZ2File
        app_module_paths = []
        for app in get_apps():
            if hasattr(app, '__path__'):
                # It's a 'models/' subpackage
                for path in app.__path__:
                    app_module_paths.append(path)
            else:
                # It's a models.py module
                app_module_paths.append(app.__file__)
        app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]
        for fixture_label in fixture_labels:
            # Split off a trailing compression suffix, if recognized.
            parts = fixture_label.split('.')
            if len(parts) > 1 and parts[-1] in compression_types:
                compression_formats = [parts[-1]]
                parts = parts[:-1]
            else:
                compression_formats = compression_types.keys()
            # The remaining last component, if any, is the serializer format.
            if len(parts) == 1:
                fixture_name = parts[0]
                formats = serializers.get_public_serializer_formats()
            else:
                fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                if format in serializers.get_public_serializer_formats():
                    formats = [format]
                else:
                    formats = []
            if formats:
                if verbosity >= 2:
                    self.stdout.write("Loading '%s' fixtures...\n" % fixture_name)
            else:
                sys.stderr.write(
                    self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format.\n" %
                        (fixture_name, format)))
                transaction.rollback(using=using)
                transaction.leave_transaction_management(using=using)
                return
            if os.path.isabs(fixture_name):
                fixture_dirs = [fixture_name]
            else:
                # Search app fixtures dirs, then FIXTURE_DIRS, then cwd.
                fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']
            for fixture_dir in fixture_dirs:
                if verbosity >= 2:
                    self.stdout.write("Checking %s for fixtures...\n" % humanize(fixture_dir))
                label_found = False
                # Try every (database-suffix, format, compression) combination.
                for combo in product([using, None], formats, compression_formats):
                    database, format, compression_format = combo
                    file_name = '.'.join(
                        p for p in [
                            fixture_name, database, format, compression_format
                        ]
                        if p
                    )
                    if verbosity >= 3:
                        self.stdout.write("Trying %s for %s fixture '%s'...\n" % \
                            (humanize(fixture_dir), file_name, fixture_name))
                    full_path = os.path.join(fixture_dir, file_name)
                    open_method = compression_types[compression_format]
                    try:
                        fixture = open_method(full_path, 'r')
                        if label_found:
                            # Ambiguous: the same label matched twice in
                            # this directory -- abort rather than guess.
                            fixture.close()
                            self.stderr.write(self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting.\n" %
                                (fixture_name, humanize(fixture_dir))))
                            transaction.rollback(using=using)
                            transaction.leave_transaction_management(using=using)
                            return
                        else:
                            fixture_count += 1
                            objects_in_fixture = 0
                            if verbosity >= 2:
                                self.stdout.write("Installing %s fixture '%s' from %s.\n" % \
                                    (format, fixture_name, humanize(fixture_dir)))
                            try:
                                objects = serializers.deserialize(format, fixture, using=using)
                                for obj in objects:
                                    # Only save objects routed to this database.
                                    if router.allow_syncdb(using, obj.object.__class__):
                                        objects_in_fixture += 1
                                        models.add(obj.object.__class__)
                                        obj.save(using=using)
                                object_count += objects_in_fixture
                                label_found = True
                            except (SystemExit, KeyboardInterrupt):
                                raise
                            except Exception:
                                import traceback
                                fixture.close()
                                transaction.rollback(using=using)
                                transaction.leave_transaction_management(using=using)
                                if show_traceback:
                                    traceback.print_exc()
                                else:
                                    sys.stderr.write(
                                        self.style.ERROR("Problem installing fixture '%s': %s\n" %
                                             (full_path, ''.join(traceback.format_exception(sys.exc_type,
                                                 sys.exc_value, sys.exc_traceback)))))
                                return
                            fixture.close()
                            # If the fixture we loaded contains 0 objects, assume that an
                            # error was encountered during fixture loading.
                            if objects_in_fixture == 0:
                                sys.stderr.write(
                                    self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)\n" %
                                        (fixture_name)))
                                transaction.rollback(using=using)
                                transaction.leave_transaction_management(using=using)
                                return
                    except Exception, e:
                        # Missing file for this combination: just try the next.
                        if verbosity >= 2:
                            self.stdout.write("No %s fixture '%s' in %s.\n" % \
                                (format, fixture_name, humanize(fixture_dir)))
        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity >= 2:
                    self.stdout.write("Resetting sequences\n")
                for line in sequence_sql:
                    cursor.execute(line)
        if commit:
            transaction.commit(using=using)
            transaction.leave_transaction_management(using=using)
        if object_count == 0:
            if verbosity >= 1:
                self.stdout.write("No fixtures found.\n")
        else:
            if verbosity >= 1:
                self.stdout.write("Installed %d object(s) from %d fixture(s)\n" % (object_count, fixture_count))
        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        if commit:
            connection.close()
| |
"""
Support for the Netatmo binary sensors.
The binary sensors based on events seen by the Netatmo cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.netatmo/.
"""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
BinarySensorDevice, PLATFORM_SCHEMA)
from homeassistant.components.netatmo import CameraData
from homeassistant.loader import get_component
from homeassistant.const import CONF_TIMEOUT, CONF_OFFSET
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['netatmo']
# These are the available sensors mapped to binary_sensor class
WELCOME_SENSOR_TYPES = {
    "Someone known": "motion",
    "Someone unknown": "motion",
    "Motion": "motion",
}
PRESENCE_SENSOR_TYPES = {
    "Outdoor motion": "motion",
    "Outdoor human": "motion",
    "Outdoor animal": "motion",
    "Outdoor vehicle": "motion"
}
TAG_SENSOR_TYPES = {
    "Tag Vibration": "vibration",
    "Tag Open": "opening"
}
CONF_HOME = 'home'
CONF_CAMERAS = 'cameras'
CONF_WELCOME_SENSORS = 'welcome_sensors'
CONF_PRESENCE_SENSORS = 'presence_sensors'
CONF_TAG_SENSORS = 'tag_sensors'
# Timeout is used as minutes (multiplied by 60 where events are queried).
DEFAULT_TIMEOUT = 15
# Offset default; exact semantics depend on the event-query code -- see usage.
DEFAULT_OFFSET = 90
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_CAMERAS, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_HOME): cv.string,
    vol.Optional(CONF_OFFSET, default=DEFAULT_OFFSET): cv.positive_int,
    vol.Optional(CONF_PRESENCE_SENSORS, default=PRESENCE_SENSOR_TYPES):
        vol.All(cv.ensure_list, [vol.In(PRESENCE_SENSOR_TYPES)]),
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    vol.Optional(CONF_WELCOME_SENSORS, default=WELCOME_SENSOR_TYPES):
        vol.All(cv.ensure_list, [vol.In(WELCOME_SENSOR_TYPES)]),
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the access to Netatmo binary sensor.

    Creates one NetatmoBinarySensor per (camera, sensor type) pair:
    Welcome (NACamera) and Presence (NOC) cameras get their respective
    sensor sets, and every attached module gets the tag sensor set.
    """
    netatmo = get_component('netatmo')
    home = config.get(CONF_HOME)
    timeout = config.get(CONF_TIMEOUT)
    offset = config.get(CONF_OFFSET)
    module_name = None
    import lnetatmo
    try:
        data = CameraData(netatmo.NETATMO_AUTH, home)
        # No cameras at all -> nothing to set up.
        if not data.get_camera_names():
            return None
    except lnetatmo.NoDevice:
        return None
    welcome_sensors = config.get(
        CONF_WELCOME_SENSORS, WELCOME_SENSOR_TYPES)
    presence_sensors = config.get(
        CONF_PRESENCE_SENSORS, PRESENCE_SENSOR_TYPES)
    tag_sensors = config.get(CONF_TAG_SENSORS, TAG_SENSOR_TYPES)
    for camera_name in data.get_camera_names():
        camera_type = data.get_camera_type(camera=camera_name, home=home)
        if camera_type == 'NACamera':
            # Skip cameras excluded by an explicit (non-empty) camera list.
            if CONF_CAMERAS in config:
                if config[CONF_CAMERAS] != [] and \
                   camera_name not in config[CONF_CAMERAS]:
                    continue
            for variable in welcome_sensors:
                add_devices([NetatmoBinarySensor(
                    data, camera_name, module_name, home, timeout,
                    offset, camera_type, variable)], True)
        if camera_type == 'NOC':
            if CONF_CAMERAS in config:
                if config[CONF_CAMERAS] != [] and \
                   camera_name not in config[CONF_CAMERAS]:
                    continue
            for variable in presence_sensors:
                add_devices([NetatmoBinarySensor(
                    data, camera_name, module_name, home, timeout, offset,
                    camera_type, variable)], True)
        # Modules (tags) attached to the camera get camera_type=None so
        # device_class falls through to TAG_SENSOR_TYPES.
        for module_name in data.get_module_names(camera_name):
            for variable in tag_sensors:
                camera_type = None
                add_devices([NetatmoBinarySensor(
                    data, camera_name, module_name, home, timeout, offset,
                    camera_type, variable)], True)
class NetatmoBinarySensor(BinarySensorDevice):
    """Represent a single binary sensor in a Netatmo Camera device."""
    def __init__(self, data, camera_name, module_name, home,
                 timeout, offset, camera_type, sensor):
        """Set up for access to the Netatmo camera events.

        camera_type is 'NACamera' (Welcome), 'NOC' (Presence) or None
        (tag module sensors); it selects the branch used in update().
        """
        self._data = data
        self._camera_name = camera_name
        self._module_name = module_name
        self._home = home
        self._timeout = timeout
        self._offset = offset
        # Display name: "<home> / <camera>[ / <module>] <sensor>".
        if home:
            self._name = '{} / {}'.format(home, camera_name)
        else:
            self._name = camera_name
        if module_name:
            self._name += ' / ' + module_name
        self._sensor_name = sensor
        self._name += ' ' + sensor
        camera_id = data.camera_data.cameraByName(
            camera=camera_name, home=home)['id']
        self._unique_id = "Netatmo_binary_sensor {0} - {1}".format(
            self._name, camera_id)
        self._cameratype = camera_type
        self._state = None
    @property
    def name(self):
        """Return the name of the Netatmo device and this sensor."""
        return self._name
    @property
    def unique_id(self):
        """Return the unique ID for this sensor."""
        return self._unique_id
    @property
    def device_class(self):
        """Return the class of this sensor, from DEVICE_CLASSES."""
        # Each camera type has its own sensor-name -> device-class mapping;
        # tag modules (camera_type None) fall through to TAG_SENSOR_TYPES.
        if self._cameratype == 'NACamera':
            return WELCOME_SENSOR_TYPES.get(self._sensor_name)
        elif self._cameratype == 'NOC':
            return PRESENCE_SENSOR_TYPES.get(self._sensor_name)
        return TAG_SENSOR_TYPES.get(self._sensor_name)
    @property
    def is_on(self):
        """Return true if binary sensor is on."""
        return self._state
    def update(self):
        """Request an update from the Netatmo API."""
        self._data.update()
        self._data.update_event()
        # Welcome (indoor) sensors look back over a window of timeout minutes.
        if self._cameratype == 'NACamera':
            if self._sensor_name == "Someone known":
                self._state =\
                    self._data.camera_data.someoneKnownSeen(
                        self._home, self._camera_name, self._timeout*60)
            elif self._sensor_name == "Someone unknown":
                self._state =\
                    self._data.camera_data.someoneUnknownSeen(
                        self._home, self._camera_name, self._timeout*60)
            elif self._sensor_name == "Motion":
                self._state =\
                    self._data.camera_data.motionDetected(
                        self._home, self._camera_name, self._timeout*60)
        # Presence (outdoor) sensors use an event offset rather than a window.
        elif self._cameratype == 'NOC':
            if self._sensor_name == "Outdoor motion":
                self._state =\
                    self._data.camera_data.outdoormotionDetected(
                        self._home, self._camera_name, self._offset)
            elif self._sensor_name == "Outdoor human":
                self._state =\
                    self._data.camera_data.humanDetected(
                        self._home, self._camera_name, self._offset)
            elif self._sensor_name == "Outdoor animal":
                self._state =\
                    self._data.camera_data.animalDetected(
                        self._home, self._camera_name, self._offset)
            elif self._sensor_name == "Outdoor vehicle":
                self._state =\
                    self._data.camera_data.carDetected(
                        self._home, self._camera_name, self._offset)
        # Deliberately a new if-chain (not elif): tag sensors are created with
        # camera_type None, so they skip both branches above and land here.
        if self._sensor_name == "Tag Vibration":
            self._state =\
                self._data.camera_data.moduleMotionDetected(
                    self._home, self._module_name, self._camera_name,
                    self._timeout*60)
        elif self._sensor_name == "Tag Open":
            self._state =\
                self._data.camera_data.moduleOpened(
                    self._home, self._module_name, self._camera_name)
        else:
            # Not a tag sensor; camera branches above already set the state.
            return None
| |
#!/usr/bin/env python
"""
Subrequest for partial content checks.
"""
from configparser import SectionProxy
import random
from typing import TYPE_CHECKING
from redbot.resource.active_check.base import SubRequest
from redbot.formatter import f_num
from redbot.speak import Note, categories, levels, display_bytes
from redbot.type import StrHeaderListType
if TYPE_CHECKING:
from redbot.resource import HttpResource # pylint: disable=cyclic-import
class RangeRequest(SubRequest):
    "Check for partial content support (if advertised)"
    check_name = "Partial Content"
    response_phrase = "The partial response"

    def __init__(self, config: SectionProxy, resource: "HttpResource") -> None:
        # Byte positions and expected body of the sampled range; populated in
        # modify_request_headers once a payload sample is chosen.
        self.range_start = None  # type: int
        self.range_end = None  # type: int
        self.range_target = None  # type: bytes
        SubRequest.__init__(self, config, resource)

    def modify_request_headers(
        self, base_headers: StrHeaderListType
    ) -> StrHeaderListType:
        """Add a Range header covering a random sample of the base payload."""
        if self.base.response.payload_sample:
            sample_num = random.randint(0, len(self.base.response.payload_sample) - 1)
            sample_len = min(96, len(self.base.response.payload_sample[sample_num][1]))
            self.range_start = self.base.response.payload_sample[sample_num][0]
            self.range_end = self.range_start + sample_len
            # HTTP byte ranges are inclusive, so sample_len + 1 bytes
            # are expected back.
            self.range_target = self.base.response.payload_sample[sample_num][1][
                : sample_len + 1
            ]
            base_headers.append(
                ("Range", "bytes=%s-%s" % (self.range_start, self.range_end))
            )
        return base_headers

    def preflight(self) -> bool:
        """Return True when a range subrequest is worth sending."""
        if self.base.response.status_code[0] == "3":
            return False
        if self.base.response.status_code == "206":
            # The base response is already partial; nothing to compare.
            return False
        if "bytes" in self.base.response.parsed_headers.get("accept-ranges", []):
            if not self.base.response.payload_sample:
                return False
            if self.range_start == self.range_end:
                # wow, that's a small body.
                return False
            return True
        self.base.partial_support = False
        return False

    def done(self) -> None:
        """Evaluate the partial response against the sampled base payload."""
        if not self.response.complete:
            if self.response.http_error:
                problem = self.response.http_error.desc
            else:
                problem = ""
            self.add_base_note("", RANGE_SUBREQ_PROBLEM, problem=problem)
            return
        if self.response.status_code == "206":
            c_e = "content-encoding"
            # BUG FIX: the previous chained comparison
            # ('"gzip" in X == "gzip" not in Y') compared the parsed header
            # list itself to the string "gzip" and so could never be true.
            # The mismatch note applies when exactly one of the two
            # responses is gzip-encoded.
            if ("gzip" in self.base.response.parsed_headers.get(c_e, [])) != (
                "gzip" in self.response.parsed_headers.get(c_e, [])
            ):
                self.add_base_note(
                    "header-accept-ranges header-content-encoding", RANGE_NEG_MISMATCH
                )
                return
            self.check_missing_hdrs(
                [
                    "date",
                    "cache-control",
                    "content-location",
                    "etag",
                    "expires",
                    "vary",
                ],
                MISSING_HDRS_206,
            )
            # Only compare bodies when the ETags match; otherwise the
            # representation changed between the two requests.
            if self.response.parsed_headers.get(
                "etag", None
            ) == self.base.response.parsed_headers.get("etag", None):
                if self.response.payload == self.range_target:
                    self.base.partial_support = True
                    self.add_base_note("header-accept-ranges", RANGE_CORRECT)
                else:
                    # the body samples are just bags of bits
                    self.base.partial_support = False
                    self.add_base_note(
                        "header-accept-ranges",
                        RANGE_INCORRECT,
                        range="bytes=%s-%s" % (self.range_start, self.range_end),
                        range_expected=display_bytes(self.range_target),
                        range_expected_bytes=f_num(len(self.range_target)),
                        range_received=display_bytes(self.response.payload),
                        range_received_bytes=f_num(self.response.payload_len),
                    )
            else:
                self.add_base_note("header-accept-ranges", RANGE_CHANGED)
        elif self.response.status_code == self.base.response.status_code:
            # Same status as the full request: server ignored the Range header.
            self.base.partial_support = False
            self.add_base_note("header-accept-ranges", RANGE_FULL)
        else:
            self.add_base_note(
                "header-accept-ranges",
                RANGE_STATUS,
                range_status=self.response.status_code,
                enc_range_status=self.response.status_code or "(unknown)",
            )
class RANGE_SUBREQ_PROBLEM(Note):
    """Note: the range subrequest itself did not complete."""
    category = categories.RANGE
    level = levels.INFO
    summary = "There was a problem checking for Partial Content support."
    text = """\
When REDbot tried to check the resource for partial content support, there was a problem:
`%(problem)s`
Trying again might fix it."""
class UNKNOWN_RANGE(Note):
    """Note: Accept-Ranges advertises a non-standard range unit."""
    category = categories.RANGE
    level = levels.WARN
    summary = "%(response)s advertises support for non-standard range-units."
    text = """\
The `Accept-Ranges` response header tells clients what `range-unit`s a resource is willing to
process in future requests. HTTP only defines two: `bytes` and `none`.
Clients who don't know about the non-standard range-unit will not be able to use it."""
class RANGE_CORRECT(Note):
    """Note: the partial response matched the expected byte range."""
    category = categories.RANGE
    level = levels.GOOD
    summary = "A ranged request returned the correct partial content."
    text = """\
This resource advertises support for ranged requests with `Accept-Ranges`; that is, it allows
clients to specify that only part of it should be sent. REDbot has tested this by requesting part of
this response, which was returned correctly."""
class RANGE_INCORRECT(Note):
    """Note: a 206 was returned, but its bytes don't match the full body."""
    category = categories.RANGE
    level = levels.BAD
    summary = "A ranged request returned partial content, but it was incorrect."
    text = """\
This resource advertises support for ranged requests with `Accept-Ranges`; that is, it allows
clients to specify that only part of the response should be sent. REDbot has tested this by
requesting part of this response, but the partial response doesn't correspond with the full
response retrieved at the same time. This could indicate that the range implementation isn't
working properly.
REDbot sent:
> Range: %(range)s
REDbot expected %(range_expected_bytes)s bytes:
> %(range_expected).100s
REDbot received %(range_received_bytes)s bytes:
> %(range_received).100s
_(showing samples of up to 100 characters)_"""
class RANGE_CHANGED(Note):
    """Note: ETags differ, so the representation changed between requests."""
    category = categories.RANGE
    level = levels.WARN
    summary = "A ranged request returned another representation."
    text = """\
A new representation was retrieved when checking support of ranged request. This is not an error,
it just indicates that REDbot cannot draw any conclusion at this time."""
class RANGE_FULL(Note):
    """Note: the server ignored the Range header and sent the full body."""
    category = categories.RANGE
    level = levels.WARN
    summary = "A ranged request returned the full rather than partial content."
    text = """\
This resource advertises support for ranged requests with `Accept-Ranges`; that is, it allows
clients to specify that only part of the response should be sent. REDbot has tested this by
requesting part of this response, but the entire response was returned. In other words, although
the resource advertises support for partial content, it doesn't appear to actually do so."""
class RANGE_STATUS(Note):
    """Note: the range subrequest got an unexpected status code."""
    category = categories.RANGE
    level = levels.INFO
    summary = "A ranged request returned a %(range_status)s status."
    text = """\
This resource advertises support for ranged requests; that is, it allows clients to specify that
only part of the response should be sent. REDbot has tested this by requesting part of this
response, but a %(enc_range_status)s response code was returned, which REDbot was not expecting."""
class RANGE_NEG_MISMATCH(Note):
    """Note: gzip negotiation differs between full and partial responses."""
    category = categories.RANGE
    level = levels.BAD
    summary = "Partial responses don't have the same support for compression that full ones do."
    text = """\
This resource supports ranged requests and also supports negotiation for gzip compression, but
doesn't support compression for both full and partial responses.
This can cause problems for clients when they compare the partial and full responses, since the
partial response is expressed as a byte range, and compression changes the bytes."""
class MISSING_HDRS_206(Note):
    """Note: the 206 response omits headers the full response carries."""
    category = categories.VALIDATION
    level = levels.WARN
    summary = "%(response)s is missing required headers."
    text = """\
HTTP requires `206 Partial Content` responses to have certain headers, if they are also present in
a normal (e.g., `200 OK` response).
%(response)s is missing the following headers: `%(missing_hdrs)s`.
This can affect cache operation; because the headers are missing, caches might remove them from
their stored copies."""
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from desktop.lib.paginator import Paginator
from django.utils.functional import wraps
from hadoop import cluster
from hadoop.api.jobtracker.ttypes import ThriftJobPriority, TaskTrackerNotFoundException, ThriftJobState
import hadoop.yarn.history_server_api as history_server_api
import hadoop.yarn.mapreduce_api as mapreduce_api
import hadoop.yarn.resource_manager_api as resource_manager_api
import hadoop.yarn.node_manager_api as node_manager_api
from jobbrowser.conf import SHARE_JOBS
from jobbrowser.models import Job, JobLinkage, TaskList, Tracker
from jobbrowser.yarn_models import Application, Job as YarnJob, KilledJob as KilledYarnJob, Container, SparkJob
from hadoop.cluster import get_next_ha_mrcluster, get_next_ha_yarncluster
from desktop.lib.exceptions_renderable import PopupException
LOG = logging.getLogger(__name__)  # module-level logger
_DEFAULT_OBJ_PER_PAGINATION = 10  # default page size for task/job listings
def get_api(user, jt):
  """Select the job-browser backend: YARN when configured, else JobTracker."""
  if cluster.is_yarn():
    return YarnApi(user)
  return JtApi(jt)
def jt_ha(funct):
  """
  Support JT plugin HA by trying other MR cluster.

  This modifies the cached JT and so will happen just once by failover.
  """
  def decorate(api, *args, **kwargs):
    try:
      return funct(api, *args, **kwargs)
    # "except E as e" replaces the Python-2-only "except E, e" form; it is
    # valid on Python 2.6+ and required on Python 3.
    except Exception as ex:
      if 'Could not connect to' in str(ex):
        LOG.info('JobTracker not available, trying JT plugin HA: %s.' % ex)
        jt_ha = get_next_ha_mrcluster()
        if jt_ha is not None:
          # Give up if failover would land on the same host we just tried.
          if jt_ha[1].host == api.jt.host:
            raise ex
          config, api.jt = jt_ha
          return funct(api, *args, **kwargs)
      raise ex
  return wraps(funct)(decorate)
def rm_ha(funct):
  """
  Support RM HA by trying other RM API.
  """
  def decorate(api, *args, **kwargs):
    try:
      return funct(api, *args, **kwargs)
    # "except E as e" replaces the Python-2-only "except E, e" form; it is
    # valid on Python 2.6+ and required on Python 3.
    except Exception as ex:
      ex_message = str(ex)
      if 'Connection refused' in ex_message or 'standby RM' in ex_message:
        LOG.info('Resource Manager not available, trying another RM: %s.' % ex)
        rm_ha = get_next_ha_yarncluster()
        if rm_ha is not None:
          # Give up if failover would land on the same RM we just tried.
          if rm_ha[1].url == api.resource_manager_api.url:
            raise ex
          config, api.resource_manager_api = rm_ha
          return funct(api, *args, **kwargs)
      raise ex
  return wraps(funct)(decorate)
class JobBrowserApi(object):
  """Shared behaviour for the JobTracker- and YARN-backed browser APIs."""
  def paginate_task(self, task_list, pagenum):
    """Return page *pagenum* of *task_list*, using the default page size."""
    return Paginator(task_list, _DEFAULT_OBJ_PER_PAGINATION).page(pagenum)
class JtApi(JobBrowserApi):
  """Job-browser backend talking to the MR1 JobTracker Thrift plugin."""
  def __init__(self, jt):
    # jt: JobTracker client; may be swapped by the @jt_ha failover decorator.
    self.jt = jt
  @jt_ha
  def get_job_link(self, jobid):
    # Lightweight linkage object; does not fetch the whole job.
    return JobLinkage(self.jt, jobid)
  @jt_ha
  def get_job(self, jobid):
    return Job.from_id(jt=self.jt, jobid=jobid)
  @jt_ha
  def get_jobs(self, user, **kwargs):
    """
    Returns an array of jobs where the returned
    jobs are matched by the provided filter arguments.
    If a filter argument is in kwargs it will supersede the same argument
    in the request object.
    Filter arguments may be jobid, pools, user, tasks, text and state.
    Filter by user ownership if check_permission is set to true.
    """
    # Map each state filter to (fetcher, retired-state constant).
    jobfunc = {
      "completed" : (self.jt.completed_jobs, ThriftJobState.SUCCEEDED),
      # Succeeded and completed are synonyms here.
      "succeeded" : (self.jt.completed_jobs, ThriftJobState.SUCCEEDED),
      "running" : (self.jt.running_jobs, ThriftJobState.RUNNING),
      "failed" : (self.jt.failed_jobs, ThriftJobState.FAILED),
      "killed" : (self.jt.killed_jobs, ThriftJobState.KILLED),
      "all" : (self.jt.all_jobs, None),
      None : (self.jt.all_jobs, None)
    }
    selection = kwargs.pop('state')
    retired = kwargs.pop('retired')
    jobs = jobfunc[selection][0]().jobs
    if retired:
      # Retired jobs are fetched separately, filtered by the state constant.
      jobs += self.jt.retired_jobs(jobfunc[selection][1]).jobs
    return self.filter_jobs(user, jobs, **kwargs)
  @jt_ha
  def filter_jobs(self, user, jobs, **kwargs):
    """Apply ownership checks and the username/text filters, capped by limit."""
    check_permission = not SHARE_JOBS.get() and not user.is_superuser
    limit = kwargs.pop('limit', 10000)
    return [Job.from_thriftjob(self.jt, j)
            for j in self._filter_jobs(jobs, **kwargs)
            if not check_permission or user.is_superuser or j.profile.user == user.username][:limit]
  def _filter_jobs(self, jobs, username=None, text=None):
    # Pure filtering over Thrift job structs; no RPC involved.
    def predicate(job):
      """
      Return True if a ThriftJobInProgress structure matches the supplied filters.
      If a filter argument is None, everything matches it.
      """
      if username and username not in job.profile.user:
        return False
      if text:
        search = text.lower()
        # These fields are chosen to match those displayed by the JT UI
        saw_text = False
        for t in [job.profile.user,
                  job.profile.name,
                  job.jobID.asString,
                  job.profile.queueName,
                  job.priorityAsString
                  ]:
          if search in t.lower():
            saw_text = True
            break
        if not saw_text:
          return False
      return True
    return filter(predicate, jobs)
  @jt_ha
  def get_tasks(self, jobid, **filters):
    """Fetch one page of tasks for a job, applying type/state/text filters."""
    return TaskList.select(self.jt,
                           jobid,
                           filters['task_types'],
                           filters['task_states'],
                           filters['task_text'],
                           _DEFAULT_OBJ_PER_PAGINATION,
                           _DEFAULT_OBJ_PER_PAGINATION * (filters['pagenum'] - 1))
  @jt_ha
  def get_tracker(self, trackerid):
    return Tracker.from_name(self.jt, trackerid)
class YarnApi(JobBrowserApi):
  """
  List all the jobs with Resource Manager API.
  Get running single job information with MapReduce API.
  Get finished single job information with History Server API.
  The trick is that we use appid when the job is running and jobid when it is finished.
  We also suppose that each app id has only one MR job id.
  e.g. job_1355791146953_0105, application_1355791146953_0105
  A better alternative might be to call the Resource Manager instead of relying on the type of job id.
  The perfect solution would be to have all this logic embedded
  """
  def __init__(self, user):
    # One client per backing service; the RM client may be swapped by @rm_ha.
    self.user = user
    self.resource_manager_api = resource_manager_api.get_resource_manager(user)
    self.mapreduce_api = mapreduce_api.get_mapreduce_api()
    self.history_server_api = history_server_api.get_history_server_api()
  def get_job_link(self, job_id):
    # No lightweight linkage for YARN; fetch the full job.
    return self.get_job(job_id)
  @rm_ha
  def get_jobs(self, user, **kwargs):
    """List applications from the RM, then filter by user/state/text."""
    state_filters = {'running': 'UNDEFINED', 'completed': 'SUCCEEDED', 'failed': 'FAILED', 'killed': 'KILLED', }
    filters = {}
    if kwargs['username']:
      filters['user'] = kwargs['username']
    if kwargs['state'] and kwargs['state'] != 'all':
      filters['finalStatus'] = state_filters[kwargs['state']]
    if kwargs.get('limit'):
      filters['limit'] = kwargs['limit']
    json = self.resource_manager_api.apps(**filters)
    # A standby RM answers with a plain string; raise so @rm_ha fails over.
    if type(json) == str and 'This is standby RM' in json:
      raise Exception(json)
    if json['apps']:
      jobs = [Application(app) for app in json['apps']['app']]
    else:
      return []
    if kwargs['text']:
      text = kwargs['text'].lower()
      jobs = filter(lambda job:
                    text in job.name.lower() or
                    text in job.id.lower() or
                    text in job.user.lower() or
                    text in job.queue.lower(), jobs)
    return self.filter_jobs(user, jobs)
  def filter_jobs(self, user, jobs, **kwargs):
    """Drop other users' jobs unless sharing is on or the user is a superuser."""
    check_permission = not SHARE_JOBS.get() and not user.is_superuser
    return filter(lambda job:
                  not check_permission or
                  user.is_superuser or
                  job.user == user.username, jobs)
  @rm_ha
  def get_job(self, jobid):
    """Resolve a job/app id to the right job object via RM, MR or HS APIs."""
    try:
      # App id
      jobid = jobid.replace('job', 'application')
      job = self.resource_manager_api.app(jobid)['app']
      if job['state'] == 'ACCEPTED':
        raise ApplicationNotRunning(jobid, job)
      elif job['state'] == 'KILLED':
        return KilledYarnJob(self.resource_manager_api, job)
      if job.get('applicationType') == 'SPARK':
        job = SparkJob(job, self.resource_manager_api)
      elif job.get('applicationType') == 'MAPREDUCE':
        jobid = jobid.replace('application', 'job')
        # Running MR jobs come from the MR API, finished ones from the HS API.
        if job['state'] in ('NEW', 'SUBMITTED', 'ACCEPTED', 'RUNNING'):
          json = self.mapreduce_api.job(self.user, jobid)
          job = YarnJob(self.mapreduce_api, json['job'])
        else:
          json = self.history_server_api.job(self.user, jobid)
          job = YarnJob(self.history_server_api, json['job'])
      else:
        job = Application(job, self.resource_manager_api)
    except ApplicationNotRunning, e:
      raise e
    except Exception, e:
      if 'NotFoundException' in str(e):
        raise JobExpired(jobid)
      else:
        raise PopupException('Job %s could not be found: %s' % (jobid, e), detail=e)
    return job
  def get_tasks(self, jobid, **filters):
    # YARN task listings are not paginated here; drop the page number.
    filters.pop('pagenum')
    return self.get_job(jobid).filter_tasks(**filters)
  def get_task(self, jobid, task_id):
    return self.get_job(jobid).task(task_id)
  def get_tracker(self, node_manager_http_address, container_id):
    """Look a container up on its NodeManager (the YARN 'tracker' analogue)."""
    api = node_manager_api.get_node_manager_api('http://' + node_manager_http_address)
    return Container(api.container(container_id))
class ApplicationNotRunning(Exception):
  """Raised when a YARN application exists but has not started running."""
  def __init__(self, application_id, job):
    self.job = job
    self.application_id = application_id
class JobExpired(Exception):
  """Raised when a job id can no longer be found on any of the servers."""
  def __init__(self, job):
    message = 'JobExpired: %s' % job
    super(JobExpired, self).__init__(message)
    self.job = job
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, DateField, DateTimeField,
DecimalField, EmailField, FileField, FloatField, Form,
GenericIPAddressField, IntegerField, IPAddressField, ModelChoiceField,
ModelMultipleChoiceField, MultipleChoiceField, RegexField,
SplitDateTimeField, TimeField, URLField, ValidationError, utils,
)
from django.test import TestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
class AssertFormErrorsMixin(object):
    """Mixin adding an assertion that a callable raises specific form errors."""
    def assertFormErrors(self, expected, the_callable, *args, **kwargs):
        """Assert calling *the_callable* raises ValidationError with exactly
        the *expected* list of messages."""
        try:
            the_callable(*args, **kwargs)
            # BUG FIX: the %s placeholder was previously left unfilled, so
            # failures printed a literal "%s"; interpolate the callable.
            self.fail("Testing the 'clean' method on %s failed to raise a ValidationError." % the_callable)
        except ValidationError as e:
            self.assertEqual(e.messages, expected)
class FormsErrorMessagesTestCase(TestCase, AssertFormErrorsMixin):
    """Verify each form field honours custom error_messages overrides."""
    def test_charfield(self):
        e = {
            'required': 'REQUIRED',
            'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
            'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
        }
        f = CharField(min_length=5, max_length=10, error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
        self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
    def test_integerfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_value': 'MIN VALUE IS %(limit_value)s',
            'max_value': 'MAX VALUE IS %(limit_value)s',
        }
        f = IntegerField(min_value=5, max_value=10, error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abc')
        self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
        self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
    def test_floatfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_value': 'MIN VALUE IS %(limit_value)s',
            'max_value': 'MAX VALUE IS %(limit_value)s',
        }
        f = FloatField(min_value=5, max_value=10, error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abc')
        self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
        self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
    def test_decimalfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_value': 'MIN VALUE IS %(limit_value)s',
            'max_value': 'MAX VALUE IS %(limit_value)s',
            'max_digits': 'MAX DIGITS IS %(max)s',
            'max_decimal_places': 'MAX DP IS %(max)s',
            'max_whole_digits': 'MAX DIGITS BEFORE DP IS %(max)s',
        }
        f = DecimalField(min_value=5, max_value=10, error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abc')
        self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
        self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
        # Digit/decimal-place limits are exercised on a second field instance.
        f2 = DecimalField(max_digits=4, decimal_places=2, error_messages=e)
        self.assertFormErrors(['MAX DIGITS IS 4'], f2.clean, '123.45')
        self.assertFormErrors(['MAX DP IS 2'], f2.clean, '1.234')
        self.assertFormErrors(['MAX DIGITS BEFORE DP IS 2'], f2.clean, '123.4')
    def test_datefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
        }
        f = DateField(error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abc')
    def test_timefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
        }
        f = TimeField(error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abc')
    def test_datetimefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
        }
        f = DateTimeField(error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abc')
    def test_regexfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
            'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
        }
        f = RegexField(r'^[0-9]+$', min_length=5, max_length=10, error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abcde')
        self.assertFormErrors(['LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
        self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
    def test_emailfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
            'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
        }
        f = EmailField(min_length=8, max_length=10, error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abcdefgh')
        self.assertFormErrors(['LENGTH 7, MIN LENGTH 8'], f.clean, 'a@b.com')
        self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, 'aye@bee.com')
    def test_filefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'missing': 'MISSING',
            'empty': 'EMPTY FILE',
        }
        f = FileField(error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abc')
        self.assertFormErrors(['EMPTY FILE'], f.clean, SimpleUploadedFile('name', None))
        self.assertFormErrors(['EMPTY FILE'], f.clean, SimpleUploadedFile('name', ''))
    def test_urlfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'max_length': '"%(value)s" has more than %(limit_value)d characters.',
        }
        f = URLField(error_messages=e, max_length=17)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID'], f.clean, 'abc.c')
        # The expected message shows the scheme-prefixed value, i.e. length
        # validation runs after URL normalization.
        self.assertFormErrors(['"http://djangoproject.com" has more than 17 characters.'], f.clean, 'djangoproject.com')
    def test_booleanfield(self):
        e = {
            'required': 'REQUIRED',
        }
        f = BooleanField(error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
    def test_choicefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid_choice': '%(value)s IS INVALID CHOICE',
        }
        f = ChoiceField(choices=[('a', 'aye')], error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['b IS INVALID CHOICE'], f.clean, 'b')
    def test_multiplechoicefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid_choice': '%(value)s IS INVALID CHOICE',
            'invalid_list': 'NOT A LIST',
        }
        f = MultipleChoiceField(choices=[('a', 'aye')], error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['NOT A LIST'], f.clean, 'b')
        self.assertFormErrors(['b IS INVALID CHOICE'], f.clean, ['b'])
    def test_splitdatetimefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid_date': 'INVALID DATE',
            'invalid_time': 'INVALID TIME',
        }
        f = SplitDateTimeField(error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID DATE', 'INVALID TIME'], f.clean, ['a', 'b'])
    @ignore_warnings(category=RemovedInDjango19Warning)
    def test_ipaddressfield(self):
        # IPAddressField is deprecated; the warning is silenced above.
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID IP ADDRESS',
        }
        f = IPAddressField(error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID IP ADDRESS'], f.clean, '127.0.0')
    def test_generic_ipaddressfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID IP ADDRESS',
        }
        f = GenericIPAddressField(error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID IP ADDRESS'], f.clean, '127.0.0')
    def test_subclassing_errorlist(self):
        """A custom error_class changes how rendered errors are wrapped."""
        class TestForm(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()
            def clean(self):
                raise ValidationError("I like to be awkward.")
        @python_2_unicode_compatible
        class CustomErrorList(utils.ErrorList):
            def __str__(self):
                return self.as_divs()
            def as_divs(self):
                if not self:
                    return ''
                return mark_safe('<div class="error">%s</div>' % ''.join('<p>%s</p>' % e for e in self))
        # This form should print errors the default way.
        form1 = TestForm({'first_name': 'John'})
        self.assertHTMLEqual(str(form1['last_name'].errors), '<ul class="errorlist"><li>This field is required.</li></ul>')
        self.assertHTMLEqual(str(form1.errors['__all__']), '<ul class="errorlist nonfield"><li>I like to be awkward.</li></ul>')
        # This one should wrap error groups in the customized way.
        form2 = TestForm({'first_name': 'John'}, error_class=CustomErrorList)
        self.assertHTMLEqual(str(form2['last_name'].errors), '<div class="error"><p>This field is required.</p></div>')
        self.assertHTMLEqual(str(form2.errors['__all__']), '<div class="error"><p>I like to be awkward.</p></div>')
class ModelChoiceFieldErrorMessagesTestCase(TestCase, AssertFormErrorsMixin):
    """Verify custom error_messages on the model-backed choice fields."""
    def test_modelchoicefield(self):
        # Create choices for the model choice field tests below.
        from forms_tests.models import ChoiceModel
        ChoiceModel.objects.create(pk=1, name='a')
        ChoiceModel.objects.create(pk=2, name='b')
        ChoiceModel.objects.create(pk=3, name='c')
        # ModelChoiceField
        e = {
            'required': 'REQUIRED',
            'invalid_choice': 'INVALID CHOICE',
        }
        f = ModelChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['INVALID CHOICE'], f.clean, '4')
        # ModelMultipleChoiceField
        e = {
            'required': 'REQUIRED',
            'invalid_choice': '%(value)s IS INVALID CHOICE',
            'list': 'NOT A LIST OF VALUES',
        }
        f = ModelMultipleChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
        self.assertFormErrors(['REQUIRED'], f.clean, '')
        self.assertFormErrors(['NOT A LIST OF VALUES'], f.clean, '3')
        self.assertFormErrors(['4 IS INVALID CHOICE'], f.clean, ['4'])
| |
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.ambari import common as p_common
from sahara.plugins import utils
# Names of the Ambari blueprint configuration sections edited by this module.
CORE_SITE = "core-site"
YARN_SITE = "yarn-site"
HBASE_SITE = "hbase-site"
HDFS_SITE = "hdfs-site"
HADOOP_ENV = "hadoop-env"
ZOO_CFG = "zoo.cfg"
def update_bp_ha_common(cluster, blueprint):
    """Apply the blueprint changes shared by every HA flavour."""
    blueprint = _set_default_fs(cluster, blueprint, p_common.NAMENODE_HA)
    return _set_high_zk_limits(blueprint)
def update_bp_for_namenode_ha(cluster, blueprint):
    """Prepare the blueprint for HDFS NameNode HA."""
    blueprint = _add_zkfc_to_namenodes(blueprint)
    blueprint = _set_zk_quorum(cluster, blueprint, CORE_SITE)
    return _configure_hdfs_site(cluster, blueprint)
def update_bp_for_resourcemanager_ha(cluster, blueprint):
    """Prepare the blueprint for YARN ResourceManager HA."""
    blueprint = _configure_yarn_site(cluster, blueprint)
    blueprint = _set_zk_quorum(cluster, blueprint, YARN_SITE)
    return _set_default_fs(cluster, blueprint, p_common.RESOURCEMANAGER_HA)
def update_bp_for_hbase_ha(cluster, blueprint):
    # Prepare the blueprint for HBase HA.
    # NOTE(review): the helper name carries a long-standing typo ("confgure").
    return _confgure_hbase_site(cluster, blueprint)
def _add_zkfc_to_namenodes(blueprint):
    """Append a ZKFC component to every host group that runs a NAMENODE."""
    namenode = {"name": "NAMENODE"}
    for host_group in blueprint["host_groups"]:
        if namenode in host_group["components"]:
            host_group["components"].append({"name": "ZKFC"})
    return blueprint
def _find_create_properties_section(blueprint, section_name):
    """Return the properties dict for *section_name*, creating it if absent."""
    for conf_group in blueprint["configurations"]:
        if section_name in conf_group:
            return conf_group[section_name]
    # Not present yet: register an empty section and hand back the live dict.
    fresh = {}
    blueprint["configurations"].append({section_name: fresh})
    return fresh
def _find_hdfs_site(blueprint):
    # Properties of the "hdfs-site" section (created on demand).
    return _find_create_properties_section(blueprint, HDFS_SITE)
def _find_yarn_site(blueprint):
    # Properties of the "yarn-site" section (created on demand).
    return _find_create_properties_section(blueprint, YARN_SITE)
def _find_core_site(blueprint):
    # Properties of the "core-site" section (created on demand).
    return _find_create_properties_section(blueprint, CORE_SITE)
def _find_hadoop_env(blueprint):
    # Properties of the "hadoop-env" section (created on demand).
    return _find_create_properties_section(blueprint, HADOOP_ENV)
def _find_zoo_cfg(blueprint):
    # Properties of the "zoo.cfg" section (created on demand).
    return _find_create_properties_section(blueprint, ZOO_CFG)
def _find_hbase_site(blueprint):
    # Properties of the "hbase-site" section (created on demand).
    return _find_create_properties_section(blueprint, HBASE_SITE)
def _set_default_fs(cluster, blueprint, ha_type):
    """Point core-site's fs.defaultFS at the right HDFS endpoint for *ha_type*."""
    core_site = _find_core_site(blueprint)
    if ha_type == p_common.NAMENODE_HA:
        # With NameNode HA clients address the logical nameservice,
        # not an individual host.
        core_site["fs.defaultFS"] = "hdfs://hdfs-ha"
    elif ha_type == p_common.RESOURCEMANAGER_HA:
        namenode = utils.get_instances(cluster, p_common.NAMENODE)[0]
        core_site["fs.defaultFS"] = "hdfs://%s:8020" % namenode.fqdn()
    return blueprint
def _set_zk_quorum(cluster, blueprint, conf_type):
    """Write the ZooKeeper quorum string into core-site or yarn-site."""
    zookeepers = utils.get_instances(cluster, p_common.ZOOKEEPER_SERVER)
    quorum = ",".join("%s:2181" % zk.fqdn() for zk in zookeepers)
    if conf_type == CORE_SITE:
        _find_core_site(blueprint)["ha.zookeeper.quorum"] = quorum
    elif conf_type == YARN_SITE:
        _find_yarn_site(blueprint)["hadoop.registry.zk.quorum"] = quorum
    return blueprint
def _set_high_zk_limits(blueprint):
    """Raise the ZooKeeper tick time so HA components tolerate slower nodes."""
    _find_zoo_cfg(blueprint)["tickTime"] = "10000"
    return blueprint
def _set_primary_and_standby_namenode(cluster, blueprint):
    """Record which NameNode starts active and which starts standby."""
    namenodes = utils.get_instances(cluster, p_common.NAMENODE)
    env = _find_hadoop_env(blueprint)
    # First instance becomes the initially-active NameNode by convention.
    env["dfs_ha_initial_namenode_active"] = namenodes[0].fqdn()
    env["dfs_ha_initial_namenode_standby"] = namenodes[1].fqdn()
    return blueprint
def _configure_hdfs_site(cluster, blueprint):
    """Fill hdfs-site with the settings required for NameNode HA."""
    props = _find_hdfs_site(blueprint)
    props["dfs.client.failover.proxy.provider.hdfs-ha"] = (
        "org.apache.hadoop.hdfs.server.namenode.ha."
        "ConfiguredFailoverProxyProvider")
    props["dfs.ha.automatic-failover.enabled"] = "true"
    # Fencing is a deliberate no-op: the JournalNode quorum already
    # enforces single-writer semantics.
    props["dfs.ha.fencing.methods"] = "shell(/bin/true)"
    props["dfs.nameservices"] = "hdfs-ha"
    journalnodes = utils.get_instances(cluster, p_common.JOURNAL_NODE)
    shared_edits = "qjournal://%s/hdfs-ha" % ";".join(
        "%s:8485" % jn.fqdn() for jn in journalnodes)
    props["dfs.namenode.shared.edits.dir"] = shared_edits
    namenodes = utils.get_instances(cluster, p_common.NAMENODE)
    props["dfs.ha.namenodes.hdfs-ha"] = ",".join(
        nn.instance_name for nn in namenodes)
    # Unqualified addresses point at the first NameNode.
    props["dfs.namenode.http-address"] = "%s:50070" % namenodes[0].fqdn()
    props["dfs.namenode.https-address"] = "%s:50470" % namenodes[0].fqdn()
    # Per-NameNode addresses, qualified by nameservice and instance name.
    for nn in namenodes:
        qualifier = "hdfs-ha.%s" % nn.instance_name
        props["dfs.namenode.http-address.%s" % qualifier] = (
            "%s:50070" % nn.fqdn())
        props["dfs.namenode.https-address.%s" % qualifier] = (
            "%s:50470" % nn.fqdn())
        props["dfs.namenode.rpc-address.%s" % qualifier] = (
            "%s:8020" % nn.fqdn())
    return blueprint
def _configure_yarn_site(cluster, blueprint):
    """Fill yarn-site with the settings required for ResourceManager HA.

    Writes the RM/ZooKeeper/timeline-service endpoints into the blueprint's
    yarn-site section and returns the mutated blueprint.
    """
    props = _find_yarn_site(blueprint)
    name = cluster.name
    rm_instances = utils.get_instances(cluster, p_common.RESOURCEMANAGER)
    props["hadoop.registry.rm.enabled"] = "false"
    zk_instances = utils.get_instances(cluster, p_common.ZOOKEEPER_SERVER)
    # ZooKeeper quorum as host:2181 pairs (RM state store + leader election).
    zks = ",".join(["%s:2181" % i.fqdn() for i in zk_instances])
    props["yarn.resourcemanager.zk-address"] = zks
    hs = utils.get_instance(cluster, p_common.HISTORYSERVER)
    # NOTE(review): no scheme prefix on the log server URL — confirm Ambari
    # accepts host:port here.
    props["yarn.log.server.url"] = "%s:19888/jobhistory/logs/" % hs.fqdn()
    # Unqualified RM addresses point at the first ResourceManager.
    props["yarn.resourcemanager.address"] = "%s:8050" % rm_instances[0].fqdn()
    props["yarn.resourcemanager.admin.address"] = ("%s:8141" %
                                                  rm_instances[0].fqdn())
    props["yarn.resourcemanager.cluster-id"] = name
    props["yarn.resourcemanager.ha.automatic-failover.zk-base-path"] = (
        "/yarn-leader-election")
    props["yarn.resourcemanager.ha.enabled"] = "true"
    rm_id_concat = ",".join([i.instance_name for i in rm_instances])
    props["yarn.resourcemanager.ha.rm-ids"] = rm_id_concat
    # Per-RM addresses, qualified by instance name.
    for i in rm_instances:
        props["yarn.resourcemanager.hostname.%s" % i.instance_name] = i.fqdn()
        props["yarn.resourcemanager.webapp.address.%s" %
              i.instance_name] = "%s:8088" % i.fqdn()
        props["yarn.resourcemanager.webapp.https.address.%s" %
              i.instance_name] = "%s:8090" % i.fqdn()
    props["yarn.resourcemanager.hostname"] = rm_instances[0].fqdn()
    props["yarn.resourcemanager.recovery.enabled"] = "true"
    props["yarn.resourcemanager.resource-tracker.address"] = (
        "%s:8025" % rm_instances[0].fqdn())
    props["yarn.resourcemanager.scheduler.address"] = (
        "%s:8030" % rm_instances[0].fqdn())
    # Persist RM state in ZooKeeper so a failover can recover running apps.
    props["yarn.resourcemanager.store.class"] = (
        "org.apache.hadoop.yarn.server.resourcemanager.recovery."
        "ZKRMStateStore")
    props["yarn.resourcemanager.webapp.address"] = (
        "%s:8088" % rm_instances[0].fqdn())
    props["yarn.resourcemanager.webapp.https.address"] = (
        "%s:8090" % rm_instances[0].fqdn())
    tls_instance = utils.get_instance(cluster, p_common.APP_TIMELINE_SERVER)
    props["yarn.timeline-service.address"] = "%s:10200" % tls_instance.fqdn()
    props["yarn.timeline-service.webapp.address"] = (
        "%s:8188" % tls_instance.fqdn())
    props["yarn.timeline-service.webapp.https.address"] = (
        "%s:8190" % tls_instance.fqdn())
    return blueprint
def _confgure_hbase_site(cluster, blueprint):
    """Fill hbase-site with the settings required for HBase HA.

    (The name keeps its historical misspelling; callers reference it as-is.)
    """
    props = _find_hbase_site(blueprint)
    props["hbase.regionserver.global.memstore.lowerLimit"] = "0.38"
    props["hbase.regionserver.global.memstore.upperLimit"] = "0.4"
    props["hbase.regionserver.handler.count"] = "60"
    props["hbase.regionserver.info.port"] = "60030"
    props["hbase.regionserver.storefile.refresh.period"] = "20"
    # Root dir lives on the HA nameservice rather than a single NameNode.
    props["hbase.rootdir"] = "hdfs://hdfs-ha/apps/hbase/data"
    props["hbase.security.authentication"] = "simple"
    props["hbase.security.authorization"] = "false"
    props["hbase.superuser"] = "hbase"
    props["hbase.tmp.dir"] = "/hadoop/hbase"
    props["hbase.zookeeper.property.clientPort"] = "2181"
    zookeepers = utils.get_instances(cluster, p_common.ZOOKEEPER_SERVER)
    props["hbase.zookeeper.quorum"] = ",".join(
        zk.fqdn() for zk in zookeepers)
    props["hbase.zookeeper.useMulti"] = "true"
    props["hfile.block.cache.size"] = "0.40"
    props["zookeeper.session.timeout"] = "30000"
    props["zookeeper.znode.parent"] = "/hbase-unsecure"
    return blueprint
| |
# coding: utf-8
from __future__ import division, unicode_literals
"""
Module containing analysis classes which compute a pourbaix diagram given a
target compound/element.
"""
from six.moves import zip
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "Sai Jayaraman"
__email__ = "sjayaram@mit.edu"
__status__ = "Development"
__date__ = "Nov 1, 2012"
import logging
import numpy as np
import itertools
from itertools import chain
from pyhull.convex_hull import ConvexHull
from pymatgen.analysis.pourbaix.entry import MultiEntry, ion_or_solid_comp_object
from pymatgen.core import Element, Composition
logger = logging.getLogger(__name__)
PREFAC = 0.0591
MU_H2O = -2.4583
class PourbaixDiagram(object):
    """
    Class to create a Pourbaix diagram from entries

    Args:
        entries: Entries list containing both Solids and Ions
        comp_dict: Dictionary of compositions mapping composition/ion strings
            to their (relative) amounts; when given, a multi-element diagram
            is constructed.
    """
    def __init__(self, entries, comp_dict=None):
        # Partition input entries by phase type; anything that is neither
        # Solid nor Ion is rejected outright.
        self._solid_entries = list()
        self._ion_entries = list()
        for entry in entries:
            if entry.phase_type == "Solid":
                self._solid_entries.append(entry)
            elif entry.phase_type == "Ion":
                self._ion_entries.append(entry)
            else:
                raise Exception("Incorrect Phase type - needs to be \
                    Pourbaix entry of phase type Ion/Solid")
        self._unprocessed_entries = self._solid_entries + self._ion_entries
        self._elt_comp = comp_dict
        if comp_dict:
            self._multielement = True
            # Collect the non-H/non-O elements spanned by the compositions.
            pbx_elements = set()
            for comp in comp_dict.keys():
                # NOTE(review): comparing Element objects against the strings
                # "H"/"O" — relies on pymatgen Element/str equality; confirm.
                for el in [el for el in
                           ion_or_solid_comp_object(comp).elements
                           if el not in ["H", "O"]]:
                    pbx_elements.add(el.symbol)
            self.pourbaix_elements = pbx_elements
            # Solve A x = w for per-element fractions, where A holds the
            # element counts of each composition and w their weights.
            w = [comp_dict[key] for key in comp_dict]
            A = []
            for comp in comp_dict:
                comp_obj = ion_or_solid_comp_object(comp)
                Ai = []
                for elt in self.pourbaix_elements:
                    Ai.append(comp_obj[elt])
                A.append(Ai)
            A = np.array(A).T.astype(float)
            w = np.array(w)
            # Normalize so the element fractions sum consistently.
            A /= np.dot([a.sum() for a in A], w)
            x = np.linalg.solve(A, w)
            self._elt_comp = dict(zip(self.pourbaix_elements, x))
        else:
            # Single-element diagram: take the element from the first entry.
            self._multielement = False
            self.pourbaix_elements = [el.symbol
                                      for el in entries[0].composition.elements
                                      if el.symbol not in ["H", "O"]]
            self._elt_comp = {self.pourbaix_elements[0]: 1.0}
        self._make_pourbaixdiagram()
    def _create_conv_hull_data(self):
        """
        Make data conducive to convex hull generator.
        """
        if self._multielement:
            self._all_entries = self._process_multielement_entries()
        else:
            self._all_entries = self._unprocessed_entries
        entries_to_process = list()
        for entry in self._all_entries:
            # Normalize each entry and fold in the water chemical potential
            # and concentration contributions.
            entry.scale(entry.normalization_factor)
            entry.correction += (- MU_H2O * entry.nH2O + entry.conc_term)
            entries_to_process.append(entry)
        self._qhull_entries = entries_to_process
        return self._process_conv_hull_data(entries_to_process)
    def _process_conv_hull_data(self, entries_to_process):
        """
        From a sequence of ion+solid entries, generate the necessary data
        for generation of the convex hull.
        """
        data = []
        for entry in entries_to_process:
            # Hull coordinates: (pH coefficient, potential coefficient, g0).
            row = [entry.npH, entry.nPhi, entry.g0]
            data.append(row)
        # Keep data and entries in lockstep, sorted by energy (g0).
        temp = sorted(zip(data, self._qhull_entries),
                      key=lambda x: x[0][2])
        [data, self._qhull_entries] = list(zip(*temp))
        return data
    def _process_multielement_entries(self):
        """
        Create entries for multi-element Pourbaix construction
        """
        N = len(self._elt_comp)  # No. of elements
        entries = self._unprocessed_entries
        # NOTE(review): el_list is indexed with [] below — on Python 3
        # dict.keys() is a non-indexable view; this code is Python-2 era.
        el_list = self._elt_comp.keys()
        comp_list = [self._elt_comp[el] for el in el_list]
        # Enumerate all combinations of 1..N entries as candidate
        # multi-entries.
        list_of_entries = list()
        for j in range(1, N + 1):
            list_of_entries += list(itertools.combinations(
                list(range(len(entries))), j))
        processed_entries = list()
        for entry_list in list_of_entries:
            # Check if all elements in composition list are present in
            # entry_list
            if not (set([Element(el) for el in el_list]).issubset(
                    set(list(chain.from_iterable([entries[i].composition.keys()
                                                  for i in entry_list]))))):
                continue
            if len(entry_list) == 1:
                # If only one entry in entry_list, then check if the composition matches with the set composition.
                entry = entries[entry_list[0]]
                dict_of_non_oh = dict(zip([key for key in entry.composition.keys() if key.symbol not in ["O", "H"]],
                                          [entry.composition[key] for key in [key for key in entry.composition.keys() if key.symbol not in ["O", "H"]]]))
                if Composition(dict(zip(self._elt_comp.keys(), [self._elt_comp[key] / min([self._elt_comp[key] for key in self._elt_comp.keys()])
                                                                for key in self._elt_comp.keys()]))).reduced_formula ==\
                        Composition(dict(zip(dict_of_non_oh.keys(), [dict_of_non_oh[el] / min([dict_of_non_oh[key] for key in dict_of_non_oh.keys()])
                                                                     for el in dict_of_non_oh.keys()]))).reduced_formula:
                    processed_entries.append(MultiEntry([entry], [1.0]))
                continue
            # Solve for mixing weights so the combined entry matches the
            # target element composition (first entry's weight fixed at 1).
            A = [[0.0] * (len(entry_list) - 1) for _ in range(len(entry_list) - 1)]
            multi_entries = [entries[j] for j in entry_list]
            entry0 = entries[entry_list[0]]
            comp0 = entry0.composition
            if entry0.phase_type == "Solid":
                red_fac = comp0.get_reduced_composition_and_factor()[1]
            else:
                red_fac = 1.0
            sum_nel = sum([comp0[el] / red_fac for el in el_list])
            b = [comp0[Element(el_list[i])] / red_fac - comp_list[i] * sum_nel
                 for i in range(1, len(entry_list))]
            for j in range(1, len(entry_list)):
                entry = entries[entry_list[j]]
                comp = entry.composition
                if entry.phase_type == "Solid":
                    red_fac = comp.get_reduced_composition_and_factor()[1]
                else:
                    red_fac = 1.0
                sum_nel = sum([comp[el] / red_fac for el in el_list])
                for i in range(1, len(entry_list)):
                    el = el_list[i]
                    A[i-1][j-1] = comp_list[i] * sum_nel -\
                        comp[Element(el)] / red_fac
            try:
                weights = np.linalg.solve(np.array(A), np.array(b))
            except np.linalg.linalg.LinAlgError as err:
                # NOTE(review): err.message is Python 2 only; on Python 3
                # this raises AttributeError instead of being handled.
                if 'Singular matrix' in err.message:
                    continue
                else:
                    raise Exception("Unknown Error message!")
            # Only physically meaningful (all-positive) weight vectors count.
            if not(np.all(weights > 0.0)):
                continue
            weights = list(weights)
            weights.insert(0, 1.0)
            super_entry = MultiEntry(multi_entries, weights)
            processed_entries.append(super_entry)
        return processed_entries
    def _make_pourbaixdiagram(self):
        """
        Calculates entries on the convex hull in the dual space.
        """
        stable_entries = set()
        self._qhull_data = self._create_conv_hull_data()
        dim = len(self._qhull_data[0])
        if len(self._qhull_data) < dim:
            raise Exception("Can only do elements with at-least 3 entries"
                            " for now")
        if len(self._qhull_data) == dim:
            # Exactly dim points: the single facet is all of them.
            self._facets = [list(range(dim))]
        else:
            facets_pyhull = np.array(ConvexHull(self._qhull_data).vertices)
            self._facets = np.sort(np.array(facets_pyhull))
            logger.debug("Final facets are\n{}".format(self._facets))
            logger.debug("Removing vertical facets...")
            # Drop facets whose augmented matrix is singular (vertical
            # facets carry no lower-hull information).
            vert_facets_removed = list()
            for facet in self._facets:
                facetmatrix = np.zeros((len(facet), len(facet)))
                count = 0
                for vertex in facet:
                    facetmatrix[count] = np.array(self._qhull_data[vertex])
                    facetmatrix[count, dim - 1] = 1
                    count += 1
                if abs(np.linalg.det(facetmatrix)) > 1e-8:
                    vert_facets_removed.append(facet)
                else:
                    logger.debug("Removing vertical facet : {}".format(facet))
            logger.debug("Removing UCH facets by eliminating normal.z >0 ...")
            # Find center of hull
            vertices = set()
            for facet in vert_facets_removed:
                for vertex in facet:
                    vertices.add(vertex)
            c = [0.0, 0.0, 0.0]
            c[0] = np.average([self._qhull_data[vertex][0]
                               for vertex in vertices])
            c[1] = np.average([self._qhull_data[vertex][1]
                               for vertex in vertices])
            c[2] = np.average([self._qhull_data[vertex][2]
                               for vertex in vertices])
            # Shift origin to c
            new_qhull_data = np.array(self._qhull_data)
            for vertex in vertices:
                new_qhull_data[vertex] -= c
            # For each facet, find normal n, find dot product with P, and
            # check if this is -ve
            final_facets = list()
            for facet in vert_facets_removed:
                a = new_qhull_data[facet[1]] - new_qhull_data[facet[0]]
                b = new_qhull_data[facet[2]] - new_qhull_data[facet[0]]
                n = np.cross(a, b)
                val = np.dot(n, new_qhull_data[facet[0]])
                if val < 0:
                    # Orient the normal outward from the hull center.
                    n = -n
                if n[2] <= 0:
                    # Keep only lower-hull facets (normal pointing down).
                    final_facets.append(facet)
                else:
                    logger.debug("Removing UCH facet : {}".format(facet))
            final_facets = np.array(final_facets)
            self._facets = final_facets
        # Collect the stable vertices/entries from the surviving facets.
        stable_vertices = set()
        for facet in self._facets:
            for vertex in facet:
                stable_vertices.add(vertex)
                stable_entries.add(self._qhull_entries[vertex])
        self._stable_entries = stable_entries
        self._vertices = stable_vertices
    @property
    def facets(self):
        """
        Facets of the convex hull in the form of [[1,2,3],[4,5,6]...]
        """
        return self._facets
    @property
    def qhull_data(self):
        """
        Data used in the convex hull operation. This is essentially a matrix of
        composition data and energy per atom values created from qhull_entries.
        """
        return self._qhull_data
    @property
    def qhull_entries(self):
        """
        Return qhull entries
        """
        return self._qhull_entries
    @property
    def stable_entries(self):
        """
        Returns the stable entries in the Pourbaix diagram.
        """
        return list(self._stable_entries)
    @property
    def unstable_entries(self):
        """
        Returns all unstable entries in the Pourbaix diagram
        """
        return [e for e in self.qhull_entries if e not in self.stable_entries]
    @property
    def all_entries(self):
        """
        Return all entries used to generate the Pourbaix diagram
        """
        return self._all_entries
    @property
    def vertices(self):
        """
        Return vertices of the convex hull
        """
        return self._vertices
    @property
    def unprocessed_entries(self):
        """
        Return unprocessed entries
        """
        return self._unprocessed_entries
| |
from __future__ import print_function
import zmq
import socket
import dill
import uuid
from collections import defaultdict
import itertools
from multiprocessing.pool import ThreadPool
import random
from datetime import datetime
from threading import Thread, Lock
from contextlib import contextmanager
import traceback
import sys
from ..compatibility import Queue, unicode, Empty
try:
import cPickle as pickle
except ImportError:
import pickle
from ..core import get_dependencies, flatten
from .. import core
from ..async import finish_task, start_state_from_dask as dag_state_from_dask
# Truncate the scheduler log at import time so each run starts fresh.
with open('log.scheduler', 'w') as f:  # delete file
    pass
def log(*args):
    """Append *args*, space-separated, as a new line in the scheduler log."""
    with open('log.scheduler', 'a') as logfile:
        print('\n', *args, file=logfile)
@contextmanager
def logerrors():
    """Context manager: log any exception (message and traceback), re-raise."""
    try:
        yield
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        formatted_tb = ''.join(traceback.format_tb(exc_traceback))
        log('Error!', str(e))
        log('Traceback', str(formatted_tb))
        raise
class Scheduler(object):
""" Disitributed scheduler for dask computations
Parameters
----------
hostname: string
hostname or IP address of this machine visible to outside world
port_to_workers: int
Port on which to listen to connections from workers
port_to_clients: int
Port on which to listen to connections from clients
bind_to_workers: string
Addresses from which we accept worker connections, defaults to *
bind_to_clients: string
Addresses from which we accept client connections, defaults to *
block: bool
Whether or not to block the process on creation
State
-----
workers - dict
Maps worker identities to information about that worker
who_has - dict
Maps data keys to sets of workers that own that data
worker_has - dict
Maps workers to data that they own
data - dict
Maps data keys to metadata about the computation that produced it
to_workers - zmq.Socket (ROUTER)
Socket to communicate to workers
to_clients - zmq.Socket (ROUTER)
Socket to communicate with users
collections - dict
Dict holding shared collections like bags and arrays
"""
def __init__(self, port_to_workers=None, port_to_clients=None,
bind_to_workers='*', bind_to_clients='*',
hostname=None, block=False):
self.context = zmq.Context()
hostname = hostname or socket.gethostname()
# Bind routers to addresses (and create addresses if necessary)
self.to_workers = self.context.socket(zmq.ROUTER)
if port_to_workers is None:
port_to_workers = self.to_workers.bind_to_random_port('tcp://' + bind_to_workers)
else:
self.to_workers.bind('tcp://%s:%d' % (bind_to_workers, port_to_workers))
self.address_to_workers = ('tcp://%s:%d' % (hostname, port_to_workers)).encode()
self.worker_poller = zmq.Poller()
self.worker_poller.register(self.to_workers, zmq.POLLIN)
self.to_clients = self.context.socket(zmq.ROUTER)
if port_to_clients is None:
port_to_clients = self.to_clients.bind_to_random_port('tcp://' + bind_to_clients)
else:
self.to_clients.bind('tcp://%s:%d' % (bind_to_clients, port_to_clients))
self.address_to_clients = ('tcp://%s:%d' % (hostname, port_to_clients)).encode()
# Client state
self.clients = dict()
# State about my workers and computed data
self.workers = dict()
self.who_has = defaultdict(set)
self.worker_has = defaultdict(set)
self.available_workers = Queue()
self.data = defaultdict(dict)
self.collections = dict()
self.send_to_workers_queue = Queue()
self.send_to_workers_recv = self.context.socket(zmq.PAIR)
_port = self.send_to_workers_recv.bind_to_random_port('tcp://127.0.0.1')
self.send_to_workers_send = self.context.socket(zmq.PAIR)
self.send_to_workers_send.connect('tcp://127.0.0.1:%d' % _port)
self.worker_poller.register(self.send_to_workers_recv, zmq.POLLIN)
self.pool = ThreadPool(100)
self.lock = Lock()
self.status = 'run'
self.queues = dict()
self._schedule_lock = Lock()
# RPC functions that workers and clients can trigger
self.worker_functions = {'register': self._worker_registration,
'heartbeat': self._heartbeat,
'status': self._status_to_worker,
'finished-task': self._worker_finished_task,
'setitem-ack': self._setitem_ack,
'getitem-ack': self._getitem_ack}
self.client_functions = {'status': self._status_to_client,
'get_workers': self._get_workers,
'register': self._client_registration,
'schedule': self._schedule_from_client,
'set-collection': self._set_collection,
'get-collection': self._get_collection,
'close': self._close}
# Away we go!
log(self.address_to_workers, 'Start')
self._listen_to_workers_thread = Thread(target=self._listen_to_workers)
self._listen_to_workers_thread.start()
self._listen_to_clients_thread = Thread(target=self._listen_to_clients)
self._listen_to_clients_thread.start()
if block:
self.block()
def _listen_to_workers(self):
""" Event loop: Listen to worker router """
while self.status != 'closed':
try:
socks = dict(self.worker_poller.poll(100))
if not socks:
continue
except zmq.ZMQError:
break
if self.send_to_workers_recv in socks:
self.send_to_workers_recv.recv()
while not self.send_to_workers_queue.empty():
msg = self.send_to_workers_queue.get()
self.to_workers.send_multipart(msg)
self.send_to_workers_queue.task_done()
if self.to_workers in socks:
address, header, payload = self.to_workers.recv_multipart()
header = pickle.loads(header)
if 'address' not in header:
header['address'] = address
log(self.address_to_workers, 'Receive job from worker', header)
try:
function = self.worker_functions[header['function']]
except KeyError:
log(self.address_to_workers, 'Unknown function', header)
else:
future = self.pool.apply_async(function, args=(header, payload))
def _listen_to_clients(self):
""" Event loop: Listen to client router """
while self.status != 'closed':
try:
if not self.to_clients.poll(100): # is this threadsafe?
continue
except zmq.ZMQError:
break
with self.lock:
address, header, payload = self.to_clients.recv_multipart()
header = pickle.loads(header)
if 'address' not in header:
header['address'] = address
log(self.address_to_clients, 'Receive job from client', header)
try:
function = self.client_functions[header['function']]
except KeyError:
log(self.address_to_clients, 'Unknown function', header)
else:
self.pool.apply_async(function, args=(header, payload))
def block(self):
""" Block until listener threads close
Warning: If some other thread doesn't call `.close()` then, in the
common case you can not easily escape from this.
"""
self._listen_to_workers_thread.join()
self._listen_to_clients_thread.join()
def _client_registration(self, header, payload):
""" Client comes in, register it, send back info about the cluster"""
payload = pickle.loads(payload)
address = header['address']
self.clients[address] = payload
out_header = {}
out_payload = {'workers': self.workers}
self.send_to_client(header['address'], out_header, out_payload)
def _worker_registration(self, header, payload):
""" Worker came in, register them """
payload = pickle.loads(payload)
address = header['address']
self.workers[address] = payload
self.available_workers.put(address)
def _worker_finished_task(self, header, payload):
""" Worker reports back as having finished task, ready for more
See also:
Scheduler.trigger_task
Scheduler.schedule
"""
with logerrors():
address = header['address']
payload = pickle.loads(payload)
key = payload['key']
duration = payload['duration']
dependencies = payload['dependencies']
log(self.address_to_workers, 'Finish task', payload)
for dep in dependencies:
self.who_has[dep].add(address)
self.worker_has[address].add(dep)
self.available_workers.put(address)
if isinstance(payload['status'], Exception):
self.queues[payload['queue']].put(payload)
else:
self.data[key]['duration'] = duration
self.who_has[key].add(address)
self.worker_has[address].add(key)
self.queues[payload['queue']].put(payload)
def _status_to_client(self, header, payload):
with logerrors():
out_header = {'jobid': header.get('jobid')}
log(self.address_to_clients, 'Status')
self.send_to_client(header['address'], out_header, 'OK')
def _status_to_worker(self, header, payload):
out_header = {'jobid': header.get('jobid')}
log(self.address_to_workers, 'Status sending')
self.send_to_worker(header['address'], out_header, 'OK')
def send_to_worker(self, address, header, payload):
""" Send packet to worker """
log(self.address_to_workers, 'Send to worker', address, header)
header['address'] = self.address_to_workers
loads = header.get('loads', pickle.loads)
dumps = header.get('dumps', pickle.dumps)
if isinstance(address, unicode):
address = address.encode()
header['timestamp'] = datetime.utcnow()
self.send_to_workers_queue.put([address,
pickle.dumps(header),
dumps(payload)])
self.send_to_workers_send.send(b'')
def send_to_client(self, address, header, result):
""" Send packet to client """
log(self.address_to_clients, 'Send to client', address, header)
header['address'] = self.address_to_clients
loads = header.get('loads', pickle.loads)
dumps = header.get('dumps', pickle.dumps)
if isinstance(address, unicode):
address = address.encode()
header['timestamp'] = datetime.utcnow()
with self.lock:
self.to_clients.send_multipart([address,
pickle.dumps(header),
dumps(result)])
def trigger_task(self, dsk, key, queue):
""" Send a single task to the next available worker
See also:
Scheduler.schedule
Scheduler.worker_finished_task
"""
deps = get_dependencies(dsk, key)
worker = self.available_workers.get()
locations = dict((dep, self.who_has[dep]) for dep in deps)
header = {'function': 'compute', 'jobid': key,
'dumps': dill.dumps, 'loads': dill.loads}
payload = {'key': key, 'task': dsk[key], 'locations': locations,
'queue': queue}
self.send_to_worker(worker, header, payload)
def release_key(self, key):
""" Release data from all workers
Example
-------
>>> scheduler.release_key('x') # doctest: +SKIP
Protocol
--------
This sends a 'delitem' request to all workers known to have this key.
This operation is fire-and-forget. Local indices will be updated
immediately.
"""
with logerrors():
workers = list(self.who_has[key])
log(self.address_to_workers, 'Release data', key, workers)
header = {'function': 'delitem', 'jobid': key}
payload = {'key': key}
for worker in workers:
self.send_to_worker(worker, header, payload)
self.who_has[key].remove(worker)
self.worker_has[worker].remove(key)
def send_data(self, key, value, address=None, reply=True):
""" Send data up to some worker
If no address is given we select one worker randomly
Example
-------
>>> scheduler.send_data('x', 10) # doctest: +SKIP
>>> scheduler.send_data('x', 10, 'tcp://bob:5000', reply=False) # doctest: +SKIP
Protocol
--------
1. Scheduler makes a queue
2. Scheduler selects a worker at random (or uses prespecified worker)
3. Scheduler sends 'setitem' operation to that worker
{'key': ..., 'value': ..., 'queue': ...}
4. Worker gets data and stores locally, send 'setitem-ack'
{'key': ..., 'queue': ...}
5. Scheduler gets from queue, send_data cleans up queue and returns
See also:
Scheduler.setitem_ack
Worker.setitem
Scheduler.scatter
"""
if reply:
queue = Queue()
qkey = str(uuid.uuid1())
self.queues[qkey] = queue
else:
qkey = None
if address is None:
address = random.choice(list(self.workers))
header = {'function': 'setitem', 'jobid': key}
payload = {'key': key, 'value': value, 'queue': qkey}
self.send_to_worker(address, header, payload)
if reply:
queue.get()
del self.queues[qkey]
def scatter(self, key_value_pairs, block=True):
""" Scatter data to workers
Parameters
----------
key_value_pairs: Iterator or dict
Data to send
block: bool
Block on completion or return immediately (defaults to True)
Example
-------
>>> scheduler.scatter({'x': 1, 'y': 2}) # doctest: +SKIP
Protocol
--------
1. Scheduler starts up a uniquely identified queue.
2. Scheduler sends 'setitem' requests to workers with
{'key': ..., 'value': ... 'queue': ...}
3. Scheduler waits on queue for all responses
4. Workers receive 'setitem' requests, send back on 'setitem-ack' with
{'key': ..., 'queue': ...}
5. Scheduler's 'setitem-ack' function pushes keys into the queue
6. Once the same number of replies is heard scheduler scatter function
returns
7. Scheduler cleans up queue
See Also:
Scheduler.setitem_ack
Worker.setitem_scheduler
"""
workers = list(self.workers)
log(self.address_to_workers, 'Scatter', workers, key_value_pairs)
workers = itertools.cycle(workers)
if isinstance(key_value_pairs, dict):
key_value_pairs = key_value_pairs.items()
queue = Queue()
qkey = str(uuid.uuid1())
self.queues[qkey] = queue
counter = 0
for (k, v), w in zip(key_value_pairs, workers):
header = {'function': 'setitem', 'jobid': k}
payload = {'key': k, 'value': v}
if block:
payload['queue'] = qkey
self.send_to_worker(w, header, payload)
counter += 1
if block:
for i in range(counter):
queue.get()
del self.queues[qkey]
def gather(self, keys):
""" Gather data from workers
Parameters
----------
keys: key, list of keys, nested list of lists of keys
Keys to collect from workers
Example
-------
>>> scheduler.gather('x') # doctest: +SKIP
>>> scheduler.gather([['x', 'y'], ['z']]) # doctest: +SKIP
Protocol
--------
1. Scheduler starts up a uniquely identified queue.
2. Scheduler sends 'getitem' requests to workers with payloads
{'key': ..., 'queue': ...}
3. Scheduler waits on queue for all responses
3. Workers receive 'getitem' requests, send data back on 'getitem-ack'
{'key': ..., 'value': ..., 'queue': ...}
4. Scheduler's 'getitem-ack' function pushes key/value pairs onto queue
5. Once the same number of replies is heard the gather function
collects data into form specified by keys input and returns
6. Scheduler cleans up queue before returning
See Also:
Scheduler.getitem_ack
Worker.getitem_scheduler
"""
qkey = str(uuid.uuid1())
queue = Queue()
self.queues[qkey] = queue
# Send of requests
self._gather_send(qkey, keys)
# Wait for replies
cache = dict()
for i in flatten(keys):
k, v = queue.get()
cache[k] = v
del self.queues[qkey]
# Reshape to keys
return core.get(cache, keys)
def _gather_send(self, qkey, key):
if isinstance(key, list):
for k in key:
self._gather_send(qkey, k)
else:
header = {'function': 'getitem', 'jobid': key}
payload = {'key': key, 'queue': qkey}
seq = list(self.who_has[key])
worker = random.choice(seq)
self.send_to_worker(worker, header, payload)
def _getitem_ack(self, header, payload):
""" Receive acknowledgement from worker about a getitem request
See also:
Scheduler.gather
Worker.getitem
"""
payload = pickle.loads(payload)
log(self.address_to_workers, 'Getitem ack', payload['key'],
payload['queue'])
with logerrors():
assert header['status'] == 'OK'
self.queues[payload['queue']].put((payload['key'],
payload['value']))
def _setitem_ack(self, header, payload):
""" Receive acknowledgement from worker about a setitem request
See also:
Scheduler.scatter
Worker.setitem
"""
address = header['address']
payload = pickle.loads(payload)
key = payload['key']
self.who_has[key].add(address)
self.worker_has[address].add(key)
queue = payload.get('queue')
if queue:
self.queues[queue].put(key)
def close_workers(self):
header = {'function': 'close'}
for w in self.workers:
self.send_to_worker(w, header, {})
self.workers.clear()
self.send_to_workers_queue.join()
def _close(self, header, payload):
self.close()
def close(self):
""" Close Scheduler """
self.close_workers()
self.status = 'closed'
self.to_workers.close(linger=1)
self.to_clients.close(linger=1)
self.send_to_workers_send.close(linger=1)
self.send_to_workers_recv.close(linger=1)
self.pool.close()
self.pool.join()
self.block()
self.context.destroy(linger=3)
def schedule(self, dsk, result, **kwargs):
""" Execute dask graph against workers
Parameters
----------
dsk: dict
Dask graph
result: list
keys to return (possibly nested)
Example
-------
>>> scheduler.get({'x': 1, 'y': (add, 'x', 2)}, 'y') # doctest: +SKIP
3
Protocol
--------
1. Scheduler scatters precomputed data in graph to workers
e.g. nodes like ``{'x': 1}``. See Scheduler.scatter
2.
"""
with self._schedule_lock:
log(self.address_to_workers, "Scheduling dask")
if isinstance(result, list):
result_flat = set(flatten(result))
else:
result_flat = set([result])
results = set(result_flat)
cache = dict()
dag_state = dag_state_from_dask(dsk, cache=cache)
if cache:
self.scatter(cache.items()) # send data in dask up to workers
tick = [0]
if dag_state['waiting'] and not dag_state['ready']:
raise ValueError("Found no accessible jobs in dask graph")
event_queue = Queue()
qkey = str(uuid.uuid1())
self.queues[qkey] = event_queue
def fire_task():
tick[0] += 1 # Update heartbeat
# Choose a good task to compute
key = dag_state['ready'].pop()
dag_state['ready-set'].remove(key)
dag_state['running'].add(key)
self.trigger_task(dsk, key, qkey) # Fire
try:
worker = self.available_workers.get(timeout=20)
self.available_workers.put(worker) # put him back in
except Empty:
raise ValueError("Waited 20 seconds. No workers found")
# Seed initial tasks
while dag_state['ready'] and self.available_workers.qsize() > 0:
fire_task()
# Main loop, wait on tasks to finish, insert new ones
while dag_state['waiting'] or dag_state['ready'] or dag_state['running']:
payload = event_queue.get()
if isinstance(payload['status'], Exception):
raise payload['status']
key = payload['key']
finish_task(dsk, key, dag_state, results,
release_data=self._release_data)
while dag_state['ready'] and self.available_workers.qsize() > 0:
fire_task()
result2 = self.gather(result)
for key in flatten(result): # release result data from workers
self.release_key(key)
return result2
    def _schedule_from_client(self, header, payload):
        """ Handle a 'schedule' request arriving from a client

        Input Payload: keys, dask
        Output Payload: keys, result

        Sent to client on 'schedule-ack'
        """
        with logerrors():
            # The client may choose the deserializer; default to dill.
            loads = header.get('loads', dill.loads)
            payload = loads(payload)
            address = header['address']
            dsk = payload['dask']
            keys = payload['keys']
            header2 = {'jobid': header.get('jobid'),
                       'function': 'schedule-ack'}
            try:
                result = self.schedule(dsk, keys)
                header2['status'] = 'OK'
            except Exception as e:
                # Ship the exception back to the client instead of letting it
                # take down the scheduler loop.
                result = e
                header2['status'] = 'Error'
            payload2 = {'keys': keys, 'result': result}
            self.send_to_client(address, header2, payload2)
def _release_data(self, key, state, delete=True):
""" Remove data from temporary storage during scheduling run
See Also
Scheduler.schedule
dask.async.finish_task
"""
if key in state['waiting_data']:
assert not state['waiting_data'][key]
del state['waiting_data'][key]
state['released'].add(key)
if delete:
self.release_key(key)
    def _set_collection(self, header, payload):
        """Store a named collection sent by a client and send an empty ack."""
        with logerrors():
            log(self.address_to_clients, "Set collection", header)
            payload = header.get('loads', dill.loads)(payload)
            # The whole payload (not just a data field) is stored under its
            # name; _get_collection returns it verbatim.
            self.collections[payload['name']] = payload
            self.send_to_client(header['address'], {'status': 'OK'}, {})
    def _get_collection(self, header, payload):
        """Return a previously stored named collection to the client."""
        with logerrors():
            log(self.address_to_clients, "Get collection", header)
            # NOTE(review): the default deserializer here is pickle.loads,
            # while _set_collection defaults to dill.loads -- confirm this
            # asymmetry is intentional.
            payload = header.get('loads', pickle.loads)(payload)
            payload2 = self.collections[payload['name']]
            # Tell the client to use dill for the reply round-trip.
            header2 = {'status': 'OK',
                       'loads': dill.loads,
                       'dumps': dill.dumps}
            self.send_to_client(header['address'], header2, payload2)
    def _get_workers(self, header, payload):
        """Report the scheduler's current worker registry to the client."""
        with logerrors():
            log(self.address_to_clients, "Get workers", header)
            self.send_to_client(header['address'],
                                {'status': 'OK'},
                                {'workers': self.workers})
    def _heartbeat(self, header, payload):
        """Record the time a worker was last heard from (liveness tracking)."""
        with logerrors():
            log(self.address_to_clients, "Heartbeat", header)
            address = header['address']
            self.workers[address]['last-seen'] = datetime.utcnow()
| |
#
# The Python Imaging Library.
# $Id$
#
##
# Image plugin for Palm pixmap images (output only).
##
from . import Image, ImageFile
from ._binary import o8, o16be as o16b
# fmt: off
# The standard Palm 8-bit colormap: 231 distinct (r, g, b) entries followed
# by black padding entries up to 256 slots.
_Palm8BitColormapValues = (
    (255, 255, 255), (255, 204, 255), (255, 153, 255), (255, 102, 255),
    (255, 51, 255), (255, 0, 255), (255, 255, 204), (255, 204, 204),
    (255, 153, 204), (255, 102, 204), (255, 51, 204), (255, 0, 204),
    (255, 255, 153), (255, 204, 153), (255, 153, 153), (255, 102, 153),
    (255, 51, 153), (255, 0, 153), (204, 255, 255), (204, 204, 255),
    (204, 153, 255), (204, 102, 255), (204, 51, 255), (204, 0, 255),
    (204, 255, 204), (204, 204, 204), (204, 153, 204), (204, 102, 204),
    (204, 51, 204), (204, 0, 204), (204, 255, 153), (204, 204, 153),
    (204, 153, 153), (204, 102, 153), (204, 51, 153), (204, 0, 153),
    (153, 255, 255), (153, 204, 255), (153, 153, 255), (153, 102, 255),
    (153, 51, 255), (153, 0, 255), (153, 255, 204), (153, 204, 204),
    (153, 153, 204), (153, 102, 204), (153, 51, 204), (153, 0, 204),
    (153, 255, 153), (153, 204, 153), (153, 153, 153), (153, 102, 153),
    (153, 51, 153), (153, 0, 153), (102, 255, 255), (102, 204, 255),
    (102, 153, 255), (102, 102, 255), (102, 51, 255), (102, 0, 255),
    (102, 255, 204), (102, 204, 204), (102, 153, 204), (102, 102, 204),
    (102, 51, 204), (102, 0, 204), (102, 255, 153), (102, 204, 153),
    (102, 153, 153), (102, 102, 153), (102, 51, 153), (102, 0, 153),
    (51, 255, 255), (51, 204, 255), (51, 153, 255), (51, 102, 255),
    (51, 51, 255), (51, 0, 255), (51, 255, 204), (51, 204, 204),
    (51, 153, 204), (51, 102, 204), (51, 51, 204), (51, 0, 204),
    (51, 255, 153), (51, 204, 153), (51, 153, 153), (51, 102, 153),
    (51, 51, 153), (51, 0, 153), (0, 255, 255), (0, 204, 255),
    (0, 153, 255), (0, 102, 255), (0, 51, 255), (0, 0, 255),
    (0, 255, 204), (0, 204, 204), (0, 153, 204), (0, 102, 204),
    (0, 51, 204), (0, 0, 204), (0, 255, 153), (0, 204, 153),
    (0, 153, 153), (0, 102, 153), (0, 51, 153), (0, 0, 153),
    (255, 255, 102), (255, 204, 102), (255, 153, 102), (255, 102, 102),
    (255, 51, 102), (255, 0, 102), (255, 255, 51), (255, 204, 51),
    (255, 153, 51), (255, 102, 51), (255, 51, 51), (255, 0, 51),
    (255, 255, 0), (255, 204, 0), (255, 153, 0), (255, 102, 0),
    (255, 51, 0), (255, 0, 0), (204, 255, 102), (204, 204, 102),
    (204, 153, 102), (204, 102, 102), (204, 51, 102), (204, 0, 102),
    (204, 255, 51), (204, 204, 51), (204, 153, 51), (204, 102, 51),
    (204, 51, 51), (204, 0, 51), (204, 255, 0), (204, 204, 0),
    (204, 153, 0), (204, 102, 0), (204, 51, 0), (204, 0, 0),
    (153, 255, 102), (153, 204, 102), (153, 153, 102), (153, 102, 102),
    (153, 51, 102), (153, 0, 102), (153, 255, 51), (153, 204, 51),
    (153, 153, 51), (153, 102, 51), (153, 51, 51), (153, 0, 51),
    (153, 255, 0), (153, 204, 0), (153, 153, 0), (153, 102, 0),
    (153, 51, 0), (153, 0, 0), (102, 255, 102), (102, 204, 102),
    (102, 153, 102), (102, 102, 102), (102, 51, 102), (102, 0, 102),
    (102, 255, 51), (102, 204, 51), (102, 153, 51), (102, 102, 51),
    (102, 51, 51), (102, 0, 51), (102, 255, 0), (102, 204, 0),
    (102, 153, 0), (102, 102, 0), (102, 51, 0), (102, 0, 0),
    (51, 255, 102), (51, 204, 102), (51, 153, 102), (51, 102, 102),
    (51, 51, 102), (51, 0, 102), (51, 255, 51), (51, 204, 51),
    (51, 153, 51), (51, 102, 51), (51, 51, 51), (51, 0, 51),
    (51, 255, 0), (51, 204, 0), (51, 153, 0), (51, 102, 0),
    (51, 51, 0), (51, 0, 0), (0, 255, 102), (0, 204, 102),
    (0, 153, 102), (0, 102, 102), (0, 51, 102), (0, 0, 102),
    (0, 255, 51), (0, 204, 51), (0, 153, 51), (0, 102, 51),
    (0, 51, 51), (0, 0, 51), (0, 255, 0), (0, 204, 0),
    (0, 153, 0), (0, 102, 0), (0, 51, 0), (17, 17, 17),
    (34, 34, 34), (68, 68, 68), (85, 85, 85), (119, 119, 119),
    (136, 136, 136), (170, 170, 170), (187, 187, 187), (221, 221, 221),
    (238, 238, 238), (192, 192, 192), (128, 0, 0), (128, 0, 128),
    (0, 128, 0), (0, 128, 128), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0))
# fmt: on
# so build a prototype image to be used for palette resampling
def build_prototype_image():
    """Build a 256-entry "P"-mode prototype image for palette resampling.

    Pixel value ``i`` maps to ``_Palm8BitColormapValues[i]``; the palette is
    padded with black up to 256 entries.
    """
    image = Image.new("L", (1, len(_Palm8BitColormapValues)))
    image.putdata(list(range(len(_Palm8BitColormapValues))))
    # Flatten the (r, g, b) triples into one flat sequence.  Building a list
    # in a single pass replaces the original quadratic ``tuple += tuple``
    # accumulation; putpalette accepts any flat integer sequence.
    palettedata = [component for rgb in _Palm8BitColormapValues for component in rgb]
    palettedata += [0, 0, 0] * (256 - len(_Palm8BitColormapValues))
    image.putpalette(palettedata)
    return image
# Built once at import time and shared by all users of this module.
Palm8BitColormapImage = build_prototype_image()
# OK, we now have in Palm8BitColormapImage,
# a "P"-mode image with the right palette
#
# --------------------------------------------------------------------
# Bit flags stored in the Palm image header.
_FLAGS = {"custom-colormap": 0x4000, "is-compressed": 0x8000, "has-transparent": 0x2000}
# Header byte values for the compression type (this plugin writes "none").
_COMPRESSION_TYPES = {"none": 0xFF, "rle": 0x01, "scanline": 0x00}
#
# --------------------------------------------------------------------
##
# (Internal) Image save plugin for the Palm format.
def _save(im, fp, filename):
    """Write ``im`` to the binary file ``fp`` in the Palm pixmap format.

    Supported input modes:

    * "P" -- 8-bit paletted (standard Palm colormap assumed unless the
      image's ``info`` dict has a "custom-colormap" entry)
    * "L" -- 8-bit grayscale, reduced to 1/2/4 bpp via a ``bpp`` hint in
      ``encoderinfo`` or ``info``
    * "1" -- monochrome (written inverted, per the Palm standard)

    Raises OSError for any other mode.  ``filename`` is unused (required by
    the PIL save-plugin signature).
    """
    if im.mode == "P":
        # we assume this is a color Palm image with the standard colormap,
        # unless the "info" dict has a "custom-colormap" field
        rawmode = "P"
        bpp = 8
        version = 1
    elif im.mode == "L":
        if im.encoderinfo.get("bpp") in (1, 2, 4):
            # this is 8-bit grayscale, so we shift it to get the high-order bits,
            # and invert it because
            # Palm does greyscale from white (0) to black (1)
            bpp = im.encoderinfo["bpp"]
            im = im.point(
                lambda x, shift=8 - bpp, maxval=(1 << bpp) - 1: maxval - (x >> shift)
            )
        elif im.info.get("bpp") in (1, 2, 4):
            # here we assume that even though the inherent mode is 8-bit grayscale,
            # only the lower bpp bits are significant.
            # We invert them to match the Palm.
            bpp = im.info["bpp"]
            im = im.point(lambda x, maxval=(1 << bpp) - 1: maxval - (x & maxval))
        else:
            raise OSError("cannot write mode %s as Palm" % im.mode)
        # we ignore the palette here
        im.mode = "P"
        rawmode = "P;" + str(bpp)
        version = 1
    elif im.mode == "1":
        # monochrome -- write it inverted, as is the Palm standard
        rawmode = "1;I"
        bpp = 1
        version = 0
    else:
        raise OSError("cannot write mode %s as Palm" % im.mode)
    #
    # make sure image data is available
    im.load()
    # write header
    cols = im.size[0]
    rows = im.size[1]
    # bytes per row, padded to a 16-bit word boundary
    rowbytes = int((cols + (16 // bpp - 1)) / (16 // bpp)) * 2
    transparent_index = 0
    compression_type = _COMPRESSION_TYPES["none"]
    flags = 0
    if im.mode == "P" and "custom-colormap" in im.info:
        # BUGFIX: was ``flags = flags & _FLAGS["custom-colormap"]`` which,
        # with flags == 0, always produced 0 -- the custom-colormap flag was
        # never actually set in the header.  OR the bit in instead.
        flags = flags | _FLAGS["custom-colormap"]
        colormapsize = 4 * 256 + 2
        colormapmode = im.palette.mode
        colormap = im.getdata().getpalette()
    else:
        colormapsize = 0
    if "offset" in im.info:
        # offset to the next frame, expressed in 4-byte words
        offset = (rowbytes * rows + 16 + 3 + colormapsize) // 4
    else:
        offset = 0
    fp.write(o16b(cols) + o16b(rows) + o16b(rowbytes) + o16b(flags))
    fp.write(o8(bpp))
    fp.write(o8(version))
    fp.write(o16b(offset))
    fp.write(o8(transparent_index))
    fp.write(o8(compression_type))
    fp.write(o16b(0))  # reserved by Palm
    # now write colormap if necessary
    if colormapsize > 0:
        fp.write(o16b(256))
        for i in range(256):
            fp.write(o8(i))
            if colormapmode == "RGB":
                fp.write(
                    o8(colormap[3 * i])
                    + o8(colormap[3 * i + 1])
                    + o8(colormap[3 * i + 2])
                )
            elif colormapmode == "RGBA":
                # alpha is dropped; Palm colormap entries are RGB only
                fp.write(
                    o8(colormap[4 * i])
                    + o8(colormap[4 * i + 1])
                    + o8(colormap[4 * i + 2])
                )
    # now convert data to raw form
    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, rowbytes, 1))])
    if hasattr(fp, "flush"):
        fp.flush()
#
# --------------------------------------------------------------------
# Register the save-only Palm plugin (no loader is provided).
Image.register_save("Palm", _save)
Image.register_extension("Palm", ".palm")
Image.register_mime("Palm", "image/palm")
| |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Mon 13 Aug 2012 16:19:18 CEST
"""This module defines, among other less important constructions, a management
interface that can be used by Bob to display information about the database and
manage installed files.
"""
import os
import abc
import six
def dbshell(arguments):
    """Drops you into a database shell

    Launches ``sqlite3`` on the single SQLite file declared by the package.
    Returns the subprocess' exit code (or 0 on ``--dry-run``); exits the
    interpreter if the shell program cannot be executed.
    """
    if len(arguments.files) != 1:
        # BUGFIX: was ``argument.files`` -- an undefined name that turned
        # this error path into a NameError.
        raise RuntimeError(
            "Something is wrong this database is supposed to be of type SQLite, but you have more than one data file available: %s" % arguments.files)
    if arguments.type == 'sqlite':
        prog = 'sqlite3'
    else:
        # BUGFIX: was ``dbfile`` -- an undefined name that turned this error
        # path into a NameError.
        raise RuntimeError("Error auxiliary database file '%s' cannot be used to initiate a database shell connection (type='%s')" % (
            arguments.files[0], arguments.type))
    cmdline = [prog, arguments.files[0]]
    import subprocess
    try:
        if arguments.dryrun:
            print("[dry-run] exec '%s'" % ' '.join(cmdline))
            return 0
        else:
            p = subprocess.Popen(cmdline)
    except OSError as e:
        # occurs when the file is not executable or not found
        print("Error executing '%s': %s (%d)" % (' '.join(cmdline), e.strerror,
                                                 e.errno))
        import sys
        sys.exit(e.errno)
    try:
        p.communicate()
    except KeyboardInterrupt:  # the user CTRL-C'ed
        import signal
        os.kill(p.pid, signal.SIGTERM)
        return signal.SIGTERM
    return p.returncode
def dbshell_command(subparsers):
    """Adds a new dbshell subcommand to your subparser"""
    p = subparsers.add_parser('dbshell', help=dbshell.__doc__)
    p.add_argument(
        "-n", "--dry-run",
        dest="dryrun",
        action='store_true',
        default=False,
        help="does not actually run, just prints what would do instead")
    p.set_defaults(func=dbshell)
def upload(arguments):
    """Uploads generated metadata to the Idiap build server

    Packs ``arguments.files`` into a ``.tar.bz2`` archive and copies it to a
    local path (``file://`` or schemeless URLs) or PUTs it to a WebDAV
    server over HTTP(S) with Basic authentication.
    """
    import pkg_resources
    basedir = pkg_resources.resource_filename('bob.db.%s' % arguments.name, '')
    assert basedir, "Database and package names do not match. Your declared " \
        "database name should be <name>, if your package is called bob.db.<name>"
    # check all files exist
    for p in arguments.files:
        if not os.path.exists(p):
            raise IOError("Metadata file `%s' is not available. Did you run "
                          "`create' before attempting to upload?" % (p,))
    # compress
    import tarfile
    import tempfile
    import six.moves.urllib
    import six.moves.http_client
    import shutil
    parsed_url = six.moves.urllib.parse.urlparse(arguments.url)
    with tempfile.TemporaryFile() as tmpfile:
        # if you get here, all files are there, ready to package
        print("Compressing metadata files to temporary file...")
        f = tarfile.open(fileobj=tmpfile, mode='w:bz2')
        for k, p in enumerate(arguments.files):
            # archive members are stored relative to the package root
            n = os.path.relpath(p, basedir)
            print("+ [%d/%d] %s" % (k + 1, len(arguments.files), n))
            f.add(p, n)
        f.close()
        tmpfile.seek(0)
        # print what we are going to do
        target_path = '/'.join((parsed_url.path, arguments.name + ".tar.bz2"))
        print("Uploading protocol files to %s" % target_path)
        if parsed_url.scheme in ('', 'file'):  # local file upload
            try:
                shutil.copyfileobj(tmpfile, open(target_path, 'wb'))
                return
            except (shutil.Error, IOError) as e:
                # maybe no file location? try next steps
                print("Error: %s" % e)
        # if you get to this point, it is because it is a network transfer
        if parsed_url.scheme == 'https':
            dav_server = six.moves.http_client.HTTPSConnection(parsed_url.netloc)
        else:
            dav_server = six.moves.http_client.HTTPConnection(parsed_url.netloc)
        # copy tmpfile to DAV server
        import base64
        import getpass
        from six.moves import input
        print("Authorization requested by server %s" % parsed_url.netloc)
        username = input('Username: ')
        username = username.encode('ascii')
        password = getpass.getpass(prompt='Password: ')
        password = password.encode('ascii')
        # BUGFIX: base64.encodestring was removed in Python 3.9.  b64encode
        # exists on both Python 2 and 3 and, unlike encodestring, never
        # inserts newlines into the credential string (as HTTP Basic auth
        # requires), so the trailing ``[:-1]`` strip is no longer needed.
        upass = base64.b64encode(b'%s:%s' % (username, password)).decode('ascii')
        headers = {'Authorization': 'Basic %s' % upass}
        dav_server.request('PUT', target_path, tmpfile, headers=headers)
        res = dav_server.getresponse()
        response = res.read()
        dav_server.close()
        if not (200 <= res.status < 300):
            raise IOError(response)
        else:
            print("Uploaded %s (status: %d)" % (target_path, res.status))
def upload_command(subparsers):
    """Adds a new 'upload' subcommand to your parser"""
    # default destination for the file to be uploaded is on the local directory
    default_url = 'file://' + os.path.realpath(os.curdir)
    p = subparsers.add_parser('upload', help=upload.__doc__)
    p.add_argument(
        "url", nargs='?', default=default_url,
        help="Pass the URL for uploading your contribution (if not set, "
             "uses default: '%(default)s')")
    p.set_defaults(func=upload)
    return p
def download(arguments):
    """Downloads and uncompresses meta data generated files from Idiap

    Parameters:

      arguments (argparse.Namespace): A set of arguments passed by the
        command-line parser

    Returns:

      int: A POSIX compliant return value of ``0`` if the download is successful,
      or ``1`` in case it is not.

    Raises:

      IOError: if metafiles exist and ``--force`` was not passed

      urllib2.HTTPError: if the target resource does not exist on the webserver
    """
    # What should happen as a combination of flags. Legend:
    #
    # 0 - Exit, with status 0
    # X - Download, overwrite if there
    # R - Raise exception, err
    #
    # +----------+-----------+----------+--------+
    # | complete | --missing | --force  | none   |
    # +----------+-----------+----------+--------+
    # |   yes    |    0      |    X     |   R    |
    # +----------+-----------+----------+--------+
    # |   no     |    X      |    X     |   X    |
    # +----------+-----------+----------+--------+
    if not arguments.files:
        print("Skipping download of metadata files for bob.db.%s: no files "
              "declared" % arguments.name)
        # BUGFIX: previously fell through here; an empty file list is
        # trivially "complete", so the code below raised IOError instead of
        # exiting cleanly.  Nothing to download -- stop now.
        return 0
    # Check we're complete in terms of metafiles
    complete = True
    for p in arguments.files:
        if not os.path.exists(p):
            complete = False
            break
    if complete:
        if arguments.missing:
            print("Skipping download of metadata files for `bob.db.%s': complete" %
                  arguments.name)
            return 0
        elif arguments.force:
            print("Re-downloading metafiles for `bob.db.%s'" % arguments.name)
        else:
            raise IOError("Metadata files are already available. Remove metadata "
                          "files before attempting download or --force")
    # if you get here, all files aren't there, unpack
    source_url = os.path.join(arguments.source, arguments.name + ".tar.bz2")
    target_dir = arguments.test_dir  # test case
    if not target_dir:  # puts files on the root of the installed package
        import pkg_resources
        try:
            target_dir = pkg_resources.resource_filename('bob.db.%s' %
                                                         arguments.name, '')
        except ImportError:
            raise ImportError("The package `bob.db.%s' is not currently "
                              "installed. N.B.: The database and package names **must** "
                              "match. Your package should be named `bob.db.%s', if the driver "
                              "name for your database is `%s'. Check." % (3 * (arguments.name,)))
    # download file from Idiap server, unpack and remove it
    import sys
    import tempfile
    import tarfile
    from .utils import safe_tarmembers
    if sys.version_info[0] <= 2:
        import urllib2 as urllib
    else:
        import urllib.request as urllib
    print("Extracting url `%s' into `%s'" % (source_url, target_dir))
    u = urllib.urlopen(source_url)
    f = tempfile.NamedTemporaryFile(suffix=".tar.bz2")
    open(f.name, 'wb').write(u.read())
    # safe_tarmembers filters the archive member list before extraction
    t = tarfile.open(fileobj=f, mode='r:bz2')
    members = list(safe_tarmembers(t))
    for k, m in enumerate(members):
        print("x [%d/%d] %s" % (k + 1, len(members), m.name,))
        t.extract(m, target_dir)
    t.close()
    f.close()
    # success: report a POSIX-compliant exit status, as documented
    return 0
def download_command(subparsers):
    """Adds a new 'download' subcommand to your parser"""
    from argparse import SUPPRESS
    # honor an alternative documentation server when set in the environment
    server = os.environ.get('DOCSERVER', 'https://www.idiap.ch')
    p = subparsers.add_parser('download', help=download.__doc__)
    p.add_argument("--source",
                   default="%s/software/bob/databases/latest/" % server)
    group = p.add_mutually_exclusive_group(required=False)
    group.add_argument("--force", action='store_true', default=False,
                       help="Overwrite existing database files?")
    group.add_argument("--missing", action='store_true', default=False,
                       help="Only downloads if files are missing")
    # hidden option used by the test suite to redirect extraction
    p.add_argument("--test-dir", help=SUPPRESS)
    p.set_defaults(func=download)
    return p
def print_files(arguments):
    """Prints the current location of raw database files."""
    print("Files for database '%s':" % arguments.name)
    for filename in arguments.files:
        print(filename)
    return 0
def files_command(subparsers):
    """Adds a new 'files' subcommand to your parser"""
    p = subparsers.add_parser('files', help=print_files.__doc__)
    p.set_defaults(func=print_files)
    return p
def version(arguments):
    """Outputs the database version"""
    text = '%s == %s' % (arguments.name, arguments.version)
    print(text)
    return 0
def version_command(subparsers):
    """Registers the stock 'version' subcommand."""
    p = subparsers.add_parser('version', help=version.__doc__)
    p.set_defaults(func=version)
    return p
@six.add_metaclass(abc.ABCMeta)
class Interface(object):
    """Base manager for Bob databases

    You should derive and implement an Interface object on every ``bob.db``
    package you create.
    """

    @abc.abstractmethod
    def name(self):
        '''The name of this database

        Returns:

          str: a Python-conforming name for this database. This **must** match the
          package name. If the package is named ``bob.db.foo``, then this function
          must return ``foo``.
        '''
        return

    @abc.abstractmethod
    def files(self):
        '''List of meta-data files for the package to be downloaded/uploaded

        This function should normally return an empty list, except in case the
        database being implemented requires download/upload of metadata files that
        are **not** kept in its (git) repository.

        Returns:

          list: A python iterable with all metadata files needed. The paths listed
          by this method should correspond to full paths (not relative ones) w.r.t.
          the database package implementing it. This is normally achieved by using
          ``pkg_resources.resource_filename()``.
        '''
        return

    @abc.abstractmethod
    def version(self):
        '''The version of this package

        Returns:

          str: The current version number defined in ``setup.py``
        '''
        return

    @abc.abstractmethod
    def type(self):
        '''The type of auxiliary files you have for this database

        Returns:

          str: A string defining the type of database implemented. You can return
          only two values on this function, either ``sqlite`` or ``text``. If you
          return ``sqlite``, then we append special actions such as ``dbshell`` on
          ``bob_dbmanage`` automatically for you. Otherwise, we don't.
        '''
        return

    def setup_parser(self, parser, short_description, long_description):
        '''Sets up the base parser for this database.

        Parameters:

          short_description (str): A short description (one-liner) for this
            database

          long_description (str): A more involved explanation of this database

        Returns:

          argparse.ArgumentParser: a subparser, ready so you can add commands on
        '''
        from argparse import RawDescriptionHelpFormatter
        # creates a top-level parser for this database
        top_level = parser.add_parser(self.name(),
                                      formatter_class=RawDescriptionHelpFormatter,
                                      help=short_description, description=long_description)
        type = self.type()
        files = self.files()
        # make name/version/type/files available to every subcommand handler
        top_level.set_defaults(name=self.name())
        top_level.set_defaults(version=self.version())
        top_level.set_defaults(type=type)
        top_level.set_defaults(files=files)
        subparsers = top_level.add_subparsers(title="subcommands")
        # adds some stock commands
        version_command(subparsers)
        # upload/download only make sense when metadata files are declared
        if files:
            upload_command(subparsers)
            download_command(subparsers)
        # sqlite databases additionally get the interactive shell command
        if type in ('sqlite',):
            dbshell_command(subparsers)
        if files is not None:
            files_command(subparsers)
        return subparsers

    @abc.abstractmethod
    def add_commands(self, parser):
        '''Adds commands to a given :py:class:`argparse.ArgumentParser`

        This method, effectively, allows you to define special commands that your
        database will be able to perform when called from the common driver like
        for example ``create`` or ``checkfiles``.

        You are not obliged to overwrite this method. If you do, you will have the
        chance to establish your own commands. You don't have to worry about stock
        commands such as :py:meth:`files` or :py:meth:`version`. They will be
        automatically hooked-in depending on the values you return for
        :py:meth:`type` and :py:meth:`files`.

        Parameters:

          parser (argparse.ArgumentParser): An instance of a parser that you can
            customize, i.e., call :py:meth:`argparse.ArgumentParser.add_argument`
            on.
        '''
        return
__all__ = ('Interface',)
| |
# Libraries built in to Google Apps Engine
import sys
import os
import cgi
import webapp2
import urllib2
import ee
import config
import json
# Libraries that we need to provide ourselves in the libs folder
# Make the bundled third-party libraries (in ./libs) importable.
rootdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(rootdir, 'libs'))
from bs4 import BeautifulSoup
# TODO: Move this!
# Base URL where the daily flood-detection KML results are published.
STORAGE_URL = 'http://byss.arc.nasa.gov/smcmich1/cmt_detections/'
feed_url = STORAGE_URL + 'daily_flood_detect_feed.kml'
# Go ahead and load the HTML files for later use.
with open('index.html', 'r') as f:
    PAGE_HTML = f.read()
with open('map.html', 'r') as f:
    MAP_HTML = f.read()
def renderHtml(html, pairList):
    '''Simple alternative to html template rendering software

    Replaces each (placeholder, value) pair in the template, in order.'''
    for placeholder, replacement in pairList:
        html = html.replace(placeholder, replacement)
    return html
def fetchDateList(datesOnly=False):
    '''Fetches the list of available dates

    Scrapes the STORAGE_URL index page for links.  When ``datesOnly`` is
    True, the raw link strings are returned; otherwise each linked sub-page
    is also scraped and "date__location" strings are returned, one per KML
    file found.
    '''
    dateList = []
    parsedIndexPage = BeautifulSoup(urllib2.urlopen(STORAGE_URL).read(), 'html.parser')
    for line in parsedIndexPage.findAll('a'):
        dateString = line.string
        if datesOnly:
            dateList.append(dateString)
            continue
        # Else look through each page so we can make date__location pairs.
        subUrl = STORAGE_URL + dateString
        try:
            parsedSubPage = BeautifulSoup(urllib2.urlopen((subUrl)).read(), 'html.parser')
            # NOTE(review): this inner loop reuses ``line`` from the outer
            # loop; harmless because ``dateString`` was saved, but fragile.
            for line in parsedSubPage.findAll('a'):
                kmlName = line.string
                info = extractInfoFromKmlUrl(kmlName)
                # Store combined date/location string.
                displayString = dateString +'__'+ info['location']
                dateList.append(displayString)
        except:
            # Deliberate best-effort: ignore pages that we fail to parse.
            pass
    return dateList
def getKmlUrlsForKey(key):
    '''Fetches all the kml files from a given date.

    If the dateString includes a location, only fetch the matching URL.
    Otherwise return all URLs for that date.'''
    # The key contains the date and optionally the location,
    # joined as "date__location".
    if '__' in key:
        parts = key.split('__')
        dateString = parts[0]
        location = parts[1]
    else:
        dateString = key
        location = None
    kmlList = []
    subUrl = STORAGE_URL + dateString
    parsedSubPage = BeautifulSoup(urllib2.urlopen((subUrl)).read(), 'html.parser')
    for line in parsedSubPage.findAll('a'):
        kmlName = line.string
        fullUrl = os.path.join(subUrl, kmlName)
        # If this location matches a provided location, just return this URL.
        if location and (location in kmlName):
            return [fullUrl]
        else:
            kmlList.append(fullUrl)
    return kmlList
def extractInfoFromKmlUrl(url):
    '''Extract the information encoded in the KML filename into a dictionary.

    Returns {'location', 'sensors', 'lon', 'lat'}; sensors is '' when the
    name does not carry a sensor field.'''
    # Format is: 'STUFF/results_location_SENSORS_%05f_%05f.kml'
    # Get just the kml name.  str.rfind returns -1 when there is no slash,
    # so slicing from rfind+1 handles both cases uniformly.
    # BUGFIX: the old ``if rslash:`` test treated rfind's -1 ("not found")
    # as found and a slash at index 0 as not found.
    filename = url[url.rfind('/') + 1:]
    # Split up the kml name
    parts = filename.split('_')
    parts[-1] = parts[-1].replace('.kml', '')
    location = parts[1]
    # A five-field name carries a sensor abbreviation in the middle.
    if len(parts) == 5:
        sensors = parts[2]
    else:
        sensors = ''
    lon = float(parts[-2])
    lat = float(parts[-1])
    # Pack the results into a dictionary
    return {'location': location, 'sensors': sensors, 'lon': lon, 'lat': lat}
def fetchKmlDescription(url):
    '''Read the description field from the kml file

    Returns the JSON dict parsed from the first <description> element, or an
    empty dict when the text is not valid JSON.
    NOTE(review): if the file has no <description> element at all, the loop
    body never runs and the function implicitly returns None -- confirm
    callers handle that.
    '''
    ## If any of these fields are not found, replace with a placeholder.
    #EXPECTED_FIELDS = ['modis_id', 'landsat_id', 'sentinel1_id']
    #EMPTY_FIELD_TAG = 'None'
    # Try to read in the description string
    kmlText = urllib2.urlopen(url).read()
    parsedFile = BeautifulSoup(kmlText)
    for line in parsedFile.findAll('description'):
        text = line.string
        # Parse the JSON data if it exists
        try:
            info = json.loads(text)
        except:
            info = dict()
        ## Fill in missing fields
        #for f in EXPECTED_FIELDS:
        #    if not (f in info):
        #        info[f] = EMPTY_FIELD_TAG
        # Only the first description element is considered.
        return info
def expandSensorsList(sensors):
    '''Expand the abbreviated sensor list to full sensor names'''
    known = [('Modis', 'M'), ('Landsat', 'L'), ('Sentinel-1', 'S')]
    matched = [full for full, abbrev in known if abbrev in sensors]
    string = ''.join(' ' + name for name in matched)
    if not string:
        string = 'Error: Sensor list "'+sensors+'" not parsed!'
    return string
def getLayerInfo(kmlInfo):
    '''Given the parsed KML description object, set up EE layer info

    Builds one {mapid, label, token} entry per satellite image id present in
    ``kmlInfo`` (MODIS, Landsat, Sentinel-1), using Earth Engine getMapId.
    '''
    # The information is already in an easy to use format
    # TODO: Refine the display parameters?
    layers = []
    if 'modis_image_id_A' in kmlInfo:
        # Combine the two MODIS products so band sur_refl_b06 is available.
        modisA = ee.Image(kmlInfo['modis_image_id_A'])
        modisQ = ee.Image(kmlInfo['modis_image_id_Q'])
        modis = modisQ.addBands(modisA, ['sur_refl_b06'])
        modis_visualization = modis.getMapId({
            'min': 0,
            'max': 3000,
            'bands': 'sur_refl_b01, sur_refl_b02, sur_refl_b06'
        })
        layers.append({
            'mapid': modis_visualization['mapid'],
            'label': 'modis',
            'token': modis_visualization['token']
        })
    if 'landsat_image_id' in kmlInfo:
        landsat = ee.Image(kmlInfo['landsat_image_id'])
        # Pick the correct bands for this satellite
        bands = 'B3, B2, B1'
        if 'LC8' in kmlInfo['landsat_image_id']:
            # Landsat 8 uses a shifted band numbering for RGB.
            bands = 'B4, B3, B2'
        landsat_visualization = landsat.getMapId({
            'min': 0,
            'max': 0.75,
            'bands': bands
        })
        layers.append({
            'mapid': landsat_visualization['mapid'],
            'label': 'landsat',
            'token': landsat_visualization['token']
        })
    if 'sentinel1_image_id' in kmlInfo:
        sentinel1 = ee.Image(kmlInfo['sentinel1_image_id'])
        sentinel1_visualization = sentinel1.getMapId({
            'min': -30,
            'max': 5,
            # use whichever polarization band comes first
            'bands': sentinel1.bandNames().getInfo()[0]
        })
        layers.append({
            'mapid': sentinel1_visualization['mapid'],
            'label': 'sentinel1',
            'token': sentinel1_visualization['token']
        })
    return layers
class GetMapData(webapp2.RequestHandler):
    """Retrieves EE data on request.

    Demo endpoint: returns a JSON list of {mapid, label, token} layer
    descriptors (MODIS landcover + a Landsat composite).
    """

    def get(self):
        ee.Initialize(config.EE_CREDENTIALS)
        layers = []  # We will fill this up with EE layer information
        # Use the MCD12 land-cover as training data.
        modis_landcover = ee.Image('MCD12Q1/MCD12Q1_005_2001_01_01').select('Land_Cover_Type_1')
        # A palette to use for visualizing landcover images.
        modis_landcover_palette = ','.join([
            'aec3d4',  # water
            '152106', '225129', '369b47', '30eb5b', '387242',  # forest
            '6a2325', 'c3aa69', 'b76031', 'd9903d', '91af40',  # shrub, grass and
                                                               # savanah
            '111149',  # wetlands
            '8dc33b',  # croplands
            'cc0013',  # urban
            '6ca80d',  # crop mosaic
            'd7cdcc',  # snow and ice
            'f7e084',  # barren
            '6f6f6f'   # tundra
        ])
        # A set of visualization parameters using the landcover palette.
        modis_landcover_visualization_options = {
            'palette': modis_landcover_palette,
            'min': 0,
            'max': 17,
            'format': 'png'
        }
        # Add the MODIS landcover image.
        modis_landcover_visualization = modis_landcover.getMapId(modis_landcover_visualization_options)
        layers.append({
            'mapid': modis_landcover_visualization['mapid'],
            'label': 'MODIS landcover',
            'token': modis_landcover_visualization['token']
        })
        # Add the Landsat composite, visualizing just the [30, 20, 10] bands.
        landsat_composite = ee.Image('L7_TOA_1YEAR_2000')
        landsat_composite_visualization = landsat_composite.getMapId({
            'min': 0,
            'max': 100,
            'bands': ','.join(['30', '20', '10'])
        })
        layers.append({
            'mapid': landsat_composite_visualization['mapid'],
            'label': 'Landsat composite',
            'token': landsat_composite_visualization['token']
        })
        text = json.dumps(layers)
        # Python 2 print statement: echoes the payload to the server log.
        print text
        self.response.out.write(text)
class MainPage(webapp2.RequestHandler):
    '''The splash page that the user sees when they access the site'''

    def get(self):
        # Grab all dates where data is available
        self._dateList = fetchDateList()
        # Build the list of date options
        optionText = ''
        for dateString in self._dateList:
            # "date__location" is shown as "date location" in the dropdown
            optionText += '<option>'+dateString.replace('_',' ')+'</option>'
        # Insert the option section, leave the output section empty.
        self._htmlText = renderHtml(PAGE_HTML, [('[OPTION_SECTION]', optionText),
                                                ('[OUTPUT_SECTION]', ''),
                                                ('[FEED_URL]', feed_url)])
        # Write the output
        self.response.write(self._htmlText)
# HTML radio-button snippets inserted into the map page for each sensor
# that has data available (see MapPage.post).
MAP_MODIS_RADIO_SNIPPET = '<input type="radio" name="image" value="modis" > MODIS<br>'
MAP_LANDSAT_RADIO_SNIPPET = '<input type="radio" name="image" value="landsat" > Landsat<br>'
MAP_SENTINEL1_RADIO_SNIPPET = '<input type="radio" name="image" value="sentinel1" > Sentinel-1<br>'
class MapPage(webapp2.RequestHandler):
    '''Similar to the main page, but with a map displayed.'''

    def post(self):
        # Init demo ee image
        ee.Initialize(config.EE_CREDENTIALS)
        #mapid = ee.Image('srtm90_v4').getMapId({'min': 0, 'max': 1000})
        # Grab all dates where data is available
        self._dateList = fetchDateList()
        # Build the list of date options
        optionText = ''
        for dateString in self._dateList:
            optionText += '<option>'+dateString.replace('_',' ')+'</option>'
        # Insert the options section
        # SECURITY(review): the Google Maps API key is hardcoded here (and
        # again below) -- consider moving it into config.
        self._htmlText = renderHtml(PAGE_HTML, [('[OPTION_SECTION]', optionText),
                                                ('[API_KEY]', 'AIzaSyAlcB6oaJeUdTz3I97cL47tFLIQfSu4j58'),
                                                ('[FEED_URL]', feed_url)])
        # Fetch user selection
        dateLocString = self.request.get('date_select', 'default_date!')
        # This should only return one URL, provided that the location is included in dateLocString
        try:
            kmlUrls = getKmlUrlsForKey(dateLocString.replace(' ', '__'))
        except:
            # best-effort: any scraping/parsing failure means "no data"
            kmlUrls = None
        if not kmlUrls:
            #newText = 'No KML files were found for this date!'
            newText = dateLocString
        else:
            # Prepare the map HTML with the data we found
            kmlUrl = kmlUrls[0]
            kmlUrlInfo = extractInfoFromKmlUrl(kmlUrl)  # TODO: Clean this up!
            detailedInfo = fetchKmlDescription(kmlUrl)  # TODO: Get all info from here!
            layerInfo = getLayerInfo(detailedInfo)
            sensorList = expandSensorsList(kmlUrlInfo['sensors'])
            # Only show radio buttons for sensors with available data.
            (modisRadioText, landsatRadioText, sentinel1RadioText) = ('', '', '')
            if 'Modis' in sensorList:
                modisRadioText = MAP_MODIS_RADIO_SNIPPET
            if 'Landsat' in sensorList:
                landsatRadioText = MAP_LANDSAT_RADIO_SNIPPET
            if 'Sentinel-1' in sensorList:
                sentinel1RadioText = MAP_SENTINEL1_RADIO_SNIPPET
            detailedInfo['layers'] = layerInfo
            #raise Exception(json.dumps(detailedInfo))
            newText = renderHtml(MAP_HTML, [#('[EE_MAPID]', mapid['mapid']),
                                            #('[EE_TOKEN]', mapid['token']),
                                            ('[API_KEY]', 'AIzaSyAlcB6oaJeUdTz3I97cL47tFLIQfSu4j58'),
                                            ('[MAP_TITLE]', dateLocString),
                                            ('[KML_URL]', kmlUrl),
                                            #('[MODIS_ID]', detailedInfo['modis_id']),
                                            ('[RADIO_SECTION_MODIS]', modisRadioText),
                                            ('[RADIO_SECTION_LANDSAT]', landsatRadioText),
                                            ('[RADIO_SECTION_SENTINEL1]', sentinel1RadioText),
                                            ('[MAP_JSON_TEXT]', json.dumps(detailedInfo)),
                                            ('[SENSOR_LIST]', sensorList),
                                            ('[LAT]', str(kmlUrlInfo['lat'])),
                                            ('[LON]', str(kmlUrlInfo['lon']))
                                            ])
            #newText = 'You selected: <pre>'+ cgi.escape(date) +'</pre>'
            #newText = MAP_HTML
        # Fill in the output section
        text = renderHtml(self._htmlText, [('[OUTPUT_SECTION]', newText)])
        # Write the output
        self.response.write(text)
# WSGI routing table for the App Engine application.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/selected', MapPage),
    ('/getmapdata', GetMapData)
], debug=True)
| |
import unittest
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils.test_utils import check_same_batch
class TestMultiAgentBatch(unittest.TestCase):
    """Unit tests for MultiAgentBatch.timeslices()."""

    @staticmethod
    def _one_step_batch(t, eps_id, agent_idx):
        """Return a single-timestep SampleBatch for one agent."""
        return SampleBatch(
            {
                SampleBatch.T: [t],
                SampleBatch.EPS_ID: [eps_id],
                SampleBatch.AGENT_INDEX: [agent_idx],
                SampleBatch.SEQ_LENS: [1],
            }
        )

    def _check_slices(self, actual, expected):
        """Assert `actual` and `expected` hold pairwise-identical batches.

        The original comparison zipped the two lists inside a throwaway
        list comprehension, which silently ignored any extra or missing
        slices; asserting the lengths first closes that hole.
        """
        self.assertEqual(len(actual), len(expected))
        for observed, wanted in zip(actual, expected):
            check_same_batch(observed, wanted)

    def test_timeslices_non_overlapping_experiences(self):
        """Tests if timeslices works as expected on a MultiAgentBatch
        consisting of two non-overlapping SampleBatches.
        """

        def _generate_data(agent_idx):
            # Two timesteps of an episode unique to this agent.
            return SampleBatch(
                {
                    SampleBatch.T: [0, 1],
                    SampleBatch.EPS_ID: 2 * [agent_idx],
                    SampleBatch.AGENT_INDEX: 2 * [agent_idx],
                    SampleBatch.SEQ_LENS: [2],
                }
            )

        policy_batches = {str(idx): _generate_data(idx) for idx in range(2)}
        ma_batch = MultiAgentBatch(policy_batches, 4)
        sliced_ma_batches = ma_batch.timeslices(1)
        self._check_slices(
            sliced_ma_batches,
            [
                MultiAgentBatch({"0": self._one_step_batch(0, 0, 0)}, 1),
                MultiAgentBatch({"0": self._one_step_batch(1, 0, 0)}, 1),
                MultiAgentBatch({"1": self._one_step_batch(0, 1, 1)}, 1),
                MultiAgentBatch({"1": self._one_step_batch(1, 1, 1)}, 1),
            ],
        )

    def test_timeslices_partially_overlapping_experiences(self):
        """Tests if timeslices works as expected on a MultiAgentBatch
        consisting of two partially overlapping SampleBatches.
        """

        def _generate_data(agent_idx, t_start):
            # Two consecutive timesteps of episode 0, offset per agent.
            return SampleBatch(
                {
                    SampleBatch.T: [t_start, t_start + 1],
                    SampleBatch.EPS_ID: [0, 0],
                    SampleBatch.AGENT_INDEX: 2 * [agent_idx],
                    SampleBatch.SEQ_LENS: [2],
                }
            )

        policy_batches = {str(idx): _generate_data(idx, idx) for idx in range(2)}
        ma_batch = MultiAgentBatch(policy_batches, 4)
        sliced_ma_batches = ma_batch.timeslices(1)
        self._check_slices(
            sliced_ma_batches,
            [
                MultiAgentBatch({"0": self._one_step_batch(0, 0, 0)}, 1),
                # Timestep 1 is shared by both agents.
                MultiAgentBatch(
                    {
                        "0": self._one_step_batch(1, 0, 0),
                        "1": self._one_step_batch(1, 0, 1),
                    },
                    1,
                ),
                MultiAgentBatch({"1": self._one_step_batch(2, 0, 1)}, 1),
            ],
        )

    def test_timeslices_fully_overlapping_experiences(self):
        """Tests if timeslices works as expected on a MultiAgentBatch
        consisting of two fully overlapping SampleBatches.
        """

        def _generate_data(agent_idx):
            # Both agents act on the same two timesteps of episode 0.
            return SampleBatch(
                {
                    SampleBatch.T: [0, 1],
                    SampleBatch.EPS_ID: [0, 0],
                    SampleBatch.AGENT_INDEX: 2 * [agent_idx],
                    SampleBatch.SEQ_LENS: [2],
                }
            )

        policy_batches = {str(idx): _generate_data(idx) for idx in range(2)}
        ma_batch = MultiAgentBatch(policy_batches, 4)
        sliced_ma_batches = ma_batch.timeslices(1)
        self._check_slices(
            sliced_ma_batches,
            [
                MultiAgentBatch(
                    {
                        "0": self._one_step_batch(0, 0, 0),
                        "1": self._one_step_batch(0, 0, 1),
                    },
                    1,
                ),
                MultiAgentBatch(
                    {
                        "0": self._one_step_batch(1, 0, 0),
                        "1": self._one_step_batch(1, 0, 1),
                    },
                    1,
                ),
            ],
        )
# Allow running this test module directly; the exit code reflects the
# pytest result so CI can detect failures.
if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| |
'''
Copyright (c) 2012-2017, Agora Games, LLC All rights reserved.
https://github.com/agoragames/kairos/blob/master/LICENSE.txt
'''
from .exceptions import *
from .timeseries import *
import cql
import time
from datetime import date, datetime
from datetime import time as time_type
from decimal import Decimal
from Queue import Queue, Empty, Full
import re
from urlparse import *
# Test python3 compatibility: alias the Python 2 ``long``/``unicode``
# builtins to their Python 3 equivalents when they do not exist, so the
# TYPE_MAP below can key on them in either interpreter.
try:
  x = long(1)
except NameError:
  long = int
try:
  x = unicode('foo')
except NameError:
  unicode = str
# Map accepted Python types / type-name strings to the CQL column type used
# to store values of that kind.
TYPE_MAP = {
  str : 'ascii',
  'str' : 'ascii',
  'string' : 'ascii',
  unicode : 'text', # works for py3 too
  'unicode' : 'text',
  float : 'float',
  'float' : 'float',
  'double' : 'double',
  int : 'int',
  'int' : 'int',
  'integer' : 'int',
  long : 'varint', # works for py3 too
  'long' : 'varint',
  'int64' : 'bigint',
  'decimal' : 'decimal',
  bool : 'boolean',
  'bool' : 'boolean',
  'boolean' : 'boolean',
  'text' : 'text',
  'clob' : 'blob',
  'blob' : 'blob',
  'inet' : 'inet',
}
# CQL types whose literal values must be single-quoted in generated statements.
QUOTE_TYPES = set(['ascii','text','blob'])
# Matches values that already carry surrounding quotes, to avoid double-quoting.
QUOTE_MATCH = re.compile("^'.*'$")
def scoped_connection(func):
  '''
  Decorator that gives out connections.

  Checks a connection out of the series' pool for the duration of a single
  call and hands it back afterwards. The wrapped callable receives the
  connection as its second positional argument (after the series instance).
  '''
  from functools import wraps  # stdlib; local import keeps module scope unchanged

  @wraps(func)
  def _with(series, *args, **kwargs):
    connection = None
    try:
      connection = series._connection()
      return func(series, connection, *args, **kwargs)
    finally:
      # Bug fix: if _connection() raised, 'connection' is still None and the
      # old code pushed None into the pool, poisoning later checkouts.
      if connection is not None:
        series._return( connection )
  return _with
class CassandraBackend(Timeseries):
  '''
  Cassandra (CQL3) storage backend for kairos timeseries.

  Acts as a factory when instantiated directly: the 'type' kwarg selects the
  concrete subclass. Implements a minimal connection pool by cloning the
  settings of the seed connection passed to __init__.
  '''
  def __new__(cls, *args, **kwargs):
    # Factory dispatch on the 'type' kwarg when the base class is
    # instantiated directly; concrete subclasses fall through to the default.
    if cls==CassandraBackend:
      ttype = kwargs.pop('type', None)
      if ttype=='series':
        return CassandraSeries.__new__(CassandraSeries, *args, **kwargs)
      elif ttype=='histogram':
        return CassandraHistogram.__new__(CassandraHistogram, *args, **kwargs)
      elif ttype=='count':
        return CassandraCount.__new__(CassandraCount, *args, **kwargs)
      elif ttype=='gauge':
        return CassandraGauge.__new__(CassandraGauge, *args, **kwargs)
      elif ttype=='set':
        return CassandraSet.__new__(CassandraSet, *args, **kwargs)
      raise NotImplementedError("No implementation for %s types"%(ttype))
    return Timeseries.__new__(cls, *args, **kwargs)
  @classmethod
  # NOTE(review): first parameter of a classmethod is conventionally 'cls';
  # renaming it would be a safe cleanup.
  def url_parse(self, url, **kwargs):
    '''
    Build a cql connection from a cassandra:// or cql:// URL.

    Defaults to localhost:9160 and keyspace 'kairos'. Implicitly returns
    None for any other scheme (caller presumably tries other backends).
    '''
    location = urlparse(url)
    if location.scheme in ('cassandra','cql'):
      host = location.netloc or "localhost:9160"
      if re.search(":[0-9]+$", host):
        ip,port = host.split(':')
      else:
        ip = host
        port = 9160
      keyspace = location.path[1:] or kwargs.get('database', 'kairos')
      if '?' in keyspace:
        # NOTE(review): query-string params are split off but then discarded.
        keyspace,params = keyspace.split('?')
      return cql.connect(ip, int(port), keyspace, cql_version='3.0.0', **kwargs)
  def __init__(self, client, **kwargs):
    '''
    Initialize the sql backend after timeseries has processed the configuration.

    client: an open cql connection, used both as the pool seed and as the
      template for new pooled connections.
    Recognized kwargs: value_type (Python type or name from TYPE_MAP),
      table_name, pool_size.
    Raises TypeError for non-CQL3 clients or unsupported value types.
    '''
    # Only CQL3 is supported
    if client.cql_major_version != 3:
      raise TypeError("Only CQL3 is supported")
    vtype = kwargs.get('value_type', float)
    if vtype in TYPE_MAP:
      self._value_type = TYPE_MAP[vtype]
    else:
      raise TypeError("Unsupported type '%s'"%(vtype))
    # Subclasses set a default self._table before calling up.
    self._table = kwargs.get('table_name', self._table)
    # copy internal variables of the connection for poor-mans pooling
    self._host = client.host
    self._port = client.port
    self._keyspace = client.keyspace
    self._cql_version = client.cql_version
    self._compression = client.compression
    self._consistency_level = client.consistency_level
    self._transport = client.transport
    self._credentials = client.credentials
    # pool_size of 0 means an unbounded Queue.
    self._pool = Queue(kwargs.get('pool_size',0))
    self._pool.put( client )
    super(CassandraBackend,self).__init__(client, **kwargs)
  def _connection(self):
    '''
    Return a connection from the pool
    '''
    # Non-blocking get; on an empty pool, open a fresh connection using the
    # settings copied from the seed client in __init__.
    try:
      return self._pool.get(False)
    except Empty:
      args = [
        self._host, self._port, self._keyspace
      ]
      kwargs = {
        'user'              : None,
        'password'          : None,
        'cql_version'       : self._cql_version,
        'compression'       : self._compression,
        'consistency_level' : self._consistency_level,
        'transport'         : self._transport,
      }
      if self._credentials:
        kwargs['user'] = self._credentials['user']
        kwargs['password'] = self._credentials['password']
      return cql.connect(*args, **kwargs)
  def _return(self, connection):
    # Hand a connection back to the pool; excess connections are dropped.
    try:
      self._pool.put(connection, False)
    except Full:
      # do not return connection to the pool.
      pass
  def _insert(self, name, value, timestamp, intervals, **kwargs):
    '''
    Insert the new value.

    Quotes the value if its CQL type requires it, then writes one row per
    configured interval / normalized timestamp via the subclass'
    _insert_stmt.
    '''
    if self._value_type in QUOTE_TYPES and not QUOTE_MATCH.match(value):
      value = "'%s'"%(value)
    for interval,config in self._intervals.items():
      # 'intervals' here is the caller-supplied interval spec forwarded to
      # _normalize_timestamps (defined in the Timeseries base class).
      timestamps = self._normalize_timestamps(timestamp, intervals, config)
      for tstamp in timestamps:
        self._insert_data(name, value, tstamp, interval, config, **kwargs)
  @scoped_connection
  def _insert_data(self, connection, name, value, timestamp, interval, config):
    '''Helper to insert data into cql.'''
    cursor = connection.cursor()
    try:
      # _insert_stmt may return None (e.g. expired TTL) => skip the write.
      stmt = self._insert_stmt(name, value, timestamp, interval, config)
      if stmt:
        cursor.execute(stmt)
    finally:
      cursor.close()
  @scoped_connection
  def _get(self, connection, name, interval, config, timestamp, **kws):
    '''
    Get the interval.

    Returns an OrderedDict keyed by interval start time; for fine-grained
    ("non-coarse") intervals the values are per-resolution-bucket dicts.
    '''
    i_bucket = config['i_calc'].to_bucket(timestamp)
    fetch = kws.get('fetch')
    process_row = kws.get('process_row') or self._process_row
    rval = OrderedDict()
    if fetch:
      data = fetch( connection, self._table, name, interval, [i_bucket] )
    else:
      data = self._type_get(name, interval, i_bucket)
    if config['coarse']:
      if data:
        # NOTE(review): data.values()[0] only works on Python 2; on Python 3
        # dict views are not indexable.
        rval[ config['i_calc'].from_bucket(i_bucket) ] = process_row(data.values()[0][None])
      else:
        rval[ config['i_calc'].from_bucket(i_bucket) ] = self._type_no_value()
    else:
      for r_bucket,row_data in data.values()[0].items():
        rval[ config['r_calc'].from_bucket(r_bucket) ] = process_row(row_data)
    return rval
  @scoped_connection
  def _series(self, connection, name, interval, config, buckets, **kws):
    '''
    Fetch a series of buckets.

    Returns an OrderedDict keyed by interval start time; missing buckets are
    filled with the type's "no value" marker.
    '''
    fetch = kws.get('fetch')
    process_row = kws.get('process_row') or self._process_row
    rval = OrderedDict()
    if fetch:
      data = fetch( connection, self._table, name, interval, buckets )
    else:
      # Single range query covering first..last requested bucket.
      data = self._type_get(name, interval, buckets[0], buckets[-1])
    if config['coarse']:
      for i_bucket in buckets:
        i_key = config['i_calc'].from_bucket(i_bucket)
        i_data = data.get( i_bucket )
        if i_data:
          rval[ i_key ] = process_row( i_data[None] )
        else:
          rval[ i_key ] = self._type_no_value()
    else:
      if data:
        for i_bucket, i_data in data.items():
          i_key = config['i_calc'].from_bucket(i_bucket)
          rval[i_key] = OrderedDict()
          for r_bucket, r_data in i_data.items():
            r_key = config['r_calc'].from_bucket(r_bucket)
            if r_data:
              rval[i_key][r_key] = process_row(r_data)
            else:
              rval[i_key][r_key] = self._type_no_value()
    return rval
  @scoped_connection
  def delete(self, connection, name):
    # Remove all rows for one series name.
    # NOTE(review): name is interpolated into the CQL string unescaped.
    cursor = connection.cursor()
    try:
      cursor.execute("DELETE FROM %s WHERE name='%s';"%(self._table,name))
    finally:
      cursor.close()
  @scoped_connection
  def delete_all(self, connection):
    # Drop every row in this backend's table.
    cursor = connection.cursor()
    try:
      cursor.execute("TRUNCATE %s"%(self._table))
    finally:
      cursor.close()
  @scoped_connection
  def list(self, connection):
    # Return the distinct series names stored in this table.
    cursor = connection.cursor()
    rval = set()
    try:
      cursor.execute('SELECT name FROM %s'%(self._table))
      for row in cursor:
        rval.add(row[0])
    finally:
      cursor.close()
    return list(rval)
  @scoped_connection
  def properties(self, connection, name):
    # For each configured interval, report the first and last stored bucket
    # times (as wall-clock values via i_calc.from_bucket).
    cursor = connection.cursor()
    rval = {}
    try:
      for interval,config in self._intervals.items():
        rval.setdefault(interval, {})
        cursor.execute('''SELECT i_time
          FROM %s
          WHERE name = '%s' AND interval = '%s'
          ORDER BY interval ASC, i_time ASC
          LIMIT 1'''%(self._table, name, interval))
        rval[interval]['first'] = config['i_calc'].from_bucket(
          cursor.fetchone()[0] )
        cursor.execute('''SELECT i_time
          FROM %s
          WHERE name = '%s' AND interval = '%s'
          ORDER BY interval DESC, i_time DESC
          LIMIT 1'''%(self._table, name, interval))
        rval[interval]['last'] = config['i_calc'].from_bucket(
          cursor.fetchone()[0] )
    finally:
      cursor.close()
    return rval
class CassandraSeries(CassandraBackend, Series):
  '''
  Series timeseries: appends each observation to a CQL list per
  (name, interval, bucket) row in the 'series' table.
  '''
  def __init__(self, *a, **kwargs):
    self._table = 'series'
    super(CassandraSeries,self).__init__(*a, **kwargs)
    cursor = self._client.cursor()
    # TODO: support other value types
    # TODO: use varint for [ir]_time?
    try:
      res = cursor.execute('''CREATE TABLE IF NOT EXISTS %s (
        name text,
        interval text,
        i_time bigint,
        r_time bigint,
        value list<%s>,
        PRIMARY KEY(name, interval, i_time, r_time)
      )'''%(self._table, self._value_type))
    except cql.ProgrammingError as pe:
      # Tolerate "already existing" table errors; re-raise anything else.
      if 'existing' not in str(pe):
        raise
    finally:
      cursor.close()
  def _insert_stmt(self, name, value, timestamp, interval, config):
    '''Helper to generate the insert statement.'''
    # Calculate the TTL and abort if inserting into the past
    expire, ttl = config['expire'], config['ttl'](timestamp)
    if expire and not ttl:
      return None
    i_time = config['i_calc'].to_bucket(timestamp)
    if not config['coarse']:
      r_time = config['r_calc'].to_bucket(timestamp)
    else:
      # -1 is the sentinel for "no resolution bucket"; mapped to None on read.
      r_time = -1
    # TODO: figure out escaping rules of CQL
    # NOTE(review): values are interpolated, not bound; only the quoting in
    # _insert guards string values.
    table_spec = self._table
    if ttl:
      table_spec += " USING TTL %s "%(ttl)
    stmt = '''UPDATE %s SET value = value + [%s]
      WHERE name = '%s'
      AND interval = '%s'
      AND i_time = %s
      AND r_time = %s'''%(table_spec, value, name, interval, i_time, r_time)
    return stmt
  @scoped_connection
  def _type_get(self, connection, name, interval, i_bucket, i_end=None):
    '''
    Fetch raw rows for one bucket, or an inclusive range when i_end is
    given, as {i_time: {r_time: value_list}}.
    '''
    rval = OrderedDict()
    # TODO: more efficient creation of query string
    stmt = '''SELECT i_time, r_time, value
      FROM %s
      WHERE name = '%s' AND interval = '%s'
    '''%(self._table, name, interval)
    if i_end :
      stmt += ' AND i_time >= %s AND i_time <= %s'%(i_bucket, i_end)
    else:
      stmt += ' AND i_time = %s'%(i_bucket)
    stmt += ' ORDER BY interval, i_time, r_time'
    cursor = connection.cursor()
    try:
      cursor.execute(stmt)
      for row in cursor:
        i_time, r_time, value = row
        if r_time==-1:
          r_time = None
        rval.setdefault(i_time,OrderedDict())[r_time] = value
    finally:
      cursor.close()
    return rval
class CassandraHistogram(CassandraBackend, Histogram):
  '''
  Histogram timeseries: one counter per distinct observed value per
  (name, interval, bucket) in the 'histogram' table.
  '''
  def __init__(self, *a, **kwargs):
    self._table = 'histogram'
    super(CassandraHistogram,self).__init__(*a, **kwargs)
    # TODO: use varint for [ir]_time?
    # TODO: support other value types
    cursor = self._client.cursor()
    try:
      res = cursor.execute('''CREATE TABLE IF NOT EXISTS %s (
        name text,
        interval text,
        i_time bigint,
        r_time bigint,
        value %s,
        count counter,
        PRIMARY KEY(name, interval, i_time, r_time, value)
      )'''%(self._table, self._value_type))
    except cql.ProgrammingError as pe:
      # Tolerate "already existing" table errors; re-raise anything else.
      if 'existing' not in str(pe):
        raise
    finally:
      cursor.close()
  def _insert_stmt(self, name, value, timestamp, interval, config):
    '''Helper to generate the insert statement.'''
    # Calculate the TTL and abort if inserting into the past
    expire, ttl = config['expire'], config['ttl'](timestamp)
    if expire and not ttl:
      return None
    i_time = config['i_calc'].to_bucket(timestamp)
    if not config['coarse']:
      r_time = config['r_calc'].to_bucket(timestamp)
    else:
      # -1 is the sentinel for "no resolution bucket"; mapped to None on read.
      r_time = -1
    # TODO: figure out escaping rules of CQL
    # NOTE(review): values are interpolated, not bound parameters.
    table_spec = self._table
    if ttl:
      table_spec += " USING TTL %s "%(ttl)
    stmt = '''UPDATE %s SET count = count + 1
      WHERE name = '%s'
      AND interval = '%s'
      AND i_time = %s
      AND r_time = %s
      AND value = %s'''%(table_spec, name, interval, i_time, r_time, value)
    return stmt
  @scoped_connection
  def _type_get(self, connection, name, interval, i_bucket, i_end=None):
    '''
    Fetch raw rows for one bucket, or an inclusive range when i_end is
    given, as {i_time: {r_time: {value: count}}}.
    '''
    rval = OrderedDict()
    # TODO: more efficient creation of query string
    stmt = '''SELECT i_time, r_time, value, count
      FROM %s
      WHERE name = '%s' AND interval = '%s'
    '''%(self._table, name, interval)
    if i_end :
      stmt += ' AND i_time >= %s AND i_time <= %s'%(i_bucket, i_end)
    else:
      stmt += ' AND i_time = %s'%(i_bucket)
    stmt += ' ORDER BY interval, i_time, r_time'
    cursor = connection.cursor()
    try:
      cursor.execute(stmt)
      for row in cursor:
        i_time, r_time, value, count = row
        if r_time==-1:
          r_time = None
        rval.setdefault(i_time,OrderedDict()).setdefault(r_time,{})[value] = count
    finally:
      cursor.close()
    return rval
class CassandraCount(CassandraBackend, Count):
  '''
  Count timeseries: a single counter per (name, interval, bucket) in the
  'count' table, incremented by the inserted value.
  '''
  def __init__(self, *a, **kwargs):
    self._table = 'count'
    super(CassandraCount,self).__init__(*a, **kwargs)
    # TODO: use varint for [ir]_time?
    # TODO: support other value types
    cursor = self._client.cursor()
    try:
      res = cursor.execute('''CREATE TABLE IF NOT EXISTS %s (
        name text,
        interval text,
        i_time bigint,
        r_time bigint,
        count counter,
        PRIMARY KEY(name, interval, i_time, r_time)
      )'''%(self._table))
    except cql.ProgrammingError as pe:
      # Tolerate "already existing" table errors; re-raise anything else.
      if 'existing' not in str(pe):
        raise
    finally:
      cursor.close()
  def _insert_stmt(self, name, value, timestamp, interval, config):
    '''Helper to generate the insert statement.'''
    # Calculate the TTL and abort if inserting into the past
    expire, ttl = config['expire'], config['ttl'](timestamp)
    if expire and not ttl:
      return None
    i_time = config['i_calc'].to_bucket(timestamp)
    if not config['coarse']:
      r_time = config['r_calc'].to_bucket(timestamp)
    else:
      # -1 is the sentinel for "no resolution bucket"; mapped to None on read.
      r_time = -1
    # TODO: figure out escaping rules of CQL
    # NOTE(review): values are interpolated, not bound parameters.
    table_spec = self._table
    if ttl:
      table_spec += " USING TTL %s "%(ttl)
    stmt = '''UPDATE %s SET count = count + %s
      WHERE name = '%s'
      AND interval = '%s'
      AND i_time = %s
      AND r_time = %s'''%(table_spec, value, name, interval, i_time, r_time)
    return stmt
  @scoped_connection
  def _type_get(self, connection, name, interval, i_bucket, i_end=None):
    '''
    Fetch raw rows for one bucket, or an inclusive range when i_end is
    given, as {i_time: {r_time: count}}.
    '''
    rval = OrderedDict()
    # TODO: more efficient creation of query string
    stmt = '''SELECT i_time, r_time, count
      FROM %s
      WHERE name = '%s' AND interval = '%s'
    '''%(self._table, name, interval)
    if i_end :
      stmt += ' AND i_time >= %s AND i_time <= %s'%(i_bucket, i_end)
    else:
      stmt += ' AND i_time = %s'%(i_bucket)
    stmt += ' ORDER BY interval, i_time, r_time'
    cursor = connection.cursor()
    try:
      cursor.execute(stmt)
      for row in cursor:
        i_time, r_time, count = row
        if r_time==-1:
          r_time = None
        rval.setdefault(i_time,OrderedDict())[r_time] = count
    finally:
      cursor.close()
    return rval
class CassandraGauge(CassandraBackend, Gauge):
  '''
  Gauge timeseries: stores only the latest value per
  (name, interval, bucket) in the 'gauge' table (UPDATE overwrites).
  '''
  def __init__(self, *a, **kwargs):
    self._table = 'gauge'
    super(CassandraGauge,self).__init__(*a, **kwargs)
    # TODO: use varint for [ir]_time?
    # TODO: support other value types
    cursor = self._client.cursor()
    try:
      res = cursor.execute('''CREATE TABLE IF NOT EXISTS %s (
        name text,
        interval text,
        i_time bigint,
        r_time bigint,
        value %s,
        PRIMARY KEY(name, interval, i_time, r_time)
      )'''%(self._table, self._value_type))
    except cql.ProgrammingError as pe:
      # Tolerate "already existing" table errors; re-raise anything else.
      if 'existing' not in str(pe):
        raise
    finally:
      cursor.close()
  def _insert_stmt(self, name, value, timestamp, interval, config):
    '''Helper to generate the insert statement.'''
    # Calculate the TTL and abort if inserting into the past
    expire, ttl = config['expire'], config['ttl'](timestamp)
    if expire and not ttl:
      return None
    i_time = config['i_calc'].to_bucket(timestamp)
    if not config['coarse']:
      r_time = config['r_calc'].to_bucket(timestamp)
    else:
      # -1 is the sentinel for "no resolution bucket"; mapped to None on read.
      r_time = -1
    # TODO: figure out escaping rules of CQL
    # NOTE(review): values are interpolated, not bound parameters.
    table_spec = self._table
    if ttl:
      table_spec += " USING TTL %s "%(ttl)
    stmt = '''UPDATE %s SET value = %s
      WHERE name = '%s'
      AND interval = '%s'
      AND i_time = %s
      AND r_time = %s'''%(table_spec, value, name, interval, i_time, r_time)
    return stmt
  @scoped_connection
  def _type_get(self, connection, name, interval, i_bucket, i_end=None):
    '''
    Fetch raw rows for one bucket, or an inclusive range when i_end is
    given, as {i_time: {r_time: value}}.
    '''
    rval = OrderedDict()
    # TODO: more efficient creation of query string
    stmt = '''SELECT i_time, r_time, value
      FROM %s
      WHERE name = '%s' AND interval = '%s'
    '''%(self._table, name, interval)
    if i_end :
      stmt += ' AND i_time >= %s AND i_time <= %s'%(i_bucket, i_end)
    else:
      stmt += ' AND i_time = %s'%(i_bucket)
    stmt += ' ORDER BY interval, i_time, r_time'
    cursor = connection.cursor()
    try:
      cursor.execute(stmt)
      for row in cursor:
        i_time, r_time, value = row
        if r_time==-1:
          r_time = None
        rval.setdefault(i_time,OrderedDict())[r_time] = value
    finally:
      cursor.close()
    return rval
class CassandraSet(CassandraBackend, Set):
  '''
  Set timeseries: stores distinct observed values per
  (name, interval, bucket) in the 'sets' table; the value is part of the
  primary key, so duplicate inserts are idempotent.
  '''
  def __init__(self, *a, **kwargs):
    self._table = 'sets'
    super(CassandraSet,self).__init__(*a, **kwargs)
    # TODO: use varint for [ir]_time?
    # TODO: support other value types
    cursor = self._client.cursor()
    try:
      res = cursor.execute('''CREATE TABLE IF NOT EXISTS %s (
        name text,
        interval text,
        i_time bigint,
        r_time bigint,
        value %s,
        PRIMARY KEY(name, interval, i_time, r_time, value)
      )'''%(self._table, self._value_type))
    except cql.ProgrammingError as pe:
      # Tolerate "already existing" table errors; re-raise anything else.
      if 'existing' not in str(pe):
        raise
    finally:
      cursor.close()
  def _insert_stmt(self, name, value, timestamp, interval, config):
    '''Helper to generate the insert statement.'''
    # Calculate the TTL and abort if inserting into the past
    expire, ttl = config['expire'], config['ttl'](timestamp)
    if expire and not ttl:
      return None
    i_time = config['i_calc'].to_bucket(timestamp)
    if not config['coarse']:
      r_time = config['r_calc'].to_bucket(timestamp)
    else:
      # -1 is the sentinel for "no resolution bucket"; mapped to None on read.
      r_time = -1
    # TODO: figure out escaping rules of CQL
    # NOTE(review): values are interpolated, not bound parameters.
    stmt = '''INSERT INTO %s (name, interval, i_time, r_time, value)
      VALUES ('%s', '%s', %s, %s, %s)'''%(self._table, name, interval, i_time, r_time, value)
    # NOTE(review): redundant re-read; 'expire' was already unpacked above
    # and is unused past this point.
    expire = config['expire']
    if ttl:
      stmt += " USING TTL %s"%(ttl)
    return stmt
  @scoped_connection
  def _type_get(self, connection, name, interval, i_bucket, i_end=None):
    '''
    Fetch raw rows for one bucket, or an inclusive range when i_end is
    given, as {i_time: {r_time: set(values)}}.
    '''
    rval = OrderedDict()
    # TODO: more efficient creation of query string
    stmt = '''SELECT i_time, r_time, value
      FROM %s
      WHERE name = '%s' AND interval = '%s'
    '''%(self._table, name, interval)
    if i_end :
      stmt += ' AND i_time >= %s AND i_time <= %s'%(i_bucket, i_end)
    else:
      stmt += ' AND i_time = %s'%(i_bucket)
    stmt += ' ORDER BY interval, i_time, r_time'
    cursor = connection.cursor()
    try:
      cursor.execute(stmt)
      for row in cursor:
        i_time, r_time, value = row
        if r_time==-1:
          r_time = None
        rval.setdefault(i_time,OrderedDict()).setdefault(r_time,set()).add( value )
    finally:
      cursor.close()
    return rval
| |
# -*- coding: utf-8 -*-
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for kms command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from random import randint
import mock
import unittest
from gslib.cloud_api import AccessDeniedException
from gslib.project_id import PopulateProjectId
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForJSON
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.testcase.integration_testcase import SkipForXML
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import SetBotoConfigForTest
from gslib.utils.retry_util import Retry
@SkipForS3('gsutil does not support KMS operations for S3 buckets.')
@SkipForXML('gsutil does not support KMS operations for S3 buckets.')
class TestKmsSuccessCases(testcase.GsUtilIntegrationTestCase):
  """Integration tests for the kms command."""
  def setUp(self):
    super(TestKmsSuccessCases, self).setUp()
    # Make sure our keyRing exists (only needs to be done once, but subsequent
    # attempts will receive a 409 and be treated as a success). Save the fully
    # qualified name for use with creating keys later.
    self.keyring_fqn = self.kms_api.CreateKeyRing(
        PopulateProjectId(None),
        testcase.KmsTestingResources.KEYRING_NAME,
        location=testcase.KmsTestingResources.KEYRING_LOCATION)
  # Retried on AssertionError; presumably IAM policy changes need a moment
  # to propagate — confirm against flake history.
  @Retry(AssertionError, tries=3, timeout_secs=1)
  def DoTestAuthorize(self, specified_project=None):
    """Runs `kms authorize` twice and checks both outputs.

    The first run must grant access; the second must report the project as
    already authorized. If specified_project is set it is passed via -p.
    """
    # Randomly pick 1 of 1000 key names.
    key_name = testcase.KmsTestingResources.MUTABLE_KEY_NAME_TEMPLATE % (
        randint(0, 9), randint(0, 9), randint(0, 9))
    # Make sure the key with that name has been created.
    key_fqn = self.kms_api.CreateCryptoKey(self.keyring_fqn, key_name)
    # They key may have already been created and used in a previous test
    # invocation; make sure it doesn't contain the IAM policy binding that
    # allows our project to encrypt/decrypt with it.
    key_policy = self.kms_api.GetKeyIamPolicy(key_fqn)
    while key_policy.bindings:
      key_policy.bindings.pop()
    self.kms_api.SetKeyIamPolicy(key_fqn, key_policy)
    # Set up the authorize command tokens.
    authorize_cmd = ['kms', 'authorize', '-k', key_fqn]
    if specified_project:
      authorize_cmd.extend(['-p', specified_project])
    stdout1 = self.RunGsUtil(authorize_cmd, return_stdout=True)
    stdout2 = self.RunGsUtil(authorize_cmd, return_stdout=True)
    self.assertIn(
        'Authorized project %s to encrypt and decrypt with key:\n%s' %
        (PopulateProjectId(None), key_fqn), stdout1)
    self.assertIn(
        ('Project %s was already authorized to encrypt and decrypt with '
         'key:\n%s.' % (PopulateProjectId(None), key_fqn)), stdout2)
  def DoTestServiceaccount(self, specified_project=None):
    """Checks `kms serviceaccount` prints a GCS service account address."""
    serviceaccount_cmd = ['kms', 'serviceaccount']
    if specified_project:
      serviceaccount_cmd.extend(['-p', specified_project])
    stdout = self.RunGsUtil(serviceaccount_cmd, return_stdout=True)
    self.assertRegex(stdout,
                     r'[^@]+@gs-project-accounts\.iam\.gserviceaccount\.com')
  def testKmsAuthorizeWithoutProjectOption(self):
    self.DoTestAuthorize()
  def testKmsAuthorizeWithProjectOption(self):
    self.DoTestAuthorize(specified_project=PopulateProjectId(None))
  def testKmsServiceaccountWithoutProjectOption(self):
    self.DoTestServiceaccount()
  def testKmsServiceaccountWithProjectOption(self):
    self.DoTestServiceaccount(specified_project=PopulateProjectId(None))
  def testKmsEncryptionFlow(self):
    """Exercises get / set / clear of a bucket's default KMS key."""
    # Since we have to create a bucket and set a default KMS key to test most
    # of these behaviors, we just test them all in one flow to reduce the number
    # of API calls.
    bucket_uri = self.CreateBucket()
    # Make sure our key exists.
    key_fqn = self.kms_api.CreateCryptoKey(
        self.keyring_fqn, testcase.KmsTestingResources.CONSTANT_KEY_NAME)
    encryption_get_cmd = ['kms', 'encryption', suri(bucket_uri)]
    # Test output for bucket with no default KMS key set.
    stdout = self.RunGsUtil(encryption_get_cmd, return_stdout=True)
    self.assertIn('Bucket %s has no default encryption key' % suri(bucket_uri),
                  stdout)
    # Test that setting a bucket's default KMS key works and shows up correctly
    # via a follow-up call to display it.
    stdout = self.RunGsUtil(
        ['kms', 'encryption', '-k', key_fqn,
         suri(bucket_uri)],
        return_stdout=True)
    self.assertIn('Setting default KMS key for bucket %s...' % suri(bucket_uri),
                  stdout)
    stdout = self.RunGsUtil(encryption_get_cmd, return_stdout=True)
    self.assertIn(
        'Default encryption key for %s:\n%s' % (suri(bucket_uri), key_fqn),
        stdout)
    # Finally, remove the bucket's default KMS key and make sure a follow-up
    # call to display it shows that no default key is set.
    stdout = self.RunGsUtil(
        ['kms', 'encryption', '-d', suri(bucket_uri)], return_stdout=True)
    self.assertIn(
        'Clearing default encryption key for %s...' % suri(bucket_uri), stdout)
    stdout = self.RunGsUtil(encryption_get_cmd, return_stdout=True)
    self.assertIn('Bucket %s has no default encryption key' % suri(bucket_uri),
                  stdout)
@SkipForS3('gsutil does not support KMS operations for S3 buckets.')
@SkipForJSON('These tests only check for failures when the XML API is forced.')
class TestKmsSubcommandsFailWhenXmlForced(testcase.GsUtilIntegrationTestCase):
  """Tests that kms subcommands fail early when forced to use the XML API."""
  # HMAC-only credentials force gsutil onto the XML API.
  boto_config_hmac_auth_only = [
      # Overwrite other credential types.
      ('Credentials', 'gs_oauth2_refresh_token', None),
      ('Credentials', 'gs_service_client_id', None),
      ('Credentials', 'gs_service_key_file', None),
      ('Credentials', 'gs_service_key_file_password', None),
      # Add hmac credentials.
      ('Credentials', 'gs_access_key_id', 'dummykey'),
      ('Credentials', 'gs_secret_access_key', 'dummysecret'),
  ]
  # Syntactically valid key name; never actually contacted by these tests.
  dummy_keyname = ('projects/my-project/locations/global/'
                   'keyRings/my-keyring/cryptoKeys/my-key')
  def DoTestSubcommandFailsWhenXmlForcedFromHmacInBotoConfig(self, subcommand):
    # Expect exit status 1 plus the "JSON API only" error on stderr.
    with SetBotoConfigForTest(self.boto_config_hmac_auth_only):
      stderr = self.RunGsUtil(subcommand, expected_status=1, return_stderr=True)
      self.assertIn('The "kms" command can only be used with', stderr)
  def testEncryptionFailsWhenXmlForcedFromHmacInBotoConfig(self):
    self.DoTestSubcommandFailsWhenXmlForcedFromHmacInBotoConfig(
        ['kms', 'encryption', 'gs://dummybucket'])
  def testEncryptionDashKFailsWhenXmlForcedFromHmacInBotoConfig(self):
    self.DoTestSubcommandFailsWhenXmlForcedFromHmacInBotoConfig(
        ['kms', 'encryption', '-k', self.dummy_keyname, 'gs://dummybucket'])
  def testEncryptionDashDFailsWhenXmlForcedFromHmacInBotoConfig(self):
    self.DoTestSubcommandFailsWhenXmlForcedFromHmacInBotoConfig(
        ['kms', 'encryption', '-d', 'gs://dummybucket'])
  def testServiceaccountFailsWhenXmlForcedFromHmacInBotoConfig(self):
    self.DoTestSubcommandFailsWhenXmlForcedFromHmacInBotoConfig(
        ['kms', 'serviceaccount', 'gs://dummybucket'])
  def testAuthorizeFailsWhenXmlForcedFromHmacInBotoConfig(self):
    self.DoTestSubcommandFailsWhenXmlForcedFromHmacInBotoConfig(
        ['kms', 'authorize', '-k', self.dummy_keyname, 'gs://dummybucket'])
class TestKmsUnitTests(testcase.GsUtilUnitTestCase):
  """Unit tests for gsutil kms."""
  # Syntactically valid key name; all KMS API calls below are mocked out.
  dummy_keyname = ('projects/my-project/locations/global/'
                   'keyRings/my-keyring/cryptoKeys/my-key')
  @mock.patch('gslib.boto_translation.CloudApi.GetProjectServiceAccount')
  @mock.patch('gslib.boto_translation.CloudApi.PatchBucket')
  @mock.patch('gslib.kms_api.KmsApi.GetKeyIamPolicy')
  @mock.patch('gslib.kms_api.KmsApi.SetKeyIamPolicy')
  def testEncryptionSetKeySucceedsWhenUpdateKeyPolicySucceeds(
      self, mock_set_key_iam_policy, mock_get_key_iam_policy, mock_patch_bucket,
      mock_get_project_service_account):
    bucket_uri = self.CreateBucket()
    # Empty bindings: the command can add its own binding and proceed.
    mock_get_key_iam_policy.return_value.bindings = []
    mock_get_project_service_account.return_value.email_address = 'dummy@google.com'
    stdout = self.RunCommand(
        'kms', ['encryption', '-k', self.dummy_keyname,
                suri(bucket_uri)],
        return_stdout=True)
    self.assertIn(b'Setting default KMS key for bucket', stdout)
  @mock.patch('gslib.boto_translation.CloudApi.GetProjectServiceAccount')
  @mock.patch('gslib.boto_translation.CloudApi.PatchBucket')
  @mock.patch('gslib.kms_api.KmsApi.GetKeyIamPolicy')
  @mock.patch('gslib.kms_api.KmsApi.SetKeyIamPolicy')
  def testEncryptionSetKeySucceedsWhenUpdateKeyPolicyFailsWithWarningFlag(
      self, mock_set_key_iam_policy, mock_get_key_iam_policy, mock_patch_bucket,
      mock_get_project_service_account):
    bucket_uri = self.CreateBucket()
    # Policy read is denied, but -w downgrades that to a warning.
    mock_get_key_iam_policy.side_effect = AccessDeniedException(
        'Permission denied')
    mock_get_project_service_account.return_value.email_address = 'dummy@google.com'
    stdout = self.RunCommand(
        'kms', ['encryption', '-k', self.dummy_keyname, '-w',
                suri(bucket_uri)],
        return_stdout=True)
    self.assertIn(b'Setting default KMS key for bucket', stdout)
  @mock.patch('gslib.boto_translation.CloudApi.GetProjectServiceAccount')
  @mock.patch('gslib.boto_translation.CloudApi.PatchBucket')
  @mock.patch('gslib.kms_api.KmsApi.GetKeyIamPolicy')
  @mock.patch('gslib.kms_api.KmsApi.SetKeyIamPolicy')
  def testEncryptionSetKeyFailsWhenUpdateKeyPolicyFailsWithoutWarningFlag(
      self, mock_set_key_iam_policy, mock_get_key_iam_policy, mock_patch_bucket,
      mock_get_project_service_account):
    bucket_uri = self.CreateBucket()
    # Without -w, the denied policy read must propagate as an exception.
    mock_get_key_iam_policy.side_effect = AccessDeniedException(
        'Permission denied')
    mock_get_project_service_account.return_value.email_address = 'dummy@google.com'
    try:
      stdout = self.RunCommand(
          'kms', ['encryption', '-k', self.dummy_keyname,
                  suri(bucket_uri)],
          return_stdout=True)
      self.fail('Did not get expected AccessDeniedException')
    except AccessDeniedException as e:
      self.assertIn('Permission denied', e.reason)
| |
#! /usr/bin/python
#this version saves data on text file
#four routers
from xbee import XBee, ZigBee
import serial
from time import sleep
import re
from datetime import datetime as dt
from ConfigParser import SafeConfigParser
# Python 2 script setup: read XBee router addresses/names from an INI file
# and open the local coordinator's serial port. All of this runs at import
# time, so merely importing this module touches hardware.
config = SafeConfigParser()
config.read('cfg_mag.ini')
#PORT = config.get('port', 'xb')
PORT='COM3'
BAUD_RATE = 9600
# 64-bit ZigBee broadcast address (0x000000000000FFFF).
DEST_ADDR_LONG = '\x00\x00\x00\x00\x00\x00\xff\xff'
# Per-router 64-bit addresses from the [addr_long] section.
DEST_ADDR_RA = config.get('addr_long', 'ra')
DEST_ADDR_RB = config.get('addr_long', 'rb')
DEST_ADDR_RC = config.get('addr_long', 'rc')
DEST_ADDR_RD = config.get('addr_long', 'rd')
# [addr] section — presumably the 16-bit network addresses; confirm against
# cfg_mag.ini (these are not used in the code visible here).
ADDR_RA = config.get('addr', 'ra')
ADDR_RB = config.get('addr', 'rb')
ADDR_RC = config.get('addr', 'rc')
ADDR_RD = config.get('addr', 'rd')
# Human-readable router names used in console output and the CSV report.
RA = config.get('name', 'ra')
RB = config.get('name', 'rb')
RC = config.get('name', 'rc')
RD = config.get('name', 'rd')
ser = serial.Serial(PORT, BAUD_RATE)
ser.timeout = 60
# Create API object
# escaped=True selects XBee API mode 2 (escaped byte framing).
xbee = ZigBee(ser,escaped=True)
import pprint
# rssrep accumulates one CSV report line; parDB holds the last RSSI reading.
rssrep=""
parDB=0
def getRssi():
    """Poll routers A-D for their last-hop RSSI and log the readings.

    For each router, a remote AT "DB" command is sent; the reply's status
    byte tells us whether the router answered, and the parameter byte is the
    RSSI magnitude in -dBm.  Each record ("<name>,<rssi>") is appended to
    outbox/rssdata.txt, and a "*YYMMDDhhmmss" timestamp marker closes the
    cycle.  Unreachable routers are recorded with the sentinel value 100.

    Globals used: ``rssrep`` (CSV report buffer), ``statDB`` (last status
    byte), ``parDB`` (last RSSI magnitude).
    """
    global rssrep
    global statDB
    global parDB
    # Flush whatever is left over from the previous cycle, keeping only
    # upper-case letters, digits and commas.
    rssrep = re.sub('[^A-Z0-9\,]', "", rssrep)
    _append_report(rssrep)
    # The coordinator's name prefixes the first router record of this cycle.
    rssrep = rssrep + config.get('name', 'c')
    for dest_addr_long, name in ((DEST_ADDR_RA, RA),
                                 (DEST_ADDR_RB, RB),
                                 (DEST_ADDR_RC, RC),
                                 (DEST_ADDR_RD, RD)):
        # Ask the router for the RSSI of its last received packet.
        xbee.remote_at(dest_addr='\xff\xfe',  # 16-bit address unknown
                       dest_addr_long=dest_addr_long,
                       command="DB",
                       frame_id="A")
        resp = xbee.wait_read_frame()
        statDB = ord(resp['status'])
        # BUG FIX: was "statDB is 0" -- an identity check that only works by
        # accident of CPython's small-int cache; equality is what is meant.
        if statDB == 0:
            parDB = ord(resp['parameter'])
            print("%s is alive. RSS is - %s dBm" % (name, parDB))
            rssrep = rssrep + "," + name + "," + str(parDB)
        else:
            # No answer: record the sentinel value 100 for this router.
            print("Can't connect to %s" % name)
            rssrep = rssrep + "," + name + "," + "100"
        # Sanitize and persist this router's record, then reset the buffer.
        rssrep = re.sub('[^A-Zbcxy0-9\,]', "", rssrep)
        _append_report(rssrep)
        rssrep = ""
    # Close the cycle with a "*YYMMDDhhmmss" timestamp marker.
    _append_report("*" + dt.today().strftime("%y%m%d%H%M%S") + "\n")
    return
def _append_report(text):
    """Append *text* to the RSSI report file (opened and closed per write)."""
    f = open("outbox/rssdata.txt", "a")
    f.write(text)
    f.close()
def wakeup():
    """Broadcast a wake-up frame to every node, pause, then collect RSSI."""
    # part to discovery shot 16-bit address
    xbee.send("tx", data="000\n", dest_addr_long=DEST_ADDR_LONG, dest_addr="\xff\xfe")
    # Consume the transmit-status frame before carrying on.
    resp = xbee.wait_read_frame()
    print("Wake up")
    sleep(3)
    getRssi()
wakeup()  # run one wake-up / RSSI-collection cycle
ser.close()  # release the serial port when done
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Launches PODs"""
import json
import math
import time
import warnings
from datetime import datetime as dt
from typing import Optional, Tuple
import pendulum
import tenacity
from kubernetes import client, watch
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream as kubernetes_stream
from requests.exceptions import HTTPError
from airflow.exceptions import AirflowException
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.pod_generator import PodDefaults
from airflow.settings import pod_mutation_hook
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
# Emit a module-level deprecation notice on import: this whole module has
# moved to the cncf.kubernetes provider package.
# BUG FIX: the message text was garbled ("Please use :mod: Please use `...`"),
# duplicating the lead-in and breaking the :mod: cross-reference.
warnings.warn(
    """
    Please use :mod:`airflow.providers.cncf.kubernetes.utils.pod_manager`
    To use this module install the provider package by installing this pip package:
    https://pypi.org/project/apache-airflow-providers-cncf-kubernetes/
    """,
    DeprecationWarning,
    stacklevel=2,
)
class PodStatus:
    """Lower-cased Kubernetes pod phase names recognised by PodLauncher.process_status."""
    PENDING = 'pending'  # mapped to State.QUEUED
    RUNNING = 'running'  # mapped to State.RUNNING
    FAILED = 'failed'  # mapped to State.FAILED
    SUCCEEDED = 'succeeded'  # mapped to State.SUCCESS
class PodLauncher(LoggingMixin):
    """Deprecated pod launcher: creates, monitors, and deletes Kubernetes pods.

    Please use airflow.providers.cncf.kubernetes.utils.pod_manager.PodManager
    instead.
    """
    def __init__(
        self,
        kube_client: client.CoreV1Api = None,
        in_cluster: bool = True,
        cluster_context: Optional[str] = None,
        extract_xcom: bool = False,
    ):
        """
        Creates the launcher.

        Deprecated: please use
        airflow.providers.cncf.kubernetes.utils.pod_manager.PodManager instead.

        :param kube_client: kubernetes client; when None a default client is
            built from ``in_cluster`` / ``cluster_context``
        :param in_cluster: whether we are in cluster
        :param cluster_context: context of the cluster
        :param extract_xcom: whether we should extract xcom from the sidecar
        """
        super().__init__()
        self._client = kube_client or get_kube_client(in_cluster=in_cluster, cluster_context=cluster_context)
        self._watch = watch.Watch()
        self.extract_xcom = extract_xcom
    def run_pod_async(self, pod: V1Pod, **kwargs):
        """Runs POD asynchronously: submits the pod spec and returns at once.

        :param pod: pod spec to create
        :param kwargs: forwarded to ``create_namespaced_pod``
        :return: the API server's representation of the created pod
        :raises Exception: re-raises anything the create call raises, after
            logging the (sanitized) request body
        """
        # Give deployment-level code a chance to mutate the spec first.
        pod_mutation_hook(pod)
        sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
        json_pod = json.dumps(sanitized_pod, indent=2)
        self.log.debug('Pod Creation Request: \n%s', json_pod)
        try:
            resp = self._client.create_namespaced_pod(
                body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
            )
            self.log.debug('Pod Creation Response: %s', resp)
        except Exception as e:
            self.log.exception('Exception when attempting to create Namespaced Pod: %s', json_pod)
            raise e
        return resp
    def delete_pod(self, pod: V1Pod):
        """Deletes POD; a pod that is already gone (404) is not an error."""
        try:
            self._client.delete_namespaced_pod(
                pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions()
            )
        except ApiException as e:
            # If the pod is already deleted
            if e.status != 404:
                raise
    def start_pod(self, pod: V1Pod, startup_timeout: int = 120):
        """
        Launches the pod synchronously and waits until it has *started*
        (left the pending/queued phase) — completion is monitor_pod's job.

        :param pod: pod spec to launch
        :param startup_timeout: Timeout for startup of the pod (if pod is pending for too long, fails task)
        :raises AirflowException: if the pod is still pending after
            ``startup_timeout`` seconds
        """
        resp = self.run_pod_async(pod)
        curr_time = dt.now()
        # A populated start_time means the pod already started; skip polling.
        if resp.status.start_time is None:
            while self.pod_not_started(pod):
                self.log.warning("Pod not yet started: %s", pod.metadata.name)
                delta = dt.now() - curr_time
                if delta.total_seconds() >= startup_timeout:
                    raise AirflowException("Pod took too long to start")
                time.sleep(1)
    def monitor_pod(self, pod: V1Pod, get_logs: bool) -> Tuple[State, Optional[str]]:
        """
        Monitors a pod and returns the final state

        :param pod: pod spec that will be monitored
        :type pod : V1Pod
        :param get_logs: whether to read the logs locally (streamed to the
            task log as they arrive)
        :return: Tuple of the final task State and, when ``extract_xcom`` is
            set, the decoded xcom result (otherwise None)
        """
        if get_logs:
            # Follow the base container's log stream; if the stream is
            # interrupted while the container is still running, re-attach
            # from slightly before the last line we saw.
            read_logs_since_sec = None
            last_log_time = None
            while True:
                logs = self.read_pod_logs(pod, timestamps=True, since_seconds=read_logs_since_sec)
                for line in logs:
                    timestamp, message = self.parse_log_line(line.decode('utf-8'))
                    last_log_time = pendulum.parse(timestamp)
                    self.log.info(message)
                time.sleep(1)
                if not self.base_container_is_running(pod):
                    break
                self.log.warning('Pod %s log read interrupted', pod.metadata.name)
                if last_log_time:
                    delta = pendulum.now() - last_log_time
                    # Prefer logs duplication rather than loss
                    read_logs_since_sec = math.ceil(delta.total_seconds())
        result = None
        if self.extract_xcom:
            # Wait for the base container to finish, then pull the xcom
            # payload out of the sidecar before the pod is torn down.
            while self.base_container_is_running(pod):
                self.log.info('Container %s has state %s', pod.metadata.name, State.RUNNING)
                time.sleep(2)
            result = self._extract_xcom(pod)
            self.log.info(result)
            result = json.loads(result)
        while self.pod_is_running(pod):
            self.log.info('Pod %s has state %s', pod.metadata.name, State.RUNNING)
            time.sleep(2)
        return self._task_status(self.read_pod(pod)), result
    def parse_log_line(self, line: str) -> Tuple[str, str]:
        """
        Split a K8s log line into its leading timestamp and the message.

        :param line: k8s log line
        :type line: str
        :return: timestamp and log message
        :rtype: Tuple[str, str]
        :raises Exception: if the line has no space separator
        """
        split_at = line.find(' ')
        if split_at == -1:
            raise Exception(f'Log not in "{{timestamp}} {{log}}" format. Got: {line}')
        timestamp = line[:split_at]
        message = line[split_at + 1 :].rstrip()
        return timestamp, message
    def _task_status(self, event):
        # Map a pod object's phase to an Airflow task State.
        self.log.info('Event: %s had an event of type %s', event.metadata.name, event.status.phase)
        status = self.process_status(event.metadata.name, event.status.phase)
        return status
    def pod_not_started(self, pod: V1Pod):
        """Tests if pod has not started (its phase still maps to QUEUED)"""
        state = self._task_status(self.read_pod(pod))
        return state == State.QUEUED
    def pod_is_running(self, pod: V1Pod):
        """Tests if pod is running (i.e. not yet terminal)"""
        state = self._task_status(self.read_pod(pod))
        return state not in (State.SUCCESS, State.FAILED)
    def base_container_is_running(self, pod: V1Pod):
        """Tests if the container named 'base' is currently running"""
        event = self.read_pod(pod)
        status = next(iter(filter(lambda s: s.name == 'base', event.status.container_statuses)), None)
        if not status:
            return False
        return status.state.running is not None
    @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
    def read_pod_logs(
        self,
        pod: V1Pod,
        tail_lines: Optional[int] = None,
        timestamps: bool = False,
        since_seconds: Optional[int] = None,
    ):
        """Reads (follows) the 'base' container's log stream from the POD.

        Retries up to 3 times with exponential backoff on failure.

        :param pod: pod to read logs from
        :param tail_lines: if set, only return this many trailing lines
        :param timestamps: prefix each line with its timestamp
        :param since_seconds: only return logs newer than this many seconds
        :raises AirflowException: on HTTP errors from the kubernetes API
        """
        additional_kwargs = {}
        if since_seconds:
            additional_kwargs['since_seconds'] = since_seconds
        if tail_lines:
            additional_kwargs['tail_lines'] = tail_lines
        try:
            return self._client.read_namespaced_pod_log(
                name=pod.metadata.name,
                namespace=pod.metadata.namespace,
                container='base',
                follow=True,
                timestamps=timestamps,
                _preload_content=False,
            **additional_kwargs,
            ) if False else self._client.read_namespaced_pod_log(
                name=pod.metadata.name,
                namespace=pod.metadata.namespace,
                container='base',
                follow=True,
                timestamps=timestamps,
                _preload_content=False,
                **additional_kwargs,
            )
        except HTTPError as e:
            raise AirflowException(f'There was an error reading the kubernetes API: {e}')
    @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
    def read_pod_events(self, pod):
        """Reads events from the POD (retried 3x with backoff)"""
        try:
            return self._client.list_namespaced_event(
                namespace=pod.metadata.namespace, field_selector=f"involvedObject.name={pod.metadata.name}"
            )
        except HTTPError as e:
            raise AirflowException(f'There was an error reading the kubernetes API: {e}')
    @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
    def read_pod(self, pod: V1Pod):
        """Read POD information (retried 3x with backoff)"""
        try:
            return self._client.read_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
        except HTTPError as e:
            raise AirflowException(f'There was an error reading the kubernetes API: {e}')
    def _extract_xcom(self, pod: V1Pod):
        # Exec a shell in the xcom sidecar, cat the return.json payload, then
        # signal the sidecar to exit so the pod can terminate.
        resp = kubernetes_stream(
            self._client.connect_get_namespaced_pod_exec,
            pod.metadata.name,
            pod.metadata.namespace,
            container=PodDefaults.SIDECAR_CONTAINER_NAME,
            command=['/bin/sh'],
            stdin=True,
            stdout=True,
            stderr=True,
            tty=False,
            _preload_content=False,
        )
        try:
            result = self._exec_pod_command(resp, f'cat {PodDefaults.XCOM_MOUNT_PATH}/return.json')
            self._exec_pod_command(resp, 'kill -s SIGINT 1')
        finally:
            resp.close()
        if result is None:
            raise AirflowException(f'Failed to extract xcom from pod: {pod.metadata.name}')
        return result
    def _exec_pod_command(self, resp, command):
        # Write a command to the open exec stream and return its stdout,
        # or None if the stream closed / only stderr was produced.
        if resp.is_open():
            self.log.info('Running command... %s\n', command)
            resp.write_stdin(command + '\n')
            while resp.is_open():
                resp.update(timeout=1)
                if resp.peek_stdout():
                    return resp.read_stdout()
                if resp.peek_stderr():
                    self.log.info(resp.read_stderr())
                    break
        return None
    def process_status(self, job_id, status):
        """Map a (case-insensitive) pod phase name to an Airflow task State.

        Unknown phases are logged and treated as failures.
        """
        status = status.lower()
        if status == PodStatus.PENDING:
            return State.QUEUED
        elif status == PodStatus.FAILED:
            self.log.error('Event with job id %s Failed', job_id)
            return State.FAILED
        elif status == PodStatus.SUCCEEDED:
            self.log.info('Event with job id %s Succeeded', job_id)
            return State.SUCCESS
        elif status == PodStatus.RUNNING:
            return State.RUNNING
        else:
            self.log.error('Event: Invalid state %s on job %s', status, job_id)
            return State.FAILED
| |
# -*- test-case-name: foolscap.test.test_promise -*-
from twisted.python import util
from twisted.python.failure import Failure
from twisted.internet import defer
from foolscap.eventual import eventually
# Deliberately shadows the builtin id(): unsignedID yields a non-negative
# identifier suitable for the %#x formatting used in Promise.__repr__.
id = util.unsignedID
# Promise states: EVENTUAL/CHAINED are unresolved, NEAR is fulfilled,
# BROKEN is failed.
EVENTUAL, CHAINED, NEAR, BROKEN = range(4)
class UsageError(Exception):
    """Raised when you do something inappropriate to a Promise (e.g. resolve
    it twice, or break it with something that is not a Failure)."""
def _ignore(results):
pass
class Promise(object):
    """I am a promise of a future result. I am a lot like a Deferred, except
    that my promised result is usually an instance. I make it possible to
    schedule method invocations on this future instance, returning Promises
    for the results.

    Promises are always in one of three states: Eventual, Fulfilled, and
    Broken. (see http://www.erights.org/elib/concurrency/refmech.html for a
    pretty picture). They start as Eventual, meaning we do not yet know
    whether they will resolve or not. In this state, method invocations are
    queued. Eventually the Promise will be 'resolved' into either the
    Fulfilled or the Broken state. Fulfilled means that the promise contains
    a live object to which methods can be dispatched synchronously. Broken
    promises are incapable of invoking methods: they all result in Failure.

    Method invocation is always asynchronous: it always returns a Promise.

    The only thing you can do with a promise 'p1' is to perform an
    eventual-send on it, like so::

     sendOnly(p1).foo(args) # ignores the result
     p2 = send(p1).bar(args) # creates a Promise for the result
     p2 = p1.bar(args) # same as send(p1).bar(args)

    Or wait for it to resolve, using one of the following::

     d = when(p); d.addCallback(cb) # provides a Deferred
     p._then(cb, *args, **kwargs) # like when(p).addCallback(cb,*a,**kw)
     p._except(cb, *args, **kwargs) # like when(p).addErrback(cb,*a,**kw)

    The _then and _except forms return the same Promise. You can set up
    chains of calls that will be invoked in the future, using a dataflow
    style, like this::

     p = getPromiseForServer()
     d = p.getDatabase('db1')
     r = d.getRecord(name)
     def _print(record):
         print 'the record says', record
     def _oops(failure):
         print 'something failed:', failure
     r._then(_print)
     r._except(_oops)

    Or all collapsed in one sequence like::

     getPromiseForServer().getDatabase('db1').getRecord(name)._then(_print)

    The eventual-send will eventually invoke the method foo(args) on the
    promise's resolution. This will return a new Promise for the results of
    that method call.
    """
    # all our internal methods are private, to avoid a confusing lack of an
    # error message if someone tries to make a synchronous method call on us
    # with a name that happens to match an internal one.
    _state = EVENTUAL
    _useDataflowStyle = True # enables p.foo(args)
    def __init__(self):
        self._watchers = []
        self._pendingMethods = [] # list of (methname, args, kwargs, p)
    # _then and _except are our only public methods. All other access is
    # through normal (not underscore-prefixed) attribute names, which
    # indicate names of methods on the target object that should be called
    # later.
    def _then(self, cb, *args, **kwargs):
        """Schedule cb(result, *args, **kwargs) for when I resolve
        successfully. Errors from cb are swallowed. Returns self so calls
        can be chained."""
        d = self._wait_for_resolution()
        d.addCallback(cb, *args, **kwargs)
        d.addErrback(lambda ignore: None)
        return self
    def _except(self, cb, *args, **kwargs):
        """Schedule cb(failure, *args, **kwargs) for when I break.
        Returns self so calls can be chained."""
        d = self._wait_for_resolution()
        d.addErrback(cb, *args, **kwargs)
        return self
    # everything beyond here is private to this module
    def __repr__(self):
        return "<Promise %#x>" % id(self)
    def __getattr__(self, name):
        # Dataflow style: p.foo(args) is sugar for send(p).foo(args).
        if not self._useDataflowStyle:
            raise AttributeError("no such attribute %s" % name)
        def newmethod(*args, **kwargs):
            return self._send(name, args, kwargs)
        return newmethod
    # _send and _sendOnly are used by send() and sendOnly(). _send is also
    # used by regular attribute access.
    def _send(self, methname, args, kwargs):
        """Return a Promise (for the result of the call) when the call is
        eventually made. The call is guaranteed to not fire in this turn."""
        # this is called by send()
        p, resolver = makePromise()
        if self._state in (EVENTUAL, CHAINED):
            self._pendingMethods.append((methname, args, kwargs, resolver))
        else:
            eventually(self._deliver, methname, args, kwargs, resolver)
        return p
    def _sendOnly(self, methname, args, kwargs):
        """Send a message like _send, but discard the result."""
        # this is called by sendOnly()
        if self._state in (EVENTUAL, CHAINED):
            self._pendingMethods.append((methname, args, kwargs, _ignore))
        else:
            eventually(self._deliver, methname, args, kwargs, _ignore)
    # _wait_for_resolution is used by when(), as well as _then and _except
    def _wait_for_resolution(self):
        """Return a Deferred that will fire (with whatever was passed to
        _resolve) when this Promise moves to a RESOLVED state (either NEAR or
        BROKEN)."""
        # this is called by when()
        if self._state in (EVENTUAL, CHAINED):
            d = defer.Deferred()
            self._watchers.append(d)
            return d
        if self._state == NEAR:
            return defer.succeed(self._target)
        # self._state == BROKEN
        return defer.fail(self._target)
    # _resolve is our resolver method, and is handed out by makePromise()
    def _resolve(self, target_or_failure):
        """Resolve this Promise to refer to the given target. If called with
        a Failure, the Promise is now BROKEN. _resolve may only be called
        once."""
        # E splits this method into two pieces resolve(result) and
        # smash(problem). It is easier for us to keep them in one piece,
        # because d.addBoth(p._resolve) is convenient.
        if self._state != EVENTUAL:
            raise UsageError("Promises may not be resolved multiple times")
        self._resolve2(target_or_failure)
    # the remaining methods are internal, for use by this class only
    def _resolve2(self, target_or_failure):
        # we may be called with a Promise, an immediate value, or a Failure
        if isinstance(target_or_failure, Promise):
            self._state = CHAINED
            when(target_or_failure).addBoth(self._resolve2)
            return
        if isinstance(target_or_failure, Failure):
            self._break(target_or_failure)
            return
        self._target = target_or_failure
        self._deliver_queued_messages()
        self._state = NEAR
    def _break(self, failure):
        # TODO: think about what you do to break a resolved promise. Once the
        # Promise is in the NEAR state, it can't be broken, but eventually
        # we're going to have a FAR state, which *can* be broken.
        """Put this Promise in the BROKEN state."""
        if not isinstance(failure, Failure):
            raise UsageError("Promises must be broken with a Failure")
        if self._state == BROKEN:
            raise UsageError("Broken Promises may not be re-broken")
        self._target = failure
        if self._state in (EVENTUAL, CHAINED):
            self._deliver_queued_messages()
        # BUG FIX: this line previously read "self._state == BROKEN" -- a
        # no-op comparison -- so a broken Promise never actually left the
        # EVENTUAL/CHAINED state (and could be re-broken or re-resolved).
        self._state = BROKEN
    def _invoke_method(self, name, args, kwargs):
        # NOTE(review): not referenced anywhere in this module as shown;
        # kept for backward compatibility.
        if isinstance(self._target, Failure):
            return self._target
        method = getattr(self._target, name)
        res = method(*args, **kwargs)
        return res
    def _deliverOneMethod(self, methname, args, kwargs):
        method = getattr(self._target, methname)
        return method(*args, **kwargs)
    def _deliver(self, methname, args, kwargs, resolver):
        # the resolver will be fired with both success and Failure
        t = self._target
        if isinstance(t, Promise):
            resolver(t._send(methname, args, kwargs))
        elif isinstance(t, Failure):
            resolver(t)
        else:
            d = defer.maybeDeferred(self._deliverOneMethod,
                                    methname, args, kwargs)
            d.addBoth(resolver)
    def _deliver_queued_messages(self):
        for (methname, args, kwargs, resolver) in self._pendingMethods:
            eventually(self._deliver, methname, args, kwargs, resolver)
        del self._pendingMethods
        # Q: what are the partial-ordering semantics between queued messages
        # and when() clauses that are waiting on this Promise to be resolved?
        for d in self._watchers:
            eventually(d.callback, self._target)
        del self._watchers
def resolvedPromise(resolution):
    """Return a Promise that has already been resolved to *resolution*."""
    promise = Promise()
    promise._resolve(resolution)
    return promise
def makePromise():
    """Create an unresolved Promise; return (promise, resolver) where calling
    resolver(value_or_failure) resolves it."""
    promise = Promise()
    return promise, promise._resolve
class _MethodGetterWrapper(object):
def __init__(self, callback):
self.cb = [callback]
def __getattr__(self, name):
if name.startswith("_"):
raise AttributeError("method %s is probably private" % name)
cb = self.cb[0] # avoid bound-methodizing
def newmethod(*args, **kwargs):
return cb(name, args, kwargs)
return newmethod
def send(o):
    """Make an eventual-send call on object C{o}. Use this as follows::

     p = send(o).foo(args)

    C{o} can either be a Promise or an immediate value. The arguments can
    either be promises or immediate values.

    send() always returns a Promise, and the o.foo(args) method invocation
    always takes place in a later reactor turn.

    Many thanks to Mark Miller for suggesting this syntax to me.
    """
    # Immediate values are lifted into an already-resolved Promise first.
    target = o if isinstance(o, Promise) else resolvedPromise(o)
    return _MethodGetterWrapper(target._send)
def sendOnly(o):
    """Make an eventual-send call on object C{o}, and ignore the results.
    """
    if not isinstance(o, Promise):
        # a little bit heavyweight for a simple eventually(), but it
        # keeps the code simple
        o = resolvedPromise(o)
    return _MethodGetterWrapper(o._sendOnly)
def when(p):
    """Turn a Promise into a Deferred that fires with the enclosed object
    once it is ready. Use this only when you need synchronous scheduling;
    most of the time you can invoke methods on the Promise directly as if
    it were already available."""
    assert isinstance(p, Promise)
    return p._wait_for_resolution()
| |
# Emit the banner line 287 times, exactly as the original wall of
# copy-pasted print statements did.
for _ in range(287):
    print("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
print ("LAKE WASHING THE TABE. JOHNNY")
| |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import webob
from nova.api.openstack.compute.contrib import rescue as rescue_v2
from nova.api.openstack.compute.plugins.v3 import rescue as rescue_v21
from nova.api.openstack import extensions
from nova import compute
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
def rescue(self, context, instance, rescue_password=None,
           rescue_image_ref=None):
    """No-op stand-in for compute.api.API.rescue used by these tests."""
def unrescue(self, context, instance):
pass
def fake_compute_get(*args, **kwargs):
    """Return a minimal fake instance dict keyed by the module-level UUID."""
    return dict(id=1, uuid=UUID)
class RescueTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-rescue server actions (rescue/unrescue)."""
    def setUp(self):
        super(RescueTestV21, self).setUp()
        # Stub the compute API with no-op fakes so the controller can be
        # exercised without a running compute service; individual tests
        # below re-stub "rescue"/"unrescue" with failing fakes as needed.
        self.stubs.Set(compute.api.API, "get", fake_compute_get)
        self.stubs.Set(compute.api.API, "rescue", rescue)
        self.stubs.Set(compute.api.API, "unrescue", unrescue)
        self.controller = self._set_up_controller()
        self.fake_req = fakes.HTTPRequest.blank('')
    def _set_up_controller(self):
        # Overridden by RescueTestV20 to build the legacy v2.0 controller.
        return rescue_v21.RescueController()
    def test_rescue_from_locked_server(self):
        # Rescuing a locked instance must surface as HTTP 409 Conflict.
        def fake_rescue_from_locked_server(self, context,
            instance, rescue_password=None, rescue_image_ref=None):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
        self.stubs.Set(compute.api.API,
                       'rescue',
                       fake_rescue_from_locked_server)
        body = {"rescue": {"adminPass": "AABBCC112233"}}
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)
    def test_rescue_with_preset_password(self):
        # A caller-supplied adminPass is echoed back unchanged.
        body = {"rescue": {"adminPass": "AABBCC112233"}}
        resp = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual("AABBCC112233", resp['adminPass'])
    def test_rescue_generates_password(self):
        # With no password in the body one is generated with the
        # configured password_length.
        body = dict(rescue=None)
        resp = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual(CONF.password_length, len(resp['adminPass']))
    def test_rescue_of_rescued_instance(self):
        # Rescue of an already-rescued instance is an invalid state -> 409.
        body = dict(rescue=None)
        def fake_rescue(*args, **kwargs):
            raise exception.InstanceInvalidState('fake message')
        self.stubs.Set(compute.api.API, "rescue", fake_rescue)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)
    def test_unrescue(self):
        body = dict(unrescue=None)
        resp = self.controller._unrescue(self.fake_req, UUID, body=body)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller,
                      rescue_v21.RescueController):
            status_int = self.controller._unrescue.wsgi_code
        else:
            status_int = resp.status_int
        self.assertEqual(202, status_int)
    def test_unrescue_from_locked_server(self):
        # Unrescue of a locked instance must also map to HTTP 409.
        def fake_unrescue_from_locked_server(self, context,
                                             instance):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
        self.stubs.Set(compute.api.API,
                       'unrescue',
                       fake_unrescue_from_locked_server)
        body = dict(unrescue=None)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._unrescue,
                          self.fake_req, UUID, body=body)
    def test_unrescue_of_active_instance(self):
        # Unrescue of an instance not in RESCUED state -> 409.
        body = dict(unrescue=None)
        def fake_unrescue(*args, **kwargs):
            raise exception.InstanceInvalidState('fake message')
        self.stubs.Set(compute.api.API, "unrescue", fake_unrescue)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._unrescue,
                          self.fake_req, UUID, body=body)
    def test_rescue_raises_unrescuable(self):
        # InstanceNotRescuable maps to HTTP 400 Bad Request.
        body = dict(rescue=None)
        def fake_rescue(*args, **kwargs):
            raise exception.InstanceNotRescuable('fake message')
        self.stubs.Set(compute.api.API, "rescue", fake_rescue)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)
    @mock.patch('nova.compute.api.API.rescue')
    def test_rescue_with_image_specified(self, mock_compute_api_rescue):
        # rescue_image_ref from the body must be forwarded to the
        # compute API call.
        instance = fake_compute_get()
        body = {"rescue": {"adminPass": "ABC123",
                           "rescue_image_ref": "img-id"}}
        resp_json = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual("ABC123", resp_json['adminPass'])
        mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
                                                   rescue_password=u'ABC123',
                                                   rescue_image_ref=u'img-id')
    @mock.patch('nova.compute.api.API.rescue')
    def test_rescue_without_image_specified(self, mock_compute_api_rescue):
        # Without a rescue_image_ref the compute API receives None.
        instance = fake_compute_get()
        body = {"rescue": {"adminPass": "ABC123"}}
        resp_json = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual("ABC123", resp_json['adminPass'])
        mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
                                                   rescue_password=u'ABC123',
                                                   rescue_image_ref=None)
    def test_rescue_with_none(self):
        body = dict(rescue=None)
        resp = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual(CONF.password_length, len(resp['adminPass']))
    def test_rescue_with_empty_dict(self):
        body = dict(rescue=dict())
        resp = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual(CONF.password_length, len(resp['adminPass']))
    def test_rescue_disable_password(self):
        # With enable_instance_password off, the response must not leak
        # a generated password.
        self.flags(enable_instance_password=False)
        body = dict(rescue=None)
        resp_json = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertNotIn('adminPass', resp_json)
    def test_rescue_with_invalid_property(self):
        # v2.1 schema validation rejects unknown properties in the body.
        body = {"rescue": {"test": "test"}}
        self.assertRaises(exception.ValidationError,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)
class RescueTestV20(RescueTestV21):
    """Re-run the rescue tests against the legacy v2.0 controller."""
    def _set_up_controller(self):
        # The v2.0 controller takes an extension manager; advertise the
        # rescue-with-image extension so image-ref tests still apply.
        manager = extensions.ExtensionManager()
        manager.extensions = {'os-extended-rescue-with-image': 'fake'}
        return rescue_v2.RescueController(manager)
    def test_rescue_with_invalid_property(self):
        """Skipped: the original v2 code does not validate body properties."""
    def test_rescue_disable_password(self):
        """Skipped: v2.0 cannot disable the returned admin password."""
| |
from csv import reader
from django.core.management.base import BaseCommand, CommandError
import os
from django import db
from geo import errors
from geo.models import Geo
from hmda.models import HMDARecord
import sys
import traceback
import logging
class Command(BaseCommand):
    """Django management command that bulk loads national HMDA LAR csv data."""
    args = "<path/to/20XXHMDALAR - National.csv> <delete_file:true/false> <filterhmda>"
    help = """ Load HMDA data (for all states)."""
    def handle(self, *args, **options):
        if not args:
            raise CommandError("Needs a first argument, " + Command.args)
        delete_file = False
        filter_hmda = False
        # Running totals accumulated across every csv file processed.
        self.total_skipped = 0
        self.na_skipped = 0
        self.total_lines_read = 0
        self.other_skipped = 0
        def get_logger():
            # Log to a file; log_info below also echoes to stdout.
            logging.basicConfig(filename='hmdaload.log',
                                level=logging.INFO,
                                format='%(asctime)s %(message)s')
        def log_info(message):
            # NOTE(review): single-argument helper -- see the except block
            # in records(), which calls it with two arguments.
            logging.info(message)
            print message
        get_logger()
        ### if delete_file argument, remove csv file after processing
        ### default is False
        ### if filter_hmda is passed in, setup known_hmda & geo_states
        ### else load all HMDA records without filtering
        if len(args) > 1:
            for arg in args:
                if "delete_file:" in arg:
                    tmp_delete_flag= arg.split(":")
                    if tmp_delete_flag[1] == "true":
                        delete_file = True
                        print "************* CSV File(s) WiLL BE REMOVED AFTER PROCESSING ***********"
                if "filterhmda" in arg:
                    filter_hmda = True
        # First argument may be one csv file, or a directory whose
        # 'hmda_csv_' files are all processed.
        csv_files = []
        if os.path.isfile(args[0]):
            csv_files.append(args[0]);
        elif os.path.isdir(args[0]):
            working_directory = args[0]
            for file in os.listdir(working_directory):
                if os.path.isfile(os.path.join(working_directory,file)) and 'hmda_csv_' in file:
                    csv_files.append(os.path.join(working_directory, file))
        else:
            raise Exception("Not a file or Directory! " + args[0])
        # States that have census-tract geos loaded; records for other
        # states are skipped by records() below.
        geo_states = set(
            row['state'] for row in
            Geo.objects.filter(geo_type=Geo.TRACT_TYPE).values('state').distinct()
        )
        db.reset_queries()
        log_info("Filtering by states " + ", ".join(list(sorted(geo_states))))
        if filter_hmda:
            # States already present in HMDARecord are skipped to avoid
            # inserting duplicates on re-runs.
            known_hmda = set(
                row['statefp'] for row in
                HMDARecord.objects.values('statefp').distinct())
            log_info("Already have data for "+ ", ".join(list(sorted(known_hmda))))
        db.reset_queries()
        def records(self,csv_file):
            """A generator returning a new Record with each call. Required as
            there are too many to instantiate in memory at once"""
            # Any parse error flips this flag so the source file is kept
            # for inspection even when delete_file was requested.
            prevent_delete= False
            datafile = open(csv_file, 'r')
            i = 0
            inserted_counter = 0
            skipped_counter = 0
            log_info("Processing " + csv_file)
            for row in reader(datafile):
                i += 1
                if i % 25000 == 0:
                    log_info("Records Processed For File " + str(i) )
                try:
                    # Positional columns follow the HMDA LAR national
                    # flat-file layout (45 fields).
                    record = HMDARecord(
                        as_of_year=int(row[0]), respondent_id=row[1],
                        agency_code=row[2], loan_type=int(row[3]),
                        property_type=row[4], loan_purpose=int(row[5]),
                        owner_occupancy=int(row[6]), loan_amount_000s=int(row[7]),
                        preapproval=row[8], action_taken=int(row[9]),
                        msamd=row[10], statefp=row[11], countyfp=row[12],
                        census_tract_number=row[13], applicant_ethnicity=row[14],
                        co_applicant_ethnicity=row[15], applicant_race_1=row[16],
                        applicant_race_2=row[17], applicant_race_3=row[18],
                        applicant_race_4=row[19], applicant_race_5=row[20],
                        co_applicant_race_1=row[21], co_applicant_race_2=row[22],
                        co_applicant_race_3=row[23], co_applicant_race_4=row[24],
                        co_applicant_race_5=row[25], applicant_sex=int(row[26]),
                        co_applicant_sex=int(row[27]), applicant_income_000s=row[28],
                        purchaser_type=row[29], denial_reason_1=row[30],
                        denial_reason_2=row[31], denial_reason_3=row[32],
                        rate_spread=row[33], hoepa_status=row[34],
                        lien_status=row[35], edit_status=row[36],
                        sequence_number=row[37], population=row[38],
                        minority_population=row[39], ffieic_median_family_income=row[40],
                        tract_to_msamd_income=row[41], number_of_owner_occupied_units=row[42],
                        number_of_1_to_4_family_units=row[43], application_date_indicator=row[44])
                    # geo_id = state + county + tract (dots stripped),
                    # remapped through the known 2010 corrections table.
                    censustract = row[11] + row[12] + row[13].replace('.', '')
                    record.geo_id = errors.in_2010.get(censustract, censustract)
                    record.institution_id = row[2]+row[1]
                    self.total_lines_read = self.total_lines_read + 1
                    if filter_hmda:
                        if (row[11] not in known_hmda and row[11] in geo_states and 'NA' not in record.geo_id):
                            inserted_counter +=1
                            yield record
                        else:
                            skipped_counter += 1
                    else:
                        if row[11] in geo_states and 'NA' not in record.geo_id:
                            inserted_counter =inserted_counter + 1
                            yield record
                        else:
                            # Track why the record was skipped.
                            if 'NA' in record.geo_id:
                                self.na_skipped = self.na_skipped + 1
                            else:
                                self.other_skipped = self.other_skipped +1
                            self.total_skipped = self.total_skipped + 1
                            skipped_counter += 1
                except:
                    prevent_delete= True
                    log_info('*****************************')
                    log_info("Error processing csv_file")
                    log_info("Record Line Number " + str(i))
                    log_info("Row: "+ str(row))
                    # NOTE(review): log_info accepts exactly one argument,
                    # so this two-argument call raises TypeError here; also
                    # traceback.print_exc() returns None (it prints to
                    # stderr itself), so the next line logs "None".
                    log_info("Unexpected error:", sys.exc_info()[0])
                    log_info(traceback.print_exc())
                    log_info('*****************************')
            datafile.close()
            log_info("Finished Processing File: " + str(i))
            log_info("Records That have been yield/Inserted For File: " + str(inserted_counter) )
            log_info("Records Skipped For File: " + str(skipped_counter) )
            if delete_file:
                if not prevent_delete:
                    os.remove(csv_file)
        window = [] # Need to materialize records for bulk_create
        total_count = 0
        for csv_file in csv_files:
            for record in records(self,csv_file):
                window.append(record)
                total_count = total_count + 1
                # Flush in batches of 1000 to bound memory use.
                if len(window) > 999:
                    HMDARecord.objects.bulk_create(window,batch_size=200)
                    db.reset_queries()
                    window[:] = []
        # Flush whatever is left after the last file.
        if (len(window) > 0):
            log_info("window size (last records): " + str(len(window)))
            HMDARecord.objects.bulk_create(window,batch_size=100)
            db.reset_queries()
            window[:] = []
        log_info("All Files Total Records bulk inserted: " + str(total_count))
        log_info("All Lines Read from All Files: " + str(self.total_lines_read))
        log_info("All Files Total Skipped: " + str(self.total_skipped))
        log_info("All Files Total Skipped for GeoID=NA: " + str(self.na_skipped))
        log_info("All Files Total Skipped for other reason: " + str(self.other_skipped ))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from quantum.openstack.common import cfg
from quantum.openstack.common import fileutils
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Configuration knobs for the locking utilities; registered on the
# global CONF object below.
util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    # Default lock directory is the parent of this module's directory.
    cfg.StrOpt('lock_path',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory to use for lock files')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.
    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.
    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """
    def __init__(self, name):
        # The file object is opened lazily in __enter__; name is the
        # lock file's path.
        self.lockfile = None
        self.fname = name
    def __enter__(self):
        # Opening with 'w' creates the lock file if it does not exist.
        self.lockfile = open(self.fname, 'w')
        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError, e:
                # EACCES/EAGAIN mean another process holds the lock; keep
                # polling.  Anything else is a real error.
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Best-effort release; a failure is logged but never raised so it
        # cannot mask an exception from the with-block body.
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)
    def trylock(self):
        # Non-blocking acquire; implemented by platform subclasses.
        raise NotImplementedError()
    def unlock(self):
        # Release; implemented by platform subclasses.
        raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
    """File lock backed by msvcrt.locking() (Windows only)."""
    def trylock(self):
        # msvcrt.locking() operates on an OS-level file descriptor, not a
        # file object; the original passed self.lockfile directly, which
        # raises TypeError at runtime.
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
    """File lock backed by fcntl.lockf() (POSIX platforms)."""
    def trylock(self):
        # LOCK_NB makes the acquire non-blocking; contention surfaces as
        # IOError with EACCES/EAGAIN, handled in _InterProcessLock.__enter__.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
# Select the platform-appropriate lock implementation at import time; the
# locking module (msvcrt or fcntl) is only imported on the platform that
# provides it.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock
# Weak values so a per-name semaphore is garbage collected once no caller
# holds a reference to it.
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
    """Synchronization decorator.
    Decorating a method like so::
        @synchronized('mylock')
        def foo(self, *args):
           ...
    ensures that only one thread will execute the bar method at a time.
    Different methods can share the same lock::
        @synchronized('mylock')
        def foo(self, *args):
           ...
        @synchronized('mylock')
        def bar(self, *args):
           ...
    This way only one of either foo or bar can be executing at a time.
    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix. The prefix should end with a hyphen ('-') if specified.
    The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    a method decorated with @synchronized('mylock', external=True), only one
    of them will execute at a time.
    The lock_path keyword argument is used to specify a special location for
    external lock files to live. If nothing is set, then CONF.lock_path is
    used as a default.
    """
    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem
            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})
                if external and not CONF.disable_process_locking:
                    # In-process exclusion (the semaphore above) is already
                    # held; additionally take a file lock for cross-process
                    # exclusion.
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...'),
                              {'lock': name, 'method': f.__name__})
                    cleanup_dir = False
                    # We need a copy of lock_path because it is non-local
                    local_lock_path = lock_path
                    if not local_lock_path:
                        local_lock_path = CONF.lock_path
                    if not local_lock_path:
                        # No lock directory configured: use a throwaway
                        # tempdir and remove it afterwards.
                        cleanup_dir = True
                        local_lock_path = tempfile.mkdtemp()
                    if not os.path.exists(local_lock_path):
                        cleanup_dir = True
                        fileutils.ensure_tree(local_lock_path)
                    # NOTE(mikal): the lock name cannot contain directory
                    # separators
                    safe_name = name.replace(os.sep, '_')
                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                    lock_file_path = os.path.join(local_lock_path,
                                                  lock_file_name)
                    try:
                        lock = InterProcessLock(lock_file_path)
                        with lock:
                            LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
                                        'for method "%(method)s"...'),
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            retval = f(*args, **kwargs)
                    finally:
                        LOG.debug(_('Released file lock "%(lock)s" at %(path)s'
                                    ' for method "%(method)s"...'),
                                  {'lock': name,
                                   'path': lock_file_path,
                                   'method': f.__name__})
                        # NOTE(vish): This removes the tempdir if we needed
                        #             to create one. This is used to cleanup
                        #             the locks left behind by unit tests.
                        if cleanup_dir:
                            shutil.rmtree(local_lock_path)
                else:
                    retval = f(*args, **kwargs)
            return retval
        return inner
    return wrap
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.