gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
# Adam Bowen - Jun 2016
# dx_jetstream_container.py
# Use this file as a starter for your python scripts, if you like
# requirements
# pip install docopt delphixpy
# The below doc follows the POSIX compliant standards and allows us to use
# this doc to also define our arguments for the script. This thing is brilliant.
"""Perform routine operations on Jetstream containers
Usage:
dx_jetstream_container.py --template <name> (--container <name> | --all_containers )
--operation <name> [-d <identifier> | --engine <identifier> | --all]
[--bookmark_name <name>] [--bookmark_tags <tags>] [--bookmark_shared <bool>]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_jetstream_container.py -h | --help | -v | --version
Perform routine operations on a Jetstream Container
Examples:
dx_jetstream_container.py --operation refresh --template "Masked SugarCRM Application" --container "Sugar Automated Testing Container"
dx_jetstream_container.py --operation reset --template "Masked SugarCRM Application" --all_containers
dx_jetstream_container.py --template "Masked SugarCRM Application" --container "Sugar Automated Testing Container" --operation bookmark --bookmark_name "Testing" --bookmark_tags "one,two,three" --bookmark_shared true
Options:
-d <identifier> Identifier of Delphix engine in dxtools.conf.
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--all_containers Run against all jetstream containers
--template <name> Name of Jetstream template to execute against.
--container <name> Name of Jetstream container to execute against.
--operation <name> Name of the operation to execute
Can be one of:
start, stop, recover, refresh, reset, bookmark
--bookmark_name <name> Name of the bookmark to create
(only valid with "--operation bookmark")
--bookmark_tags <tags> Comma-delimited list to tag the bookmark
(only valid with "--operation bookmark")
--bookmark_shared <bool> Share bookmark: true/false
[default: false]
--host <name> Name of environment in Delphix to execute against.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_jetstream_container_refresh.log]
-h --help Show this screen.
-v --version Show version.
"""
from __future__ import print_function
import json
import logging
import signal
import sys
import threading
import time
import traceback
from multiprocessing import Process
from os.path import basename
from time import sleep
from time import time
from docopt import docopt
from delphixpy.v1_6_0 import job_context
from delphixpy.v1_6_0.delphix_engine import DelphixEngine
from delphixpy.v1_6_0.exceptions import HttpError
from delphixpy.v1_6_0.exceptions import JobError
from delphixpy.v1_6_0.web import jetstream
from delphixpy.v1_6_0.web import job
from delphixpy.v1_6_0.web.vo import JSBookmark
from delphixpy.v1_6_0.web.vo import JSBookmarkCreateParameters
from delphixpy.v1_6_0.web.vo import JSTimelinePointLatestTimeInput
VERSION = "v.0.0.005"
# from delphixpy.v1_6_0.web.vo import
def run_async(func):
    """Decorator that runs *func* on its own thread.

    Based on
    http://code.activestate.com/recipes/576684-simple-threading-decorator/

    Calling the wrapped function starts a ``threading.Thread`` executing
    *func* with the supplied arguments and immediately returns that
    ``Thread`` object, so the caller can ``join()`` it later::

        @run_async
        def task1():
            do_something

        @run_async
        def task2():
            do_something_too

        t1 = task1()
        t2 = task2()
        ...
        t1.join()
        t2.join()
    """
    from functools import wraps

    @wraps(func)
    def launcher(*args, **kwargs):
        worker = threading.Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker

    return launcher
@run_async
def container_bookmark(
    engine, server, container_obj, bookmark_name, bookmark_shared, tags
):
    """Create a bookmark on the container's active branch (runs on its own thread).

    :param engine: engine config dict from dxtools.conf
    :param server: DelphixEngine session
    :param container_obj: Jetstream container object to bookmark
    :param bookmark_name: name for the new bookmark
    :param bookmark_shared: whether the bookmark is shared (bool)
    :param tags: list of tag strings to attach to the bookmark
    """
    # But first, let's make sure it is in a CONSISTENT state
    container_recover(engine, server, container_obj)
    # Next let's make sure it is started
    container_start(engine, server, container_obj)
    # Build the bookmark creation parameters against the active branch.
    bookmark_create_params = JSBookmarkCreateParameters()
    bookmark_create_params.bookmark = JSBookmark()
    bookmark_create_params.bookmark.name = bookmark_name
    bookmark_create_params.bookmark.branch = container_obj.active_branch
    bookmark_create_params.bookmark.shared = bookmark_shared
    bookmark_create_params.bookmark.tags = tags
    # Bookmark the latest point in time on the container's timeline.
    bookmark_create_params.timeline_point_parameters = JSTimelinePointLatestTimeInput()
    bookmark_create_params.timeline_point_parameters.source_data_layout = (
        container_obj.reference
    )
    jetstream.bookmark.create(server, bookmark_create_params)
def container_recover(engine, server, container_obj):
    """This function recovers a container that is in an "INCONSISTENT" state

    Blocks until the recovery job completes, then returns a freshly
    fetched container object reflecting the new state. Containers in any
    other state are returned unchanged.
    """
    if container_obj.state == "INCONSISTENT":
        # if not recover it
        job_obj = jetstream.container.recover(server, container_obj.reference)
        # wait for the recovery action to finish
        job_context.wait(server, job_obj.reference)
        # get the updated object with the new state
        container_obj = jetstream.container.get(server, container_obj.reference)
    return container_obj
@run_async
def container_recover_async(engine, server, container_obj):
    """Thread-spawning wrapper around container_recover() so several
    containers can be recovered simultaneously; returns the Thread."""
    container_recover(engine, server, container_obj)
@run_async
def container_refresh(engine, server, container_obj):
    """Refresh a Jetstream container (runs on its own thread).

    The container is first recovered (if INCONSISTENT) and started (if
    OFFLINE) so the refresh request is issued against a healthy,
    running container.
    """
    # But first, let's make sure it is in a CONSISTENT state
    container_recover(engine, server, container_obj)
    # Next let's make sure it is started
    container_start(engine, server, container_obj)
    # Now let's refresh it. (The returned job object was never used, so
    # the dead ``refresh_job`` local from the original was dropped.)
    jetstream.container.refresh(server, container_obj.reference)
@run_async
def container_reset(engine, server, container_obj):
    """Reset a Jetstream container (runs on its own thread).

    The container is first recovered (if INCONSISTENT) and started (if
    OFFLINE) so the reset request is issued against a healthy,
    running container.
    """
    # But first, let's make sure it is in a CONSISTENT state
    container_recover(engine, server, container_obj)
    # Next let's make sure it is started
    container_start(engine, server, container_obj)
    # Now let's reset it. (Original comment wrongly said "refresh"; the
    # dead ``reset_job`` local was also dropped.)
    jetstream.container.reset(server, container_obj.reference)
def container_start(engine, server, container_obj):
    """This function starts/enables a container that is in an "OFFLINE" state"""
    # Only OFFLINE containers can be enabled; anything else is a no-op.
    if container_obj.state == "OFFLINE":
        jetstream.container.enable(server, container_obj.reference)
@run_async
def container_start_async(engine, server, container_obj):
    """Thread-spawning wrapper around container_start() so several
    containers can be started simultaneously; returns the Thread."""
    container_start(engine, server, container_obj)
def container_stop(engine, server, container_obj):
    """This function stops/disables a container that is in an "ONLINE" state"""
    # NOTE: original docstring wrongly described this as start/enable.
    # Only ONLINE containers can be disabled; anything else is a no-op.
    if container_obj.state == "ONLINE":
        jetstream.container.disable(server, container_obj.reference)
@run_async
def container_stop_async(engine, server, container_obj):
    """Thread-spawning wrapper around container_stop() so several
    containers can be stopped simultaneously; returns the Thread."""
    container_stop(engine, server, container_obj)
def find_container_by_name_and_template_name(
    engine, server, container_name, template_name
):
    """Resolve a Jetstream container by name within a named template.

    Returns the container object on success; on a miss an INFO message is
    printed and None is returned implicitly.
    """
    template_obj = find_obj_by_name(engine, server, jetstream.template, template_name)
    candidates = jetstream.container.get_all(server, template=template_obj.reference)
    match = next((c for c in candidates if c.name == container_name), None)
    if match is not None:
        print_debug(engine["hostname"] + ": Found a match " + str(match.reference))
        return match
    print_info('Unable to find "' + container_name + '" in ' + template_name)
def find_all_containers_by_template_name(engine, server, template_name):
    """Return every Jetstream container belonging to the named template.

    Returns the list of container objects, or None (after printing an
    INFO message) when the template holds no containers.
    """
    template_obj = find_obj_by_name(engine, server, jetstream.template, template_name)
    containers = jetstream.container.get_all(server, template=template_obj.reference)
    if containers:
        for each in containers:
            print_debug(engine["hostname"] + ": Found a match " + str(each.reference))
        return containers
    # BUG FIX: the original referenced the undefined name ``container_name``
    # here, raising NameError whenever the template had no containers.
    print_info('Unable to find any containers in "' + template_name + '"')
def find_obj_by_name(engine, server, f_class, obj_name):
    """
    Find an object by name within a delphixpy web class and return it.

    You might use this function to find objects like groups.

    :param f_class: delphixpy web module exposing get_all(server)
    :param obj_name: name to match against each object's ``name``
    :return: the matching object, or None when nothing matches.
        (The original docstring claimed a reference string was returned;
        the whole object is returned, and the dead ``obj_ref`` scratch
        variable was removed.)
    """
    print_debug(
        engine["hostname"]
        + ": Searching objects in the "
        + f_class.__name__
        + ' class\n for one named "'
        + obj_name
        + '"'
    )
    for obj in f_class.get_all(server):
        if obj.name == obj_name:
            print_debug(engine["hostname"] + ": Found a match " + str(obj.reference))
            return obj
def get_config(config_file_path):
    """
    Read dxtools.conf and return {hostname: engine-config-dict}.

    Exits the process (code 1) with a readable error when the file is
    unreadable or not valid JSON.
    """
    # First test to see that the file is there and we can open it.
    # (Bare ``except:`` narrowed to IOError, and the handle is now closed
    # deterministically via ``with``.)
    try:
        with open(config_file_path) as config_handle:
            config_file = config_handle.read()
    except IOError:
        print_error(
            "Was unable to open "
            + config_file_path
            + ". Please check the path and permissions, then try again."
        )
        sys.exit(1)
    # Parse the contents as JSON; ValueError covers json decode errors on
    # both Python 2 and 3 (json.JSONDecodeError subclasses it).
    try:
        config = json.loads(config_file)
    except ValueError:
        print_error(
            "Was unable to read "
            + config_file_path
            + " as json. Please check file in a json formatter and try again."
        )
        sys.exit(1)
    # Re-key the engine list by hostname for easier lookup later.
    delphix_engines = {}
    for each in config["data"]:
        delphix_engines[each["hostname"]] = each
    print_debug(delphix_engines)
    return delphix_engines
def logging_est(logfile_path):
    """
    Establish Logging

    Configures the root logger to write to *logfile_path* at INFO level,
    then drops to DEBUG (level 10) when --debug was passed. Reads the
    module-global ``arguments`` dict and sets the globals ``debug`` and
    ``logger`` used elsewhere in the script.
    """
    global debug
    logging.basicConfig(
        filename=logfile_path,
        format="%(levelname)s:%(asctime)s:%(message)s",
        level=logging.INFO,
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    print_info("Welcome to " + basename(__file__) + ", version " + VERSION)
    global logger
    debug = arguments["--debug"]
    logger = logging.getLogger()
    if debug == True:
        logger.setLevel(10)
        print_info("Debug Logging is enabled.")
def job_mode(server):
    """
    This function tells Delphix how to execute jobs, based on the
    single_thread variable at the beginning of the file

    Returns a delphixpy job_context context manager: sync (one job at a
    time) when the global ``single_thread`` is True, otherwise async.
    """
    # Synchronously (one at a time)
    if single_thread == True:
        job_m = job_context.sync(server)
        print_debug("These jobs will be executed synchronously")
    # Or asynchronously
    else:
        job_m = job_context.asyncly(server)
        print_debug("These jobs will be executed asynchronously")
    return job_m
def job_wait(server):
    """
    This job stops all work in the thread/process until jobs are completed.
    """
    # Grab all the jobs on the server (the last 25, by default)
    all_jobs = job.get_all(server)
    # For each job in the list, check to see if it is running (not ended)
    for jobobj in all_jobs:
        if not (jobobj.job_state in ["CANCELED", "COMPLETED", "FAILED"]):
            print_debug(
                "Waiting for "
                + jobobj.reference
                + " (currently: "
                + jobobj.job_state
                + ") to finish running against the container"
            )
            # If so, wait
            job_context.wait(server, jobobj.reference)
def on_exit(sig, func=None):
    """
    This function helps us end cleanly and with exit codes

    Signal-handler shaped (sig, func) so it can be registered via
    set_exit_handler(); ``func`` is unused but required by the contract.
    """
    print_info("Shutdown Command Received")
    print_info("Shutting down " + basename(__file__))
    sys.exit(0)
def print_debug(print_obj):
    """
    Call this function with a log message to prefix the message with DEBUG

    Only emits when the module-global ``debug`` flag (set by
    logging_est()) is truthy; if logging has not been established yet the
    message is silently dropped.
    """
    try:
        if debug == True:
            print("DEBUG: " + str(print_obj))
            logging.debug(str(print_obj))
    except NameError:
        # ``debug`` is not defined yet -- logging_est() has not run.
        # (Narrowed from a bare ``except:`` that hid every other error.)
        pass
def print_error(print_obj):
    """Echo *print_obj* to stdout with an ERROR prefix and log it at ERROR level."""
    message = str(print_obj)
    print("ERROR: " + message)
    logging.error(message)
def print_info(print_obj):
    """Echo *print_obj* to stdout with an INFO prefix and log it at INFO level."""
    message = str(print_obj)
    print("INFO: " + message)
    logging.info(message)
def print_warning(print_obj):
    """Echo *print_obj* to stdout with a WARNING prefix and log it at WARNING level."""
    message = str(print_obj)
    print("WARNING: " + message)
    logging.warning(message)
def serversess(f_engine_address, f_engine_username, f_engine_password):
    """Open and return a DelphixEngine session against the DOMAIN namespace."""
    return DelphixEngine(
        f_engine_address, f_engine_username, f_engine_password, "DOMAIN"
    )
def set_exit_handler(func):
    """
    This function helps us set the correct exit code

    Registers *func* (e.g. on_exit) as the SIGTERM handler.
    """
    signal.signal(signal.SIGTERM, func)
@run_async
def main_workflow(engine):
    """
    This function is where the main workflow resides.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously.

    Reads the module-global ``arguments`` dict for the operation,
    container/template selection, --parallel limit and --poll interval.
    """
    # Pull out the values from the dictionary for this engine
    engine_address = engine["ip_address"]
    engine_username = engine["username"]
    engine_password = engine["password"]
    # Containers to operate on (the unused ``jobs`` dict was removed).
    containers = []
    # Setup the connection to the Delphix Engine
    server = serversess(engine_address, engine_username, engine_password)
    # If we specified a specific container by name....
    if arguments["--container"]:
        # Get the container object from the name
        container_obj = find_container_by_name_and_template_name(
            engine, server, arguments["--container"], arguments["--template"]
        )
        if container_obj:
            containers.append(container_obj)
    # Else, if we said all containers ...
    elif arguments["--all_containers"]:
        # Grab all containers in the template
        containers = find_all_containers_by_template_name(
            engine, server, arguments["--template"]
        )
    if not containers:
        print_error("No containers found with the criterion specified")
        return
    # reset the running job count before we begin
    i = 0
    container_threads = []
    # While there are still running jobs or containers still to process....
    while i > 0 or len(containers) > 0:
        # While there are containers still to process and we are still under
        # the max simultaneous jobs threshold (if specified)
        while len(containers) > 0 and (
            arguments["--parallel"] is None or i < int(arguments["--parallel"])
        ):
            # Give us the next container in the list, and remove it from the list
            container_obj = containers.pop()
            operation = arguments["--operation"]
            # Dispatch the requested operation; each helper returns a Thread.
            if operation == "refresh":
                container_threads.append(
                    container_refresh(engine, server, container_obj)
                )
            elif operation == "reset":
                container_threads.append(container_reset(engine, server, container_obj))
            elif operation == "start":
                container_threads.append(
                    container_start_async(engine, server, container_obj)
                )
            elif operation == "stop":
                container_threads.append(
                    container_stop_async(engine, server, container_obj)
                )
            elif operation == "recover":
                container_threads.append(
                    container_recover_async(engine, server, container_obj)
                )
            elif operation == "bookmark":
                # Parse the comma-delimited tag list, if any.
                if arguments["--bookmark_tags"]:
                    tags = arguments["--bookmark_tags"].split(",")
                else:
                    tags = []
                # --bookmark_shared must be literally true/false (any case).
                if arguments["--bookmark_shared"]:
                    if str(arguments["--bookmark_shared"]).lower() == "true":
                        bookmark_shared = True
                    elif str(arguments["--bookmark_shared"]).lower() == "false":
                        bookmark_shared = False
                    else:
                        print_error(
                            'Invalid argument "'
                            + str(arguments["--bookmark_shared"]).lower()
                            + '" for --bookmark_shared'
                        )
                        print_error(
                            "--bookmark_shared only takes a value of true/false."
                        )
                        print_error("Exiting")
                        sys.exit(1)
                else:
                    bookmark_shared = False
                container_threads.append(
                    container_bookmark(
                        engine,
                        server,
                        container_obj,
                        arguments["--bookmark_name"],
                        bookmark_shared,
                        tags,
                    )
                )
            # Update the launched-thread count so the --parallel guard on
            # the inner loop can take effect.
            i = len(container_threads)
            # Check to see if we are running at max parallel processes, and report if so.
            if arguments["--parallel"] is not None and i >= int(arguments["--parallel"]):
                print_info(engine["hostname"] + ": Max jobs reached (" + str(i) + ")")
        # reset the running jobs counter, as we are about to recount the
        # threads that are actually still alive.
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # has existed since Python 2.6 and behaves identically.
        i = 0
        for t in container_threads:
            if t.is_alive():
                i += 1
        print_info(
            engine["hostname"]
            + ": "
            + str(i)
            + " jobs running. "
            + str(len(containers))
            + " jobs waiting to run"
        )
        # If we have running jobs, pause before repeating the checks.
        if i > 0:
            sleep(float(arguments["--poll"]))
    # (The stray debug line ``print("made it out")`` was removed.)
    # For each thread in the list...
    for each in container_threads:
        # join them back together so that we wait for all threads to complete
        each.join()
def run_job(engine):
    """
    This function runs the main_workflow asynchronously against all the
    servers specified.

    Engine selection order: --all > --engine > -d > the entry in
    dxtools.conf marked "default": "true". Exits (code 1) when the named
    engine is absent or no default exists.
    """
    # Create an empty list to store threads we create.
    threads = []
    # If the --all argument was given, run against every engine in dxtools.conf
    if arguments["--all"]:
        print_info("Executing against all Delphix Engines in the dxtools.conf")
        # For each server in the dxtools.conf...
        for delphix_engine in dxtools_objects:
            engine = dxtools_objects[delphix_engine]
            # Create a new thread and add it to the list.
            threads.append(main_workflow(engine))
    else:
        # Else if the --engine argument was given, test to see if the engine
        # exists in dxtools.conf (bare excepts narrowed to KeyError).
        if arguments["--engine"]:
            try:
                engine = dxtools_objects[arguments["--engine"]]
                print_info("Executing against Delphix Engine: " + arguments["--engine"])
            except KeyError:
                print_error(
                    'Delphix Engine "'
                    + arguments["--engine"]
                    + '" cannot be found in '
                    + config_file_path
                )
                print_error("Please check your value and try again. Exiting")
                sys.exit(1)
        # Else if the -d argument was given, test to see if the engine exists
        elif arguments["-d"]:
            try:
                engine = dxtools_objects[arguments["-d"]]
                print_info("Executing against Delphix Engine: " + arguments["-d"])
            except KeyError:
                print_error(
                    'Delphix Engine "'
                    + arguments["-d"]
                    + '" cannot be found in '
                    + config_file_path
                )
                print_error("Please check your value and try again. Exiting")
                sys.exit(1)
        else:
            # Else search for a default engine in the dxtools.conf.
            # .get() avoids a KeyError on entries without a "default" key.
            for delphix_engine in dxtools_objects:
                if dxtools_objects[delphix_engine].get("default") == "true":
                    engine = dxtools_objects[delphix_engine]
                    print_info(
                        "Executing against the default Delphix Engine in the dxtools.conf: "
                        + dxtools_objects[delphix_engine]["hostname"]
                    )
                    break
            if engine is None:
                print_error("No default engine found. Exiting")
                sys.exit(1)
        # run the job against the engine
        threads.append(main_workflow(engine))
    # For each thread in the list...
    for each in threads:
        # join them back together so that we wait for all threads to complete
        each.join()
def time_elapsed():
    """Minutes elapsed (one decimal place) since the global ``time_start``.

    Call this anywhere you want to note progress in terms of time.
    """
    return round((time() - time_start) / 60, 1)
def update_jobs_dictionary(engine, server, jobs):
    """
    Poll each tracked job, removing finished ones from *jobs* in place.

    :param jobs: dict mapping container objects to job references
    :return: the number of jobs still running.
    """
    # Establish the running jobs counter.
    i = 0
    # BUG FIX: iterate a snapshot of the keys -- the loop deletes entries,
    # and mutating a dict while iterating jobs.keys() raises RuntimeError
    # on Python 3.
    for j in list(jobs.keys()):
        job_obj = job.get(server, jobs[j])
        print_debug(engine["hostname"] + ": " + str(job_obj))
        print_info(engine["hostname"] + ": " + j.name + ": " + job_obj.job_state)
        if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
            # Non-running state: drop it from the tracked jobs.
            del jobs[j]
        else:
            # Still running: count it.
            i += 1
    return i
def main(argv):
    """Entry point: set up logging and config, run the job, report timing.

    Note: despite taking *argv*, the script actually reads the
    module-global ``arguments`` dict populated by docopt in __main__.
    """
    # We want to be able to call on these variables anywhere in the script.
    global single_thread
    global usebackup
    global time_start
    global config_file_path
    global dxtools_objects
    try:
        logging_est(arguments["--logdir"])
        print_debug(arguments)
        time_start = time()
        engine = None
        single_thread = False
        config_file_path = arguments["--config"]
        # Parse the dxtools.conf and put it into a dictionary
        dxtools_objects = get_config(config_file_path)
        # This is the function that will handle processing main_workflow
        # for all the servers.
        run_job(engine)
        elapsed_minutes = time_elapsed()
        print_info("script took " + str(elapsed_minutes) + " minutes to get this far.")
    # Here we handle what we do when the unexpected happens
    except SystemExit as e:
        # This is what we use to propagate our sys.exit(#) codes unchanged.
        sys.exit(e)
    except HttpError as e:
        # Our connection to Delphix failed.
        print_error("Connection failed to the Delphix Engine")
        print_error("Please check the ERROR message below")
        # BUG FIX: ``e.message`` does not exist on Python 3 exceptions;
        # str(e) is the portable equivalent.
        print_error(str(e))
        sys.exit(2)
    except JobError as e:
        # A job failed in Delphix, so surface the actionable job data.
        print_error("A job failed in the Delphix Engine")
        print_error(e.job)
        elapsed_minutes = time_elapsed()
        print_info(
            basename(__file__)
            + " took "
            + str(elapsed_minutes)
            + " minutes to get this far."
        )
        sys.exit(3)
    except KeyboardInterrupt:
        # Gracefully handle ctrl+c exits.
        print_debug("You sent a CTRL+C to interrupt the process")
        elapsed_minutes = time_elapsed()
        print_info(
            basename(__file__)
            + " took "
            + str(elapsed_minutes)
            + " minutes to get this far."
        )
    except:
        # Everything else gets caught here -- this is the top-level
        # boundary, so log the traceback and exit non-zero.
        print_error(sys.exc_info()[0])
        print_error(traceback.format_exc())
        elapsed_minutes = time_elapsed()
        print_info(
            basename(__file__)
            + " took "
            + str(elapsed_minutes)
            + " minutes to get this far."
        )
        sys.exit(1)
if __name__ == "__main__":
# Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
# Feed our arguments to the main function, and off we go!
print(arguments)
main(arguments)
| |
from rpython.rtyper.test.test_llinterp import gengraph, interpret
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rlib import rgc # Force registration of gc.collect
import gc
import py, sys
def test_collect():
    """gc.collect() must lower to exactly one argument-less 'gc__collect' op."""
    def f():
        return gc.collect()
    t, typer, graph = gengraph(f, [])
    ops = list(graph.iterblockops())
    assert len(ops) == 1
    op = ops[0][1]
    assert op.opname == 'gc__collect'
    assert len(op.args) == 0
    # Running the graph through the llinterpreter returns None.
    res = interpret(f, [])
    assert res is None


def test_collect_0():
    """gc.collect(0) must keep its generation argument through translation."""
    if sys.version_info < (2, 5):
        py.test.skip("requires Python 2.5 to call gc.collect() with an arg")
    def f():
        return gc.collect(0)
    t, typer, graph = gengraph(f, [])
    ops = list(graph.iterblockops())
    assert len(ops) == 1
    op = ops[0][1]
    assert op.opname == 'gc__collect'
    assert len(op.args) == 1
    # The constant generation argument 0 survives into the op.
    assert op.args[0].value == 0
    res = interpret(f, [])
    assert res is None
def test_can_move():
    """rgc.can_move lowers to 'gc_can_move' ops (one per branch) and reports True here."""
    T0 = lltype.GcStruct('T')
    T1 = lltype.GcArray(lltype.Float)
    def f(i):
        if i:
            return rgc.can_move(lltype.malloc(T0))
        else:
            return rgc.can_move(lltype.malloc(T1, 1))
    t, typer, graph = gengraph(f, [int])
    ops = list(graph.iterblockops())
    # One gc_can_move op in each of the two branches.
    res = [op for op in ops if op[1].opname == 'gc_can_move']
    assert len(res) == 2
    res = interpret(f, [1])
    assert res == True
def test_ll_arraycopy_1():
    """Copy 3 Signed items from a1[4:7] into a2[2:5]; source and rest of dest unchanged."""
    TYPE = lltype.GcArray(lltype.Signed)
    a1 = lltype.malloc(TYPE, 10)
    a2 = lltype.malloc(TYPE, 6)
    for i in range(10): a1[i] = 100 + i
    for i in range(6): a2[i] = 200 + i
    rgc.ll_arraycopy(a1, a2, 4, 2, 3)
    for i in range(10):
        assert a1[i] == 100 + i
    for i in range(6):
        if 2 <= i < 5:
            assert a2[i] == a1[i+2]
        else:
            assert a2[i] == 200 + i


def test_ll_arraycopy_2():
    """Copying arrays of Void items must simply not crash."""
    TYPE = lltype.GcArray(lltype.Void)
    a1 = lltype.malloc(TYPE, 10)
    a2 = lltype.malloc(TYPE, 6)
    rgc.ll_arraycopy(a1, a2, 4, 2, 3)
    # nothing to assert here, should not crash...


def test_ll_arraycopy_3():
    """Copy arrays of pointers to NON-gc structs; pointer identity preserved."""
    S = lltype.Struct('S')  # non-gc
    TYPE = lltype.GcArray(lltype.Ptr(S))
    a1 = lltype.malloc(TYPE, 10)
    a2 = lltype.malloc(TYPE, 6)
    org1 = [None] * 10
    org2 = [None] * 6
    for i in range(10): a1[i] = org1[i] = lltype.malloc(S, immortal=True)
    for i in range(6): a2[i] = org2[i] = lltype.malloc(S, immortal=True)
    rgc.ll_arraycopy(a1, a2, 4, 2, 3)
    for i in range(10):
        assert a1[i] == org1[i]
    for i in range(6):
        if 2 <= i < 5:
            assert a2[i] == a1[i+2]
        else:
            assert a2[i] == org2[i]


def test_ll_arraycopy_4():
    """Copy arrays of pointers to GC structs; pointer identity preserved."""
    S = lltype.GcStruct('S')
    TYPE = lltype.GcArray(lltype.Ptr(S))
    a1 = lltype.malloc(TYPE, 10)
    a2 = lltype.malloc(TYPE, 6)
    org1 = [None] * 10
    org2 = [None] * 6
    for i in range(10): a1[i] = org1[i] = lltype.malloc(S)
    for i in range(6): a2[i] = org2[i] = lltype.malloc(S)
    rgc.ll_arraycopy(a1, a2, 4, 2, 3)
    for i in range(10):
        assert a1[i] == org1[i]
    for i in range(6):
        if 2 <= i < 5:
            assert a2[i] == a1[i+2]
        else:
            assert a2[i] == org2[i]


def test_ll_arraycopy_5(monkeypatch):
    """When interpreted, ll_arraycopy must route through llmemory.raw_memcopy."""
    S = lltype.GcStruct('S')
    TYPE = lltype.GcArray(lltype.Ptr(S))
    def f():
        a1 = lltype.malloc(TYPE, 10)
        a2 = lltype.malloc(TYPE, 6)
        rgc.ll_arraycopy(a2, a1, 0, 1, 5)
    # Flag struct observed from the patched raw_memcopy below.
    CHK = lltype.Struct('CHK', ('called', lltype.Bool))
    check = lltype.malloc(CHK, immortal=True)
    def raw_memcopy(*args):
        check.called = True
    monkeypatch.setattr(llmemory, "raw_memcopy", raw_memcopy)
    interpret(f, [])
    assert check.called
def test_ll_arraycopy_array_of_structs():
    """ll_arraycopy over arrays of inline structs, interpreted and direct."""
    TP = lltype.GcArray(lltype.Struct('x', ('x', lltype.Signed),
                                      ('y', lltype.Signed)))
    def f():
        a1 = lltype.malloc(TP, 3)
        a2 = lltype.malloc(TP, 3)
        for i in range(3):
            a1[i].x = 2 * i
            a1[i].y = 2 * i + 1
        rgc.ll_arraycopy(a1, a2, 0, 0, 3)
        for i in range(3):
            assert a2[i].x == 2 * i
            assert a2[i].y == 2 * i + 1
    interpret(f, [])
    # Direct (untranslated) check: copy a single struct item a1[1] -> a2[2].
    a1 = lltype.malloc(TP, 3)
    a2 = lltype.malloc(TP, 3)
    a1[1].x = 3
    a1[1].y = 15
    rgc.copy_struct_item(a1, a2, 1, 2)
    assert a2[2].x == 3
    assert a2[2].y == 15
def test_ll_arrayclear():
    """ll_arrayclear zeroes every item but keeps the array length."""
    TYPE = lltype.GcArray(lltype.Signed)
    a1 = lltype.malloc(TYPE, 10)
    for i in range(10):
        a1[i] = 100 + i
    rgc.ll_arrayclear(a1)
    assert len(a1) == 10
    for i in range(10):
        assert a1[i] == 0
def test__contains_gcptr():
    """rgc._contains_gcptr detects GC pointers, including nested inside structs."""
    assert not rgc._contains_gcptr(lltype.Signed)
    assert not rgc._contains_gcptr(
        lltype.Struct('x', ('x', lltype.Signed)))
    assert rgc._contains_gcptr(
        lltype.Struct('x', ('x', lltype.Signed),
                      ('y', lltype.Ptr(lltype.GcArray(lltype.Signed)))))
    assert rgc._contains_gcptr(
        lltype.Struct('x', ('x', lltype.Signed),
                      ('y', llmemory.GCREF)))
    assert rgc._contains_gcptr(lltype.Ptr(lltype.GcStruct('x')))
    assert not rgc._contains_gcptr(lltype.Ptr(lltype.Struct('x')))
    # A GC pointer buried inside a nested inline struct must also be found.
    GCPTR = lltype.Ptr(lltype.GcStruct('x'))
    assert rgc._contains_gcptr(
        lltype.Struct('FOO', ('s', lltype.Struct('BAR', ('y', GCPTR)))))
def test_ll_arraycopy_small():
    """ll_arraycopy with lengths 0..4, including the zero-length edge case."""
    TYPE = lltype.GcArray(lltype.Signed)
    for length in range(5):
        a1 = lltype.malloc(TYPE, 10)
        a2 = lltype.malloc(TYPE, 6)
        org1 = range(20, 30)
        org2 = range(50, 56)
        for i in range(len(a1)): a1[i] = org1[i]
        for i in range(len(a2)): a2[i] = org2[i]
        rgc.ll_arraycopy(a1, a2, 4, 2, length)
        for i in range(10):
            assert a1[i] == org1[i]
        for i in range(6):
            if 2 <= i < 2 + length:
                assert a2[i] == a1[i+2]
            else:
                assert a2[i] == org2[i]
def test_ll_shrink_array_1():
    # Placeholder for shrinking GcStructs/GcArrays not shaped like STR/UNICODE.
    py.test.skip("implement ll_shrink_array for GcStructs or GcArrays that "
                 "don't have the shape of STR or UNICODE")


def test_ll_shrink_array_2():
    """Shrinking a var-sized GcStruct keeps the fixed field and the kept items."""
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                        ('vars', lltype.Array(lltype.Signed)))
    s1 = lltype.malloc(S, 5)
    s1.x = 1234
    for i in range(5):
        s1.vars[i] = 50 + i
    s2 = rgc.ll_shrink_array(s1, 3)
    assert lltype.typeOf(s2) == lltype.Ptr(S)
    assert s2.x == 1234
    assert len(s2.vars) == 3
    for i in range(3):
        assert s2.vars[i] == 50 + i
def test_get_referents():
    """get_rpy_referents finds objects reachable from x1, and only those."""
    class X(object):
        __slots__ = ['stuff']
    x1 = X()
    x1.stuff = X()
    x2 = X()
    lst = rgc.get_rpy_referents(rgc.cast_instance_to_gcref(x1))
    lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst]
    assert x1.stuff in lst2
    assert x2 not in lst2


def test_get_memory_usage():
    """get_rpy_memory_usage returns a plausible byte size for a small instance."""
    class X(object):
        pass
    x1 = X()
    n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1))
    # Loose sanity bounds rather than an exact, GC-dependent size.
    assert n >= 8 and n <= 64
def test_register_custom_trace_hook():
    """register_custom_trace_hook records (TP, trace_func) on the typer
    without annotating the trace function itself."""
    TP = lltype.GcStruct('X')
    def trace_func():
        xxx  # should not be annotated here
    lambda_trace_func = lambda: trace_func
    def f():
        rgc.register_custom_trace_hook(TP, lambda_trace_func)
    t, typer, graph = gengraph(f, [])
    assert typer.custom_trace_funcs == [(TP, trace_func)]
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides base classes for working with drivers
"""
from __future__ import with_statement
import sys
import time
import hashlib
import os
import socket
import random
import binascii
from libcloud.utils.py3 import b
import libcloud.compute.ssh
from libcloud.pricing import get_size_price
from libcloud.compute.types import NodeState, StorageVolumeState,\
DeploymentError
from libcloud.compute.ssh import SSHClient
from libcloud.common.base import ConnectionKey
from libcloud.common.base import BaseDriver
from libcloud.common.types import LibcloudError
from libcloud.compute.ssh import have_paramiko
from libcloud.utils.networking import is_private_subnet
from libcloud.utils.networking import is_valid_ip_address
if have_paramiko:
from paramiko.ssh_exception import SSHException
from paramiko.ssh_exception import AuthenticationException
SSH_TIMEOUT_EXCEPTION_CLASSES = (AuthenticationException, SSHException,
IOError, socket.gaierror, socket.error)
else:
SSH_TIMEOUT_EXCEPTION_CLASSES = (IOError, socket.gaierror, socket.error)
# How long to wait for the node to come online after creating it
NODE_ONLINE_WAIT_TIMEOUT = 10 * 60
# How long to try connecting to a remote SSH server when running a deployment
# script.
SSH_CONNECT_TIMEOUT = 5 * 60
__all__ = [
'Node',
'NodeState',
'NodeSize',
'NodeImage',
'NodeLocation',
'NodeAuthSSHKey',
'NodeAuthPassword',
'NodeDriver',
'StorageVolume',
'StorageVolumeState',
'VolumeSnapshot',
# Deprecated, moved to libcloud.utils.networking
'is_private_subnet',
'is_valid_ip_address'
]
class UuidMixin(object):
    """
    Mixin providing a lazily computed, cached ``uuid`` for driver objects.
    """

    def __init__(self):
        # Cache for the digest; populated on first get_uuid() call.
        self._uuid = None

    def get_uuid(self):
        """
        Unique hash for a node, node image, or node size.

        The hash is the SHA1 digest of ``"<id>:<driver type>"``, so it
        should be unique between all objects of its type.

        In some subclasses (e.g. GoGridNode) there is no ID available so
        the public IP address is used. This means that, unlike a properly
        done system UUID, the same UUID may mean a different system
        install at a different time.

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node = driver.create_node()
        >>> node.get_uuid()
        'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'

        Note, for example, that this example will always produce the
        same UUID!

        :rtype: ``str``
        """
        if not self._uuid:
            payload = b('%s:%s' % (self.id, self.driver.type))
            self._uuid = hashlib.sha1(payload).hexdigest()
        return self._uuid

    @property
    def uuid(self):
        """Property alias for :meth:`get_uuid`."""
        return self.get_uuid()
class Node(UuidMixin):
    """
    Provide a common interface for handling nodes of all types.

    The Node object provides the interface in libcloud through which
    we can manipulate nodes in different cloud providers in the same
    way. Node objects don't actually do much directly themselves,
    instead the node driver handles the connection to the node.

    You don't normally create a node object yourself; instead you use
    a driver and then have that create the node for you.

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> node = driver.create_node()
    >>> node.public_ips[0]
    '127.0.0.3'
    >>> node.name
    'dummy-3'

    You can also get nodes from the driver's list_node function.

    >>> node = driver.list_nodes()[0]
    >>> node.name
    'dummy-1'

    The node keeps a reference to its own driver which means that we
    can work on nodes from different providers without having to know
    which is which.

    >>> driver = DummyNodeDriver(72)
    >>> node2 = driver.create_node()
    >>> node.driver.creds
    0
    >>> node2.driver.creds
    72

    Although Node objects can be subclassed, this isn't normally
    done. Instead, any driver specific information is stored in the
    "extra" attribute of the node.

    >>> node.extra
    {'foo': 'bar'}
    """

    def __init__(self, id, name, state, public_ips, private_ips,
                 driver, size=None, image=None, extra=None):
        """
        :param id: Node ID.
        :type id: ``str``

        :param name: Node name.
        :type name: ``str``

        :param state: Node state.
        :type state: :class:`libcloud.compute.types.NodeState`

        :param public_ips: Public IP addresses associated with this node.
        :type public_ips: ``list``

        :param private_ips: Private IP addresses associated with this node.
        :type private_ips: ``list``

        :param driver: Driver this node belongs to.
        :type driver: :class:`.NodeDriver`

        :param size: Size of this node. (optional)
        :type size: :class:`.NodeSize`

        :param image: Image of this node. (optional)
        :type image: :class:`.NodeImage`

        :param extra: Optional provider specific attributes associated with
                      this node.
        :type extra: ``dict``
        """
        # NOTE(review): a falsy id (0, '') collapses to None here — assumes
        # drivers only omit the id entirely; confirm no driver uses id=0.
        self.id = str(id) if id else None
        self.name = name
        self.state = state
        # Normalize missing IP lists to empty lists so callers can iterate
        # unconditionally.
        self.public_ips = public_ips if public_ips else []
        self.private_ips = private_ips if private_ips else []
        self.driver = driver
        self.size = size
        self.image = image
        self.extra = extra or {}
        UuidMixin.__init__(self)

    def reboot(self):
        """
        Reboot this node

        :return: ``bool``

        This calls the node's driver and reboots the node

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node = driver.create_node()
        >>> node.state == NodeState.RUNNING
        True
        >>> node.state == NodeState.REBOOTING
        False
        >>> node.reboot()
        True
        >>> node.state == NodeState.REBOOTING
        True
        """
        return self.driver.reboot_node(self)

    def destroy(self):
        """
        Destroy this node

        :return: ``bool``

        This calls the node's driver and destroys the node

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> from libcloud.compute.types import NodeState
        >>> node = driver.create_node()
        >>> node.state == NodeState.RUNNING
        True
        >>> node.destroy()
        True
        >>> node.state == NodeState.RUNNING
        False
        """
        return self.driver.destroy_node(self)

    def __repr__(self):
        state = NodeState.tostring(self.state)
        return (('<Node: uuid=%s, name=%s, state=%s, public_ips=%s, '
                 'private_ips=%s, provider=%s ...>')
                % (self.uuid, self.name, state, self.public_ips,
                   self.private_ips, self.driver.name))
class NodeSize(UuidMixin):
    """
    A Base NodeSize class to derive from.

    NodeSizes are objects which are typically returned a driver's
    list_sizes function. They contain a number of different
    parameters which define how big an image is.

    The exact parameters available depends on the provider.

    N.B. Where a parameter is "unlimited" (for example bandwidth in
    Amazon) this will be given as 0.

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> size = driver.list_sizes()[0]
    >>> size.ram
    128
    >>> size.bandwidth
    500
    >>> size.price
    4
    """

    def __init__(self, id, name, ram, disk, bandwidth, price,
                 driver, extra=None):
        """
        :param id: Size ID.
        :type id: ``str``

        :param name: Size name.
        :type name: ``str``

        :param ram: Amount of memory (in MB) provided by this size.
        :type ram: ``int``

        :param disk: Amount of disk storage (in GB) provided by this image.
        :type disk: ``int``

        :param bandwidth: Amount of bandwidth included with this size.
        :type bandwidth: ``int``

        :param price: Price (in US dollars) of running this node for an hour.
        :type price: ``float``

        :param driver: Driver this size belongs to.
        :type driver: :class:`.NodeDriver`

        :param extra: Optional provider specific attributes associated with
                      this size.
        :type extra: ``dict``
        """
        self.id = str(id)
        self.name = name
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver
        self.extra = extra or {}
        UuidMixin.__init__(self)

    def __repr__(self):
        return (('<NodeSize: id=%s, name=%s, ram=%s disk=%s bandwidth=%s '
                 'price=%s driver=%s ...>')
                % (self.id, self.name, self.ram, self.disk, self.bandwidth,
                   self.price, self.driver.name))
class NodeImage(UuidMixin):
    """
    An operating system image.

    NodeImage objects are typically returned by the driver for the
    cloud provider in response to the list_images function

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> image = driver.list_images()[0]
    >>> image.name
    'Ubuntu 9.10'

    Apart from name and id, there is no further standard information;
    other parameters are stored in a driver specific "extra" variable

    When creating a node, a node image should be given as an argument
    to the create_node function to decide which OS image to use.

    >>> node = driver.create_node(image=image)
    """

    def __init__(self, id, name, driver, extra=None):
        """
        :param id: Image ID.
        :type id: ``str``

        :param name: Image name.
        :type name: ``str``

        :param driver: Driver this image belongs to.
        :type driver: :class:`.NodeDriver`

        :param extra: Optional provider specific attributes associated with
                      this image.
        :type extra: ``dict``
        """
        self.id = str(id)
        self.name = name
        self.driver = driver
        self.extra = extra or {}
        UuidMixin.__init__(self)

    def __repr__(self):
        return (('<NodeImage: id=%s, name=%s, driver=%s ...>')
                % (self.id, self.name, self.driver.name))
class NodeLocation(object):
    """
    A physical data center location in which nodes may reside.

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> location = driver.list_locations()[0]
    >>> location.country
    'US'
    """

    def __init__(self, id, name, country, driver):
        """
        :param id: Location ID.
        :type id: ``str``

        :param name: Location name.
        :type name: ``str``

        :param country: Location country.
        :type country: ``str``

        :param driver: Driver this location belongs to.
        :type driver: :class:`.NodeDriver`
        """
        self.driver = driver
        self.country = country
        self.name = name
        self.id = str(id)

    def __repr__(self):
        template = '<NodeLocation: id=%s, name=%s, country=%s, driver=%s>'
        return template % (self.id, self.name, self.country,
                           self.driver.name)
class NodeAuthSSHKey(object):
    """
    An SSH key to be installed on a node for authentication.

    This holds the actual contents of the user's SSH public key, which
    will normally be installed as root's public key on the node.

    >>> pubkey = '...' # read from file
    >>> from libcloud.compute.base import NodeAuthSSHKey
    >>> k = NodeAuthSSHKey(pubkey)
    >>> k
    <NodeAuthSSHKey>
    """

    def __init__(self, pubkey):
        """
        :param pubkey: Public key material.
        :type pubkey: ``str``
        """
        self.pubkey = pubkey

    def __repr__(self):
        return '<NodeAuthSSHKey>'
class NodeAuthPassword(object):
    """
    A password to be used for authentication to a node.
    """

    def __init__(self, password, generated=False):
        """
        :param password: Password.
        :type password: ``str``

        :param generated: ``True`` if this password was automatically
                          generated, ``False`` otherwise.
        :type generated: ``bool``
        """
        self.generated = generated
        self.password = password

    def __repr__(self):
        return '<NodeAuthPassword>'
class StorageVolume(UuidMixin):
    """
    A base StorageVolume class to derive from.
    """

    def __init__(self, id, name, size, driver,
                 state=None, extra=None):
        """
        :param id: Storage volume ID.
        :type id: ``str``

        :param name: Storage volume name.
        :type name: ``str``

        :param size: Size of this volume (in GB).
        :type size: ``int``

        :param driver: Driver this volume belongs to.
        :type driver: :class:`.NodeDriver`

        :param state: Optional state of the StorageVolume. May be ``None``
                      if the provider does not report one.
        :type state: :class:`.StorageVolumeState`

        :param extra: Optional provider specific attributes.
        :type extra: ``dict``
        """
        self.id = id
        self.name = name
        self.size = size
        self.driver = driver
        # Normalize a missing "extra" to an empty dict for consistency with
        # the other resource classes (NodeSize, NodeImage, VolumeSnapshot).
        self.extra = extra or {}
        self.state = state
        UuidMixin.__init__(self)

    def list_snapshots(self):
        """
        List all snapshots of this volume.

        :rtype: ``list`` of ``VolumeSnapshot``
        """
        return self.driver.list_volume_snapshots(volume=self)

    def attach(self, node, device=None):
        """
        Attach this volume to a node.

        :param node: Node to attach volume to
        :type node: :class:`.Node`

        :param device: Where the device is exposed,
                       e.g. '/dev/sdb (optional)
        :type device: ``str``

        :return: ``True`` if attach was successful, ``False`` otherwise.
        :rtype: ``bool``
        """
        return self.driver.attach_volume(node=node, volume=self, device=device)

    def detach(self):
        """
        Detach this volume from its node

        :return: ``True`` if detach was successful, ``False`` otherwise.
        :rtype: ``bool``
        """
        return self.driver.detach_volume(volume=self)

    def snapshot(self, name):
        """
        Creates a snapshot of this volume.

        :param name: Name of the created snapshot.
        :type name: ``str``

        :return: Created snapshot.
        :rtype: ``VolumeSnapshot``
        """
        return self.driver.create_volume_snapshot(volume=self, name=name)

    def destroy(self):
        """
        Destroy this storage volume.

        :return: ``True`` if destroy was successful, ``False`` otherwise.
        :rtype: ``bool``
        """
        return self.driver.destroy_volume(volume=self)

    def __repr__(self):
        return '<StorageVolume id=%s size=%s driver=%s>' % (
               self.id, self.size, self.driver.name)
class VolumeSnapshot(object):
    """
    A base VolumeSnapshot class to derive from.
    """

    def __init__(self, id, driver, size=None, extra=None, created=None):
        """
        VolumeSnapshot constructor.

        :param id: Snapshot ID.
        :type id: ``str``

        :param driver: The driver that represents a connection to the
                       provider.
        :type driver: `NodeDriver`

        :param size: A snapshot size in GB.
        :type size: ``int``

        :param extra: Provider dependent parameters for the snapshot.
        :type extra: ``dict``

        :param created: A datetime object that represents when the
                        snapshot was created.
        :type created: ``datetime.datetime``
        """
        self.id = id
        self.driver = driver
        self.size = size
        self.created = created
        self.extra = extra or {}

    def destroy(self):
        """
        Destroys this snapshot.

        :rtype: ``bool``
        """
        return self.driver.destroy_volume_snapshot(snapshot=self)

    def __repr__(self):
        details = (self.id, self.size, self.driver.name)
        return '<VolumeSnapshot id=%s size=%s driver=%s>' % details
class KeyPair(object):
    """
    Represents a SSH key pair.
    """

    def __init__(self, name, public_key, fingerprint, driver, private_key=None,
                 extra=None):
        """
        Constructor.

        :keyword name: Name of the key pair object.
        :type name: ``str``

        :keyword public_key: Public key in OpenSSH format.
        :type public_key: ``str``

        :keyword fingerprint: Key fingerprint.
        :type fingerprint: ``str``

        :keyword private_key: Private key in PEM format.
        :type private_key: ``str``

        :keyword extra: Provider specific attributes associated with this
                        key pair. (optional)
        :type extra: ``dict``
        """
        self.driver = driver
        self.name = name
        self.public_key = public_key
        self.private_key = private_key
        self.fingerprint = fingerprint
        self.extra = extra or {}

    def __repr__(self):
        template = '<KeyPair name=%s fingerprint=%s driver=%s>'
        return template % (self.name, self.fingerprint, self.driver.name)
class NodeDriver(BaseDriver):
    """
    A base NodeDriver class to derive from

    This class is always subclassed by a specific driver. For
    examples of base behavior of most functions (except deploy node)
    see the dummy driver.
    """

    # Connection class used to talk to the provider API.
    connectionCls = ConnectionKey
    name = None
    type = None
    port = None
    features = {'create_node': []}
    """
    List of available features for a driver.
        - :meth:`libcloud.compute.base.NodeDriver.create_node`
            - ssh_key: Supports :class:`.NodeAuthSSHKey` as an authentication
              method for nodes.
            - password: Supports :class:`.NodeAuthPassword` as an
              authentication
              method for nodes.
            - generates_password: Returns a password attribute on the Node
              object returned from creation.
    """

    # Mapping of provider-specific node states to libcloud NodeState values;
    # filled in by subclasses.
    NODE_STATE_MAP = {}
    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, **kwargs):
        """
        Initialize the driver; all arguments are forwarded unchanged to
        :class:`BaseDriver`.
        """
        super(NodeDriver, self).__init__(key=key, secret=secret, secure=secure,
                                         host=host, port=port,
                                         api_version=api_version, **kwargs)
    def list_nodes(self):
        """
        List all nodes.

        :return: list of node objects
        :rtype: ``list`` of :class:`.Node`
        """
        raise NotImplementedError(
            'list_nodes not implemented for this driver')
    def list_sizes(self, location=None):
        """
        List sizes on a provider

        :param location: The location at which to list sizes
        :type location: :class:`.NodeLocation`

        :return: list of node size objects
        :rtype: ``list`` of :class:`.NodeSize`
        """
        raise NotImplementedError(
            'list_sizes not implemented for this driver')
    def list_locations(self):
        """
        List data centers for a provider

        :return: list of node location objects
        :rtype: ``list`` of :class:`.NodeLocation`
        """
        raise NotImplementedError(
            'list_locations not implemented for this driver')
    def create_node(self, **kwargs):
        """
        Create a new node instance. This instance will be started
        automatically.

        Not all hosting API's are created equal and to allow libcloud to
        support as many as possible there are some standard supported
        variations of ``create_node``. These are declared using a
        ``features`` API.

        You can inspect ``driver.features['create_node']`` to see what
        variation of the API you are dealing with:

        ``ssh_key``
            You can inject a public key into a new node allows key based SSH
            authentication.

        ``password``
            You can inject a password into a new node for SSH authentication.
            If no password is provided libcloud will generated a password.
            The password will be available as
            ``return_value.extra['password']``.

        ``generates_password``
            The hosting provider will generate a password. It will be returned
            to you via ``return_value.extra['password']``.

        Some drivers allow you to set how you will authenticate with the
        instance that is created. You can inject this initial authentication
        information via the ``auth`` parameter.

        If a driver supports the ``ssh_key`` feature flag for ``created_node``
        you can upload a public key into the new instance::

            >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
            >>> driver = DummyNodeDriver(0)
            >>> auth = NodeAuthSSHKey('pubkey data here')
            >>> node = driver.create_node("test_node", auth=auth)

        If a driver supports the ``password`` feature flag for ``create_node``
        you can set a password::

            >>> driver = DummyNodeDriver(0)
            >>> auth = NodeAuthPassword('mysecretpassword')
            >>> node = driver.create_node("test_node", auth=auth)

        If a driver supports the ``password`` feature and you don't provide the
        ``auth`` argument libcloud will assign a password::

            >>> driver = DummyNodeDriver(0)
            >>> node = driver.create_node("test_node")
            >>> password = node.extra['password']

        A password will also be returned in this way for drivers that declare
        the ``generates_password`` feature, though in that case the password is
        actually provided to the driver API by the hosting provider rather than
        generated by libcloud.

        You can only pass a :class:`.NodeAuthPassword` or
        :class:`.NodeAuthSSHKey` to ``create_node`` via the auth parameter if
        has the corresponding feature flag.

        :param name: String with a name for this new node (required)
        :type name: ``str``

        :param size: The size of resources allocated to this node.
                     (required)
        :type size: :class:`.NodeSize`

        :param image: OS Image to boot on node. (required)
        :type image: :class:`.NodeImage`

        :param location: Which data center to create a node in. If empty,
                         undefined behavior will be selected. (optional)
        :type location: :class:`.NodeLocation`

        :param auth: Initial authentication information for the node
                     (optional)
        :type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword`

        :return: The newly created node.
        :rtype: :class:`.Node`
        """
        raise NotImplementedError(
            'create_node not implemented for this driver')
def deploy_node(self, **kwargs):
"""
Create a new node, and start deployment.
In order to be able to SSH into a created node access credentials are
required.
A user can pass either a :class:`.NodeAuthPassword` or
:class:`.NodeAuthSSHKey` to the ``auth`` argument. If the
``create_node`` implementation supports that kind if credential (as
declared in ``self.features['create_node']``) then it is passed on to
``create_node``. Otherwise it is not passed on to ``create_node`` and
it is only used for authentication.
If the ``auth`` parameter is not supplied but the driver declares it
supports ``generates_password`` then the password returned by
``create_node`` will be used to SSH into the server.
Finally, if the ``ssh_key_file`` is supplied that key will be used to
SSH into the server.
This function may raise a :class:`DeploymentException`, if a
create_node call was successful, but there is a later error (like SSH
failing or timing out). This exception includes a Node object which
you may want to destroy if incomplete deployments are not desirable.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> from libcloud.compute.deployment import ScriptDeployment
>>> from libcloud.compute.deployment import MultiStepDeployment
>>> from libcloud.compute.base import NodeAuthSSHKey
>>> driver = DummyNodeDriver(0)
>>> key = NodeAuthSSHKey('...') # read from file
>>> script = ScriptDeployment("yum -y install emacs strace tcpdump")
>>> msd = MultiStepDeployment([key, script])
>>> def d():
... try:
... driver.deploy_node(deploy=msd)
... except NotImplementedError:
... print ("not implemented for dummy driver")
>>> d()
not implemented for dummy driver
Deploy node is typically not overridden in subclasses. The
existing implementation should be able to handle most such.
:param deploy: Deployment to run once machine is online and
available to SSH.
:type deploy: :class:`Deployment`
:param ssh_username: Optional name of the account which is used
when connecting to
SSH server (default is root)
:type ssh_username: ``str``
:param ssh_alternate_usernames: Optional list of ssh usernames to
try to connect with if using the
default one fails
:type ssh_alternate_usernames: ``list``
:param ssh_port: Optional SSH server port (default is 22)
:type ssh_port: ``int``
:param ssh_timeout: Optional SSH connection timeout in seconds
(default is 10)
:type ssh_timeout: ``float``
:param auth: Initial authentication information for the node
(optional)
:type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword`
:param ssh_key: A path (or paths) to an SSH private key with which
to attempt to authenticate. (optional)
:type ssh_key: ``str`` or ``list`` of ``str``
:param timeout: How many seconds to wait before timing out.
(default is 600)
:type timeout: ``int``
:param max_tries: How many times to retry if a deployment fails
before giving up (default is 3)
:type max_tries: ``int``
:param ssh_interface: The interface to wait for. Default is
'public_ips', other option is 'private_ips'.
:type ssh_interface: ``str``
"""
if not libcloud.compute.ssh.have_paramiko:
raise RuntimeError('paramiko is not installed. You can install ' +
'it using pip: pip install paramiko')
if 'auth' in kwargs:
auth = kwargs['auth']
if not isinstance(auth, (NodeAuthSSHKey, NodeAuthPassword)):
raise NotImplementedError(
'If providing auth, only NodeAuthSSHKey or'
'NodeAuthPassword is supported')
elif 'ssh_key' in kwargs:
# If an ssh_key is provided we can try deploy_node
pass
elif 'create_node' in self.features:
f = self.features['create_node']
if 'generates_password' not in f and "password" not in f:
raise NotImplementedError(
'deploy_node not implemented for this driver')
else:
raise NotImplementedError(
'deploy_node not implemented for this driver')
node = self.create_node(**kwargs)
max_tries = kwargs.get('max_tries', 3)
password = None
if 'auth' in kwargs:
if isinstance(kwargs['auth'], NodeAuthPassword):
password = kwargs['auth'].password
elif 'password' in node.extra:
password = node.extra['password']
ssh_interface = kwargs.get('ssh_interface', 'public_ips')
# Wait until node is up and running and has IP assigned
try:
node, ip_addresses = self.wait_until_running(
nodes=[node],
wait_period=3,
timeout=kwargs.get('timeout', NODE_ONLINE_WAIT_TIMEOUT),
ssh_interface=ssh_interface)[0]
except Exception:
e = sys.exc_info()[1]
raise DeploymentError(node=node, original_exception=e, driver=self)
ssh_username = kwargs.get('ssh_username', 'root')
ssh_alternate_usernames = kwargs.get('ssh_alternate_usernames', [])
ssh_port = kwargs.get('ssh_port', 22)
ssh_timeout = kwargs.get('ssh_timeout', 10)
ssh_key_file = kwargs.get('ssh_key', None)
timeout = kwargs.get('timeout', SSH_CONNECT_TIMEOUT)
deploy_error = None
for username in ([ssh_username] + ssh_alternate_usernames):
try:
self._connect_and_run_deployment_script(
task=kwargs['deploy'], node=node,
ssh_hostname=ip_addresses[0], ssh_port=ssh_port,
ssh_username=username, ssh_password=password,
ssh_key_file=ssh_key_file, ssh_timeout=ssh_timeout,
timeout=timeout, max_tries=max_tries)
except Exception:
# Try alternate username
# Todo: Need to fix paramiko so we can catch a more specific
# exception
e = sys.exc_info()[1]
deploy_error = e
else:
# Script successfully executed, don't try alternate username
deploy_error = None
break
if deploy_error is not None:
raise DeploymentError(node=node, original_exception=deploy_error,
driver=self)
return node
    def reboot_node(self, node):
        """
        Reboot a node.

        :param node: The node to be rebooted
        :type node: :class:`.Node`

        :return: True if the reboot was successful, otherwise False
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'reboot_node not implemented for this driver')
    def destroy_node(self, node):
        """
        Destroy a node.

        Depending upon the provider, this may destroy all data associated with
        the node, including backups.

        :param node: The node to be destroyed
        :type node: :class:`.Node`

        :return: True if the destroy was successful, False otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'destroy_node not implemented for this driver')
##
# Volume and snapshot management methods
##
    def list_volumes(self):
        """
        List storage volumes.

        :rtype: ``list`` of :class:`.StorageVolume`
        """
        raise NotImplementedError(
            'list_volumes not implemented for this driver')
    def list_volume_snapshots(self, volume):
        """
        List snapshots for a storage volume.

        :param volume: Volume to list snapshots for.
        :type volume: :class:`.StorageVolume`

        :rtype: ``list`` of :class:`VolumeSnapshot`
        """
        raise NotImplementedError(
            'list_volume_snapshots not implemented for this driver')
    def create_volume(self, size, name, location=None, snapshot=None):
        """
        Create a new volume.

        :param size: Size of volume in gigabytes (required)
        :type size: ``int``

        :param name: Name of the volume to be created
        :type name: ``str``

        :param location: Which data center to create a volume in. If
                         empty, undefined behavior will be selected.
                         (optional)
        :type location: :class:`.NodeLocation`

        :param snapshot: Snapshot from which to create the new
                         volume. (optional)
        :type snapshot: :class:`.VolumeSnapshot`

        :return: The newly created volume.
        :rtype: :class:`StorageVolume`
        """
        raise NotImplementedError(
            'create_volume not implemented for this driver')
    def create_volume_snapshot(self, volume, name=None):
        """
        Creates a snapshot of the storage volume.

        :param volume: The StorageVolume to create a VolumeSnapshot from
        :type volume: :class:`.StorageVolume`

        :param name: Name of created snapshot (optional)
        :type name: `str`

        :rtype: :class:`VolumeSnapshot`
        """
        raise NotImplementedError(
            'create_volume_snapshot not implemented for this driver')
    def attach_volume(self, node, volume, device=None):
        """
        Attaches volume to node.

        :param node: Node to attach volume to.
        :type node: :class:`.Node`

        :param volume: Volume to attach.
        :type volume: :class:`.StorageVolume`

        :param device: Where the device is exposed, e.g. '/dev/sdb'
        :type device: ``str``

        :rtype: ``bool``
        """
        raise NotImplementedError('attach not implemented for this driver')
    def detach_volume(self, volume):
        """
        Detaches a volume from a node.

        :param volume: Volume to be detached
        :type volume: :class:`.StorageVolume`

        :rtype: ``bool``
        """
        raise NotImplementedError('detach not implemented for this driver')
    def destroy_volume(self, volume):
        """
        Destroys a storage volume.

        :param volume: Volume to be destroyed
        :type volume: :class:`StorageVolume`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'destroy_volume not implemented for this driver')
    def destroy_volume_snapshot(self, snapshot):
        """
        Destroys a snapshot.

        :param snapshot: The snapshot to delete
        :type snapshot: :class:`VolumeSnapshot`

        :rtype: :class:`bool`
        """
        raise NotImplementedError(
            'destroy_volume_snapshot not implemented for this driver')
##
# Image management methods
##
    def list_images(self, location=None):
        """
        List images on a provider.

        :param location: The location at which to list images.
        :type location: :class:`.NodeLocation`

        :return: list of node image objects.
        :rtype: ``list`` of :class:`.NodeImage`
        """
        raise NotImplementedError(
            'list_images not implemented for this driver')
    def create_image(self, node, name, description=None):
        """
        Creates an image from a node object.

        :param node: Node to run the task on.
        :type node: :class:`.Node`

        :param name: name for new image.
        :type name: ``str``

        :param description: description for new image.
        :type description: ``str``

        :rtype: :class:`.NodeImage`:
        :return: NodeImage instance on success.
        """
        raise NotImplementedError(
            'create_image not implemented for this driver')
    def delete_image(self, node_image):
        """
        Deletes a node image from a provider.

        :param node_image: Node image object.
        :type node_image: :class:`.NodeImage`

        :return: ``True`` if delete_image was successful, ``False`` otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete_image not implemented for this driver')
    def get_image(self, image_id):
        """
        Returns a single node image from a provider.

        :param image_id: ID of the image to retrieve.
        :type image_id: ``str``

        :rtype: :class:`.NodeImage`:
        :return: NodeImage instance on success.
        """
        raise NotImplementedError(
            'get_image not implemented for this driver')
    def copy_image(self, source_region, node_image, name, description=None):
        """
        Copies an image from a source region to the current region.

        :param source_region: Region to copy the node from.
        :type source_region: ``str``

        :param node_image: NodeImage to copy.
        :type node_image: :class:`.NodeImage`:

        :param name: name for new image.
        :type name: ``str``

        :param description: description for new image.
        :type description: ``str``

        :rtype: :class:`.NodeImage`:
        :return: NodeImage instance on success.
        """
        raise NotImplementedError(
            'copy_image not implemented for this driver')
##
# SSH key pair management methods
##
    def list_key_pairs(self):
        """
        List all the available key pair objects.

        :rtype: ``list`` of :class:`.KeyPair` objects
        """
        raise NotImplementedError(
            'list_key_pairs not implemented for this driver')
    def get_key_pair(self, name):
        """
        Retrieve a single key pair.

        :param name: Name of the key pair to retrieve.
        :type name: ``str``

        :rtype: :class:`.KeyPair`
        """
        raise NotImplementedError(
            'get_key_pair not implemented for this driver')
    def create_key_pair(self, name):
        """
        Create a new key pair object.

        :param name: Key pair name.
        :type name: ``str``

        :rtype: :class:`.KeyPair`
        """
        raise NotImplementedError(
            'create_key_pair not implemented for this driver')
    def import_key_pair_from_string(self, name, key_material):
        """
        Import a new public key from string.

        :param name: Key pair name.
        :type name: ``str``

        :param key_material: Public key material.
        :type key_material: ``str``

        :rtype: :class:`.KeyPair` object
        """
        raise NotImplementedError(
            'import_key_pair_from_string not implemented for this driver')
def import_key_pair_from_file(self, name, key_file_path):
"""
Import a new public key from string.
:param name: Key pair name.
:type name: ``str``
:param key_file_path: Path to the public key file.
:type key_file_path: ``str``
:rtype: :class:`.KeyPair` object
"""
key_file_path = os.path.expanduser(key_file_path)
with open(key_file_path, 'r') as fp:
key_material = fp.read()
return self.import_key_pair_from_string(name=name,
key_material=key_material)
    def delete_key_pair(self, key_pair):
        """
        Delete an existing key pair.

        :param key_pair: Key pair object.
        :type key_pair: :class:`.KeyPair`
        """
        raise NotImplementedError(
            'delete_key_pair not implemented for this driver')
def wait_until_running(self, nodes, wait_period=3, timeout=600,
ssh_interface='public_ips', force_ipv4=True):
"""
Block until the provided nodes are considered running.
Node is considered running when it's state is "running" and when it has
at least one IP address assigned.
:param nodes: List of nodes to wait for.
:type nodes: ``list`` of :class:`.Node`
:param wait_period: How many seconds to wait between each loop
iteration. (default is 3)
:type wait_period: ``int``
:param timeout: How many seconds to wait before giving up.
(default is 600)
:type timeout: ``int``
:param ssh_interface: Which attribute on the node to use to obtain
an IP address. Valid options: public_ips,
private_ips. Default is public_ips.
:type ssh_interface: ``str``
:param force_ipv4: Ignore IPv6 addresses (default is True).
:type force_ipv4: ``bool``
:return: ``[(Node, ip_addresses)]`` list of tuple of Node instance and
list of ip_address on success.
:rtype: ``list`` of ``tuple``
"""
def is_supported(address):
"""
Return True for supported address.
"""
if force_ipv4 and not is_valid_ip_address(address=address,
family=socket.AF_INET):
return False
return True
def filter_addresses(addresses):
"""
Return list of supported addresses.
"""
return [address for address in addresses if is_supported(address)]
if ssh_interface not in ['public_ips', 'private_ips']:
raise ValueError('ssh_interface argument must either be' +
'public_ips or private_ips')
start = time.time()
end = start + timeout
uuids = set([node.uuid for node in nodes])
while time.time() < end:
all_nodes = self.list_nodes()
matching_nodes = list([node for node in all_nodes
if node.uuid in uuids])
if len(matching_nodes) > len(uuids):
found_uuids = [node.uuid for node in matching_nodes]
msg = ('Unable to match specified uuids ' +
'(%s) with existing nodes. Found ' % (uuids) +
'multiple nodes with same uuid: (%s)' % (found_uuids))
raise LibcloudError(value=msg, driver=self)
running_nodes = [node for node in matching_nodes
if node.state == NodeState.RUNNING]
addresses = [filter_addresses(getattr(node, ssh_interface))
for node in running_nodes]
if len(running_nodes) == len(uuids) == len(addresses):
return list(zip(running_nodes, addresses))
else:
time.sleep(wait_period)
continue
raise LibcloudError(value='Timed out after %s seconds' % (timeout),
driver=self)
def _get_and_check_auth(self, auth):
"""
Helper function for providers supporting :class:`.NodeAuthPassword` or
:class:`.NodeAuthSSHKey`
Validates that only a supported object type is passed to the auth
parameter and raises an exception if it is not.
If no :class:`.NodeAuthPassword` object is provided but one is expected
then a password is automatically generated.
"""
if isinstance(auth, NodeAuthPassword):
if 'password' in self.features['create_node']:
return auth
raise LibcloudError(
'Password provided as authentication information, but password'
'not supported', driver=self)
if isinstance(auth, NodeAuthSSHKey):
if 'ssh_key' in self.features['create_node']:
return auth
raise LibcloudError(
'SSH Key provided as authentication information, but SSH Key'
'not supported', driver=self)
if 'password' in self.features['create_node']:
value = os.urandom(16)
value = binascii.hexlify(value).decode('ascii')
# Some providers require password to also include uppercase
# characters so convert some characters to uppercase
password = ''
for char in value:
if not char.isdigit() and char.islower():
if random.randint(0, 1) == 1:
char = char.upper()
password += char
return NodeAuthPassword(password, generated=True)
if auth:
raise LibcloudError(
'"auth" argument provided, but it was not a NodeAuthPassword'
'or NodeAuthSSHKey object', driver=self)
    def _wait_until_running(self, node, wait_period=3, timeout=600,
                            ssh_interface='public_ips', force_ipv4=True):
        # Deprecated single-node wrapper around wait_until_running().
        # This is here for backward compatibility and will be removed in the
        # next major release
        return self.wait_until_running(nodes=[node], wait_period=wait_period,
                                       timeout=timeout,
                                       ssh_interface=ssh_interface,
                                       force_ipv4=force_ipv4)
    def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300):
        """
        Try to connect to the remote SSH server. If a connection times out or
        is refused it is retried up to timeout number of seconds.

        :param ssh_client: A configured SSHClient instance
        :type ssh_client: ``SSHClient``

        :param wait_period: How many seconds to wait between each loop
                            iteration. (default is 1.5)
        :type wait_period: ``int``

        :param timeout: How many seconds to wait before giving up.
                        (default is 300)
        :type timeout: ``int``

        :return: ``SSHClient`` on success
        """
        start = time.time()
        end = start + timeout

        while time.time() < end:
            try:
                ssh_client.connect()
            except SSH_TIMEOUT_EXCEPTION_CLASSES:
                e = sys.exc_info()[1]
                message = str(e).lower()
                expected_msg = 'no such file or directory'

                if isinstance(e, IOError) and expected_msg in message:
                    # Propagate (key) file doesn't exist errors
                    raise e

                # Retry if a connection is refused, timeout occurred,
                # or the connection fails due to failed authentication.
                ssh_client.close()
                time.sleep(wait_period)
                continue
            else:
                return ssh_client

        raise LibcloudError(value='Could not connect to the remote SSH ' +
                            'server. Giving up.', driver=self)
    def _connect_and_run_deployment_script(self, task, node, ssh_hostname,
                                           ssh_port, ssh_username,
                                           ssh_password, ssh_key_file,
                                           ssh_timeout, timeout, max_tries):
        """
        Establish an SSH connection to the node and run the provided deployment
        task.

        :param ssh_timeout: Timeout for a single SSH connection attempt.
        :param timeout: Total time budget for getting the SSH connection up.
        :param max_tries: Retry budget for running the deployment task.

        :rtype: :class:`.Node`:
        :return: Node instance on success.
        """
        ssh_client = SSHClient(hostname=ssh_hostname,
                               port=ssh_port, username=ssh_username,
                               password=ssh_password,
                               key_files=ssh_key_file,
                               timeout=ssh_timeout)
        # Block (with retries) until the node's SSH server accepts a
        # connection or the timeout expires.
        ssh_client = self._ssh_client_connect(ssh_client=ssh_client,
                                              timeout=timeout)
        # Execute the deployment task
        node = self._run_deployment_script(task=task, node=node,
                                           ssh_client=ssh_client,
                                           max_tries=max_tries)
        return node
def _run_deployment_script(self, task, node, ssh_client, max_tries=3):
"""
Run the deployment script on the provided node. At this point it is
assumed that SSH connection has already been established.
:param task: Deployment task to run.
:type task: :class:`Deployment`
:param node: Node to run the task on.
:type node: ``Node``
:param ssh_client: A configured and connected SSHClient instance.
:type ssh_client: :class:`SSHClient`
:param max_tries: How many times to retry if a deployment fails
before giving up. (default is 3)
:type max_tries: ``int``
:rtype: :class:`.Node`
:return: ``Node`` Node instance on success.
"""
tries = 0
while tries < max_tries:
try:
node = task.run(node, ssh_client)
except Exception:
tries += 1
if tries >= max_tries:
e = sys.exc_info()[1]
raise LibcloudError(value='Failed after %d tries: %s'
% (max_tries, str(e)), driver=self)
else:
# Deployment succeeded
ssh_client.close()
return node
    def _get_size_price(self, size_id):
        """
        Return pricing information for the provided size id.

        :param size_id: Provider-specific size identifier.
        :return: Price for this driver's provider, as found in the shared
                 pricing table.
        """
        return get_size_price(driver_type='compute',
                              driver_name=self.api_name,
                              size_id=size_id)
if __name__ == '__main__':
    # When executed directly, run any doctest examples embedded in this
    # module's docstrings.
    import doctest
    doctest.testmod()
| |
"""
Form Widget classes specific to the Django admin site.
"""
from itertools import chain
from django import forms
from django.forms.widgets import RadioFieldRenderer, RadioChoiceInput
import sys
if sys.version_info.major < 3:
from django.utils.encoding import force_unicode as force_text
else:
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.utils.translation import ugettext as _
from xadmin.util import vendor
class AdminDateWidget(forms.DateInput):
    """Date input rendered as a bootstrap-datepicker input group with a
    "Today" shortcut button."""

    @property
    def media(self):
        return vendor('datepicker.js', 'datepicker.css',
                      'xadmin.widget.datetime.js')

    def __init__(self, attrs=None, format=None):
        merged = {'class': 'date-field', 'size': '10'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminDateWidget, self).__init__(attrs=merged, format=format)

    def render(self, name, value, attrs=None):
        field_html = super(AdminDateWidget, self).render(name, value, attrs)
        markup = ('<div class="input-group date bootstrap-datepicker">'
                  '<span class="input-group-addon">'
                  '<i class="fa fa-calendar"></i></span>%s'
                  '<span class="input-group-btn">'
                  '<button class="btn btn-default" type="button">%s</button>'
                  '</span></div>')
        return mark_safe(markup % (field_html, _(u'Today')))
class AdminTimeWidget(forms.TimeInput):
    """Time input rendered as a bootstrap-timepicker input group with a
    "Now" shortcut button."""

    @property
    def media(self):
        return vendor('datepicker.js', 'timepicker.js', 'timepicker.css',
                      'xadmin.widget.datetime.js')

    def __init__(self, attrs=None, format=None):
        merged = {'class': 'time-field', 'size': '8'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminTimeWidget, self).__init__(attrs=merged, format=format)

    def render(self, name, value, attrs=None):
        field_html = super(AdminTimeWidget, self).render(name, value, attrs)
        markup = ('<div class="input-group time bootstrap-timepicker">'
                  '<span class="input-group-addon">'
                  '<i class="fa fa-clock-o"></i></span>%s'
                  '<span class="input-group-btn">'
                  '<button class="btn btn-default" type="button">%s</button>'
                  '</span></div>')
        return mark_safe(markup % (field_html, _(u'Now')))
class AdminSelectWidget(forms.Select):
    """Select widget enhanced by the xadmin select JS plugin."""
    @property
    def media(self):
        # Static assets required by the enhanced select control.
        return vendor('select.js', 'select.css', 'xadmin.widget.select.js')
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    def __init__(self, attrs=None):
        # Widget *classes* are passed; MultiWidget instantiates them.
        widgets = [AdminDateWidget, AdminTimeWidget]
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, widgets, attrs)
    def format_output(self, rendered_widgets):
        # Join the date and time sub-widgets into a single clearfix row.
        return mark_safe(u'<div class="datetime clearfix">%s%s</div>' %
                         (rendered_widgets[0], rendered_widgets[1]))
class AdminRadioInput(RadioChoiceInput):
    """Single radio option rendered with bootstrap markup; inline or
    stacked depending on the ``inline`` attr."""
    def render(self, name=None, value=None, attrs=None, choices=()):
        # Fall back to the values captured at construction time.
        # NOTE(review): ``or`` also discards falsy arguments such as 0 or
        # '' -- confirm that is intended.
        name = name or self.name
        value = value or self.value
        attrs = attrs or self.attrs
        # Strip form-control so bootstrap radio styling applies instead.
        attrs['class'] = attrs.get('class', '').replace('form-control', '')
        if 'id' in self.attrs:
            label_for = ' for="%s_%s"' % (self.attrs['id'], self.index)
        else:
            label_for = ''
        choice_label = conditional_escape(force_text(self.choice_label))
        if attrs.get('inline', False):
            return mark_safe(u'<label%s class="radio-inline">%s %s</label>' % (label_for, self.tag(), choice_label))
        else:
            return mark_safe(u'<div class="radio"><label%s>%s %s</label></div>' % (label_for, self.tag(), choice_label))
class AdminRadioFieldRenderer(RadioFieldRenderer):
    """Renderer that emits AdminRadioInput (bootstrap markup) for each
    choice instead of Django's default radio input."""
    def __iter__(self):
        for i, choice in enumerate(self.choices):
            yield AdminRadioInput(self.name, self.value, self.attrs.copy(), choice, i)
    def __getitem__(self, idx):
        choice = self.choices[idx]  # Let the IndexError propagate
        return AdminRadioInput(self.name, self.value, self.attrs.copy(), choice, idx)
    def render(self):
        # Each input renders its own wrapper markup; just join them.
        return mark_safe(u'\n'.join([force_text(w) for w in self]))
class AdminRadioSelect(forms.RadioSelect):
    """RadioSelect wired to the bootstrap-styled renderer."""
    renderer = AdminRadioFieldRenderer
class AdminCheckboxSelect(forms.CheckboxSelectMultiple):
    """Multiple-checkbox widget rendered with bootstrap markup; supports
    an ``inline`` attr for horizontal layout."""
    def render(self, name, value, attrs=None, choices=()):
        if value is None:
            value = []
        has_id = attrs and 'id' in attrs
        final_attrs = self.build_attrs(attrs, name=name)
        output = []
        # Normalize to strings
        str_values = set([force_text(v) for v in value])
        for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
            # If an ID attribute was given, add a numeric index as a suffix,
            # so that the checkboxes don't all have the same ID attribute.
            if has_id:
                final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
                label_for = u' for="%s"' % final_attrs['id']
            else:
                label_for = ''
            # check_test closes over str_values: a box is checked iff its
            # value is among the currently selected values.
            cb = forms.CheckboxInput(
                final_attrs, check_test=lambda value: value in str_values)
            option_value = force_text(option_value)
            rendered_cb = cb.render(name, option_value)
            option_label = conditional_escape(force_text(option_label))
            if final_attrs.get('inline', False):
                output.append(u'<label%s class="checkbox-inline">%s %s</label>' % (label_for, rendered_cb, option_label))
            else:
                output.append(u'<div class="checkbox"><label%s>%s %s</label></div>' % (label_for, rendered_cb, option_label))
        return mark_safe(u'\n'.join(output))
class AdminSelectMultiple(forms.SelectMultiple):
    """Multi-select widget carrying the xadmin ``select-multi`` class."""

    def __init__(self, attrs=None):
        # Caller-supplied attrs win over the widget defaults.
        merged = {'class': 'select-multi'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminSelectMultiple, self).__init__(attrs=merged)
class AdminFileWidget(forms.ClearableFileInput):
    """Clearable file input hooked up to the bootstrap-filestyle plugin."""

    @property
    def media(self):
        return vendor('filestyle.js')

    template_with_initial = (u'<p class="file-upload">%s</p>'
                             % forms.ClearableFileInput.template_with_initial)
    template_with_clear = (u'<span class="clearable-file-input">%s</span>'
                           % forms.ClearableFileInput.template_with_clear)

    def render(self, name, value, attrs=None):
        # Fix: ``attrs`` defaults to None but was mutated unconditionally,
        # raising TypeError when the widget is rendered without attrs.
        if attrs is None:
            attrs = {}
        attrs["class"] = "filestyle"
        attrs["data-icon"] = "false"
        attrs["data-buttonText"] = _('Choose file')
        return super(AdminFileWidget, self).render(name, value, attrs)
class AdminTextareaWidget(forms.Textarea):
    """Textarea carrying the xadmin ``textarea-field`` CSS class."""

    def __init__(self, attrs=None):
        # Caller-supplied attrs win over the widget defaults.
        merged = {'class': 'textarea-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminTextareaWidget, self).__init__(attrs=merged)
class AdminTextInputWidget(forms.TextInput):
    """Text input carrying the xadmin ``text-field`` CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'text-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminTextInputWidget, self).__init__(attrs=merged)
class AdminURLFieldWidget(forms.TextInput):
    """Text input carrying the xadmin ``url-field`` CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'url-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminURLFieldWidget, self).__init__(attrs=merged)
class AdminIntegerFieldWidget(forms.TextInput):
    """Text input carrying the xadmin ``int-field`` CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'int-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminIntegerFieldWidget, self).__init__(attrs=merged)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """Text input carrying the xadmin ``sep-int-field`` CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'sep-int-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminCommaSeparatedIntegerFieldWidget,
              self).__init__(attrs=merged)
class AdminColorPickerWidget(forms.TextInput):
    """Text input enhanced with the spectrum.js color picker."""

    @property
    def media(self):
        return vendor('spectrum.js', 'spectrum.css',
                      'xadmin.widget.colorpicker.js')

    def __init__(self, attrs=None):
        merged = {'class': 'colorpicker'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminColorPickerWidget, self).__init__(attrs=merged)
class AdminOpenStreetMapWidget(forms.TextInput):
    """Hidden text input rendered as an OpenStreetMap coordinate picker."""

    @property
    def media(self):
        return vendor('openlayers.js', 'openlayers.css',
                      'xadmin.widget.openstreetmap.js')

    def __init__(self, attrs=None):
        from django.conf import settings
        merged = {'class': 'openstreetmap'}
        # Optional map defaults supplied via project settings.
        if hasattr(settings, 'OSM_COORDINATES_ZOOM'):
            merged['zoom'] = settings.OSM_COORDINATES_ZOOM
        if hasattr(settings, 'OSM_COORDINATES_CENTER'):
            merged['center'] = settings.OSM_COORDINATES_CENTER
        # The raw input stays hidden; the JS widget renders the map instead.
        merged['style'] = 'display:none;'
        if attrs is not None:
            merged.update(attrs)
        super(AdminOpenStreetMapWidget, self).__init__(attrs=merged)
| |
from corehq.apps.accounting.models import BillingAccount
from django.utils.translation import ugettext as _
from corehq.apps.sms.models import INCOMING, OUTGOING
from django.db.models.aggregates import Count
from couchexport.models import Format
from dimagi.utils.dates import DateSpan
from corehq.apps.accounting.filters import DateCreatedFilter, NameFilter
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.smsbillables.dispatcher import SMSAdminInterfaceDispatcher
from corehq.apps.smsbillables.filters import (
CountryCodeFilter,
DateSentFilter,
DirectionFilter,
DomainFilter,
GatewayTypeFilter,
HasGatewayFeeFilter,
ShowBillablesFilter,
SpecificGateway,
)
from corehq.apps.smsbillables.models import (
SmsBillable,
SmsGatewayFee,
SmsGatewayFeeCriteria,
)
class SMSBillablesInterface(GenericTabularReport):
    """Paginated, exportable admin report listing individual SMS billables
    together with their gateway and usage charges."""
    base_template = "accounting/report_filter_actions.html"
    section_name = "Accounting"
    dispatcher = SMSAdminInterfaceDispatcher
    name = "SMS Billables"
    description = "List of all SMS Billables"
    slug = "sms_billables"
    ajax_pagination = True
    exportable = True
    exportable_all = True
    export_format_override = Format.UNZIPPED_CSV
    # Report filters, referenced by dotted path and resolved by the
    # reports framework.
    fields = [
        'corehq.apps.smsbillables.interface.DateSentFilter',
        'corehq.apps.accounting.interface.DateCreatedFilter',
        'corehq.apps.smsbillables.interface.ShowBillablesFilter',
        'corehq.apps.accounting.interface.NameFilter',
        'corehq.apps.smsbillables.interface.DomainFilter',
        'corehq.apps.smsbillables.interface.HasGatewayFeeFilter',
        'corehq.apps.smsbillables.interface.GatewayTypeFilter',
    ]
    @property
    def headers(self):
        # Computed columns (gateway, charges, log id, validity) cannot be
        # sorted server-side.
        return DataTablesHeader(
            DataTablesColumn("Date of Message"),
            DataTablesColumn("Account Name"),
            DataTablesColumn("Project Space"),
            DataTablesColumn("Direction"),
            DataTablesColumn("SMS parts"),
            DataTablesColumn("Gateway", sortable=False),
            DataTablesColumn("Gateway Charge", sortable=False),
            DataTablesColumn("Usage Charge", sortable=False),
            DataTablesColumn("Total Charge", sortable=False),
            DataTablesColumn("Message Log ID", sortable=False),
            DataTablesColumn("Is Valid?", sortable=False),
            DataTablesColumn("Date Created"),
        )
    @property
    def sort_field(self):
        # Maps DataTables' requested column index to an ORM field name;
        # None marks unsortable computed columns.
        # NOTE(review): the list has 10 entries but ``headers`` declares 12
        # columns -- confirm indexes past 9 can never be requested.
        sort_fields = [
            'date_sent',
            'domain',
            'direction',
            'multipart_count',
            None,
            None,
            None,
            None,
            None,
            'date_created',
        ]
        sort_index = int(self.request.GET.get('iSortCol_0', 1))
        field = sort_fields[sort_index]
        sort_descending = self.request.GET.get('sSortDir_0', 'asc') == 'desc'
        return field if not sort_descending else '-{0}'.format(field)
    @property
    def shared_pagination_GET_params(self):
        # Filter values echoed back on every paginated AJAX request so
        # paging keeps the current filter state.
        return DateSentFilter.shared_pagination_GET_params(self.request) + \
            DateCreatedFilter.shared_pagination_GET_params(self.request) + [
                {
                    'name': DateCreatedFilter.optional_filter_slug(),
                    'value': DateCreatedFilter.optional_filter_string_value(self.request)
                },
                {
                    'name': ShowBillablesFilter.slug,
                    'value': ShowBillablesFilter.get_value(self.request, self.domain)
                },
                {
                    'name': NameFilter.slug,
                    'value': NameFilter.get_value(self.request, self.domain)
                },
                {
                    'name': DomainFilter.slug,
                    'value': DomainFilter.get_value(self.request, self.domain)
                },
                {
                    'name': HasGatewayFeeFilter.slug,
                    'value': HasGatewayFeeFilter.get_value(self.request, self.domain)
                },
                {
                    'name': GatewayTypeFilter.slug,
                    'value': GatewayTypeFilter.get_value(self.request, self.domain)
                },
            ]
    @property
    def get_all_rows(self):
        """All matching rows, unpaginated (used for full export)."""
        query = self.sms_billables
        query = query.order_by(self.sort_field)
        return self._format_billables(query)
    @property
    def total_records(self):
        """Count of billables matching the current filters."""
        query = self.sms_billables
        return query.aggregate(Count('id'))['id__count']
    @property
    def rows(self):
        """The current page of formatted rows."""
        query = self.sms_billables
        query = query.order_by(self.sort_field)
        sms_billables = query[self.pagination.start:(self.pagination.start + self.pagination.count)]
        return self._format_billables(sms_billables)
    @property
    def sms_billables(self):
        """Queryset of SmsBillable rows narrowed by every active filter."""
        datespan = DateSpan(DateSentFilter.get_start_date(self.request), DateSentFilter.get_end_date(self.request))
        selected_billables = SmsBillable.get_billables_sent_between(datespan)
        if DateCreatedFilter.use_filter(self.request):
            date_span = DateSpan(
                DateCreatedFilter.get_start_date(self.request), DateCreatedFilter.get_end_date(self.request)
            )
            selected_billables = SmsBillable.filter_selected_billables_by_date(selected_billables, date_span)
        show_billables = ShowBillablesFilter.get_value(
            self.request, self.domain)
        if show_billables:
            selected_billables = SmsBillable.filter_selected_billables_show_billables(
                selected_billables, show_billables,
            )
        account_name = NameFilter.get_value(self.request, self.domain)
        if account_name:
            selected_billables = SmsBillable.filter_selected_billables_by_account(selected_billables, account_name)
        domain = DomainFilter.get_value(self.request, self.domain)
        if domain:
            selected_billables = selected_billables.filter(
                domain=domain,
            )
        has_gateway_fee = HasGatewayFeeFilter.get_value(
            self.request, self.domain
        )
        if has_gateway_fee:
            if has_gateway_fee == HasGatewayFeeFilter.YES:
                selected_billables = selected_billables.exclude(
                    gateway_fee=None
                )
            else:
                selected_billables = selected_billables.filter(
                    gateway_fee=None
                )
        gateway_type = GatewayTypeFilter.get_value(self.request, self.domain)
        if gateway_type:
            selected_billables = selected_billables.filter(
                gateway_fee__criteria__backend_api_id=gateway_type,
            )
        return selected_billables
    def _format_billables(self, sms_billables):
        # Shape each billable into the column order declared in ``headers``.
        return [
            [
                sms_billable.date_sent,
                BillingAccount.get_account_by_domain(sms_billable.domain).name,
                sms_billable.domain,
                {
                    INCOMING: _("Incoming"),
                    OUTGOING: _("Outgoing"),
                }.get(sms_billable.direction, ""),
                sms_billable.multipart_count,
                sms_billable.gateway_fee.criteria.backend_api_id if sms_billable.gateway_fee else "",
                sms_billable.gateway_charge,
                sms_billable.usage_charge,
                sms_billable.gateway_charge + sms_billable.usage_charge,
                sms_billable.log_id,
                sms_billable.is_valid,
                sms_billable.date_created,
            ]
            for sms_billable in sms_billables
        ]
class SMSGatewayFeeCriteriaInterface(GenericTabularReport):
    """Admin report listing every SMS gateway fee criteria row and the fee
    currently attached to it."""
    base_template = "accounting/report_filter_actions.html"
    section_name = "Accounting"
    dispatcher = SMSAdminInterfaceDispatcher
    name = "SMS Gateway Fee Criteria"
    description = "List of all SMS Gateway Fee Criteria"
    slug = "sms_gateway_fee_criteria"
    exportable = True
    exportable_all = True
    # Report filters, referenced by dotted path and resolved by the
    # reports framework.
    fields = [
        'corehq.apps.smsbillables.interface.GatewayTypeFilter',
        'corehq.apps.smsbillables.interface.SpecificGateway',
        'corehq.apps.smsbillables.interface.DirectionFilter',
        'corehq.apps.smsbillables.interface.CountryCodeFilter',
    ]
    @property
    def headers(self):
        return DataTablesHeader(
            DataTablesColumn("Gateway Type"),
            DataTablesColumn("Specific Gateway"),
            DataTablesColumn("Direction"),
            DataTablesColumn("Country Code"),
            DataTablesColumn("Prefix"),
            DataTablesColumn("Fee (Amount, Currency)"),
            DataTablesColumn("Is Active"),
        )
    @property
    def get_all_rows(self):
        # This report is not paginated; export reuses the full row set.
        return self.rows
    @property
    def rows(self):
        rows = []
        for criteria in self.sms_gateway_fee_criteria:
            # NOTE(review): assumes get_by_criteria_obj never returns None
            # for an existing criteria row; a None would raise on
            # ``gateway_fee.amount`` below -- confirm.
            gateway_fee = SmsGatewayFee.get_by_criteria_obj(criteria)
            rows.append([
                criteria.backend_api_id,
                (criteria.backend_instance
                 if criteria.backend_instance is not None else "Any"),
                criteria.direction,
                (criteria.country_code
                 if criteria.country_code is not None else "Any"),
                criteria.prefix or "Any",
                "%(amount)s %(currency)s" % {
                    'amount': str(gateway_fee.amount),
                    'currency': gateway_fee.currency.code,
                },
                criteria.is_active,
            ])
        return rows
    @property
    def sms_gateway_fee_criteria(self):
        """Queryset of criteria narrowed by the four optional filters."""
        selected_criteria = SmsGatewayFeeCriteria.objects.filter()
        gateway_type = GatewayTypeFilter.get_value(self.request, self.domain)
        if gateway_type:
            selected_criteria = selected_criteria.filter(
                backend_api_id=gateway_type,
            )
        specific_gateway = SpecificGateway.get_value(self.request, self.domain)
        if specific_gateway:
            selected_criteria = selected_criteria.filter(
                backend_instance=specific_gateway,
            )
        direction = DirectionFilter.get_value(self.request, self.domain)
        if direction:
            selected_criteria = selected_criteria.filter(
                direction=direction,
            )
        country_code = CountryCodeFilter.get_value(self.request, self.domain)
        if country_code:
            selected_criteria = selected_criteria.filter(
                country_code=int(country_code),
            )
        return selected_criteria
| |
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.encoding import smart_unicode
from django.core.exceptions import ObjectDoesNotExist
from friends.models import Friend
from users.models import UserInfo
from utils.packed_json import toJSON
from utils.packed_jpush import jpush_send_message
import json
import time as _time
from datetime import datetime
from django.contrib.auth.models import User
import logging
logger = logging.getLogger(__name__)
def recommend(request):
    """Return every registered user (except root) as a friend suggestion.

    Response JSON: {"recommend": [{"u": username, "a": avatar_url}, ...]}
    """
    recommend_friends = {}
    friend_list = []
    friends = User.objects.all()
    for friend in friends:
        # Never suggest the superuser account.
        if(friend.username == "root"):
            continue
        logger.debug("friend is : %s" %friend.username)
        # NOTE(review): one UserInfo query per user (N+1); acceptable for
        # small user counts only.
        friend_info = UserInfo.objects.get(user=friend)
        recommend = {}
        recommend['u'] = friend.username
        recommend['a'] = friend_info.avatar_url()
        friend_list.append(recommend)
    recommend_friends['recommend'] = friend_list
    return HttpResponse(toJSON(recommend_friends))
def add_friend(request):
    """Create a pending friend request from ``username`` to ``target_user``.

    JSON status codes: 0 = ok (also when a request already exists),
    34 = source user not found, 28 = target user not found.
    """
    data = {}
    if request.method == 'POST':
        logger.debug(str(request.POST))
        user_name = request.POST.get('username')
        logger.debug("src user name : "+user_name)
        try:
            src_user = User.objects.get(username = user_name)
        except ObjectDoesNotExist:
            data['status']=34
            return HttpResponse(toJSON(data))
        target_user=request.POST.get('target_user')
        # comment: for identify who that add
        # comment = request.POST.get('comment')
        try:
            add_user = User.objects.get(username=target_user)
            try:
                # An existing row (any verify_status) means the request was
                # already made; do nothing.
                check_friend = Friend.objects.get(handle=src_user, friend=add_user)
                logger.debug("friend already add, skip it")
            except ObjectDoesNotExist:
                # verify_status 2 marks the row as awaiting confirmation.
                wait_friend = Friend.objects.create(handle = src_user, friend=add_user)
                wait_friend.verify_status = 2
                wait_friend.save()
                push_data = {}
                push_data = wait_friend.get_friend_info()
                logger.debug("friend info : "+str(push_data))
                # 1002: message-type code passed to jpush (presumably
                # "incoming friend request") -- confirm against client.
                jpush_send_message(toJSON(push_data), target_user, 1002)
            data['status']=0
            return HttpResponse(toJSON(data))
        except ObjectDoesNotExist:
            data['status']=28
            return HttpResponse(toJSON(data))
def accept_friend(request):
    """Resolve a pending friend request.

    POST params: ``username`` (user responding), ``target_user`` (user who
    sent the request) and ``nok`` ('1' = accept, otherwise decline).
    JSON status codes: 0 = ok, 34 = responding user not found,
    28 = pending request (or target user) not found.
    """
    data = {}
    if request.method == 'POST':
        logger.debug(str(request.POST))
        user_name = request.POST.get('username')
        # 1: agree, 0: disagree
        nok = request.POST.get('nok')
        try:
            src_user = User.objects.get(username = user_name)
        except ObjectDoesNotExist:
            data['status']=34
            return HttpResponse(toJSON(data))
        target_user=request.POST.get('target_user')
        try:
            target = User.objects.get(username=target_user)
            origin = User.objects.get(username=user_name)
            friend = Friend.objects.get(handle=target,friend=origin)
            # Bug fix: POST values are strings, so the original test
            # ``nok is 1`` (identity against the int 1) was never true and
            # every request -- including accepts -- deleted the record.
            if nok == '1':
                # change friend verify status
                friend.verify_status = 1
                friend.save()
            else:
                # disagree to add, so delete it
                friend.delete()
            push_data = {}
            push_data['user_name'] = user_name
            # 1003: message-type code understood by the jpush client.
            jpush_send_message(toJSON(push_data), target_user, 1003)
            data['status']=0
            return HttpResponse(toJSON(data))
        except ObjectDoesNotExist:
            data['status']=28
            return HttpResponse(toJSON(data))
def update_friend(request):
    """Update the caller's local metadata (display-name comment and
    description) for an existing friend relationship.

    JSON status codes: 0 = ok, 28 = user or relationship not found.
    """
    data = {}
    if request.method == 'POST':
        logger.debug(str(request.POST))
        user_name = request.POST.get('username')
        name_comment = request.POST.get('name_comment')
        descrip = request.POST.get('description')
        target_user=request.POST.get('target_user')
        try:
            src_user = User.objects.get(username = user_name)
            # NOTE(review): a missing 'description' POST param makes
            # ``descrip`` None and breaks this concatenation -- confirm
            # clients always send it.
            logger.debug("friend description is "+descrip)
            update_user = User.objects.get(username=target_user)
            my_friend = Friend.objects.get(handle=src_user,friend=update_user)
            # set breakpoint to trace
            #import pdb; pdb.set_trace()
            my_friend.name_comment = name_comment
            my_friend.description = descrip
            my_friend.save()
            data['status']=0
            return HttpResponse(toJSON(data))
        except ObjectDoesNotExist:
            data['status']=28
            return HttpResponse(toJSON(data))
def list_contains_record(record_list, record):
    """Return True if ``record_list`` already holds an entry whose 'sid'
    matches ``record.id``.

    ``record`` may be None (treated as not contained); None entries in the
    list are skipped.
    """
    if record is None:
        return False
    record_id = record.id
    # any() over a generator; the original loop variable shadowed the
    # ``next`` builtin.
    return any(entry is not None and entry['sid'] == record_id
               for entry in record_list)
def safe_attr(obj, attr_name):
    """Dictionary lookup that yields None instead of raising KeyError."""
    return obj[attr_name] if attr_name in obj else None
def process_client_changes(src_user, friends_buffer):
    """Apply friend changes uploaded by the client.

    ``friends_buffer`` is a JSON array of change records. Only deletions
    (``d == 1``) of records that already exist server-side (``sid``
    present) are applied; other change types are currently ignored.
    """
    logger.debug('* Processing client changes')
    # build an array of generic objects containing contact data,
    # using the Django built-in JSON parser
    logger.debug('Uploaded friends buffer: ' + smart_unicode(friends_buffer))
    json_list = json.loads(friends_buffer)
    logger.debug('Client-side updates: ' + str(len(json_list)))
    for jrecord in json_list:
        logger.debug('json record ' + str(jrecord))
        sid = safe_attr(jrecord, 'sid')
        if(sid != None):
            logger.debug('Updating record: ' + str(sid))
            record = Friend.objects.get(id=sid)
            # if the 'change' for this record is that they were deleted
            # on the client-side, all we want to do is set the deleted
            # flag here, and we're done.
            if(safe_attr(jrecord,'d') == 1):
                record.deleted = True
                record.save()
                # Bug fix: record.handle is a User instance (see
                # add_friend); concatenating it to a str raised TypeError.
                logger.debug('Deleted record: ' + str(record.handle))
def get_updated_friends(src_user, client_state, updated_friends):
    """Append server-side friend changes since the client's last sync to
    ``updated_friends`` (mutated in place).

    ``client_state`` is the opaque sync token previously handed to the
    client: seconds since the Unix epoch of the last successful sync.
    """
    logger.debug('* Processing server changes')
    timestamp = None
    # the client sends the last high-water-mark that they sucessfully
    # sync'd to in the syncstate parameter. it's opaque to them, but
    # its actually a seconds-in-unix-epoch timestamp that we use
    # as a baseline.
    if client_state:
        logger.debug('Client sync state: '+client_state)
        # Bug fix: datetime has no ``utcformattimestamp`` attribute, so any
        # sync that supplied a previous state crashed with AttributeError;
        # ``utcfromtimestamp`` is the intended call.
        timestamp = datetime.utcfromtimestamp(long(client_state))
    # keep track of the update/delete counts, so we can log in
    # below. Makes debugging easier...
    update_count = 0
    delete_count = 0
    records = Friend.objects.filter(handle=src_user)
    if records:
        # find the high-water mark for the most recently updated record.
        # we'll return this as the syncstate (x) value for all the friends
        # we return from this function.
        high_water_date = datetime.min
        for record in records:
            result = record.updated.replace(microsecond=0, tzinfo=None)
            logger.debug("record updated: "+str(result))
            logger.debug("high water date: "+str(high_water_date))
            if (result > high_water_date):
                high_water_date = result
        high_water_mark = str(long(_time.mktime(high_water_date.utctimetuple())) + 1)
        logger.debug('New sync state: '+high_water_mark)
        # Now build the updated_friends containing all the friends that have been
        # changed since the last sync
        for record in records:
            # if our list of records we're returning already contains this
            # record (for example, it's a record just uploaded from the client)
            # then don't bother processing it any further...
            if (list_contains_record(updated_friends, record)):
                continue
            friend_name = record.friend.username
            friend = User.objects.get(username=friend_name)
            logger.debug("record updated: "+str(record.updated))
            logger.debug("timestamp: "+str(timestamp))
            if timestamp is None or record.updated > timestamp:
                if record.deleted == True:
                    delete_count = delete_count + 1
                    # Constructed for its side effect: appends a tombstone
                    # entry to updated_friends.
                    DeletedRecordData(updated_friends, src_user, friend, high_water_mark)
                    record.delete()
                else:
                    update_count = update_count + 1
                    # Side effect: appends the friend's current data.
                    UpdatedRecordData(updated_friends, src_user, friend, None, high_water_mark)
        logger.debug('Server-side updates: '+str(update_count))
        logger.debug('Server-side deletes: '+str(delete_count))
def sync_friend(request):
    """Two-way friend sync endpoint: first applies the client's uploaded
    changes, then returns server-side changes since the client's
    ``syncstate`` token."""
    user_name = request.POST.get('username')
    # upload client dirty friends
    updated_friends = []
    result_friends = {}
    if request.method == 'POST':
        logger.debug("request POST: "+str(request.POST))
    else:
        logger.debug("request GET: "+str(request.GET))
    client_buffer = request.POST.get('friends')
    src_user = User.objects.get(username = user_name)
    if((client_buffer != None) and (client_buffer != '')):
        process_client_changes(src_user, client_buffer)
    # add any friends on the server-side
    client_state = request.POST.get('syncstate')
    # Fills updated_friends in place.
    get_updated_friends(src_user, client_state, updated_friends)
    result_friends['friends'] = updated_friends
    logger.debug("update friends are : "+toJSON(result_friends))
    # update latest friends
    return HttpResponse(toJSON(result_friends))
class UpdatedRecordData(object):
    """Holds data for user's records.
    This class knows how to serialize itself to JSON.

    Instantiating it appends a wire-format record for ``friend_user`` to
    ``record_list``; callers discard the instance itself.
    """
    # Mapping of model field names to the short wire-format keys.
    # NOTE(review): not referenced anywhere within this class -- appears
    # to serve only as documentation of the keys used below.
    __FIELD_MAP = {
        'handle': 'h',
        'friend': 'f',
        'avatar': 'a',
        'description': 'des',
        'deleted': 'd',
        'client_id': 'cid'
    }
    def __init__(self, record_list, src_user, friend_user, client_id, high_water_mark):
        obj = Friend.objects.get(handle = src_user, friend=friend_user)
        record = {}
        data = obj.get_friend_info()
        record['f'] = data['friend']
        record['a'] = data['avatar']
        # 'sid' is the server-side primary key; 'x' the sync high-water mark.
        record['sid'] = obj.id
        record['x'] = high_water_mark
        if (client_id != None):
            logger.debug("mark client id: "+str(client_id))
            record['cid'] = client_id
        record_list.append(record)
class DeletedRecordData(object):
    """Appends a deletion tombstone ('d': 1) for ``friend_user`` to
    ``record_list``; callers discard the instance itself."""
    def __init__(self, record_list, src_user, friend_user, high_water_mark):
        obj = Friend.objects.get(handle = src_user, friend=friend_user)
        record = {}
        record['d'] = 1
        # 'sid' is the server-side primary key; 'x' the sync high-water mark.
        record['sid'] = obj.id
        record['x'] = high_water_mark
        record_list.append(record)
| |
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import pickle
from unittest import TestCase
import six
from hamcrest import assert_that, raises
from hamcrest import equal_to
from storops import exception
from storops.exception import VNXFsNotFoundError, VNXException, raise_if_err
from storops.vnx.enums import VNXProvisionEnum, \
VNXTieringEnum, VNXSPEnum, VNXRaidType, \
VNXMigrationRate, VNXPortType, VNXPoolRaidType, VNXCtrlMethod
from storops.vnx.nas_client import NasXmlResponse
from storops_test.vnx.nas_mock import MockXmlPost
class VNXErrorTest(TestCase):
    """Verify that raw VNX CLI/XML error output is mapped to the correct
    typed storops exception by ``raise_if_err``."""
    def test_has_error_with_specific_error(self):
        def f():
            msg = ("SP A: Expansion LUN size must be "
                   "greater than current LUN size. (0x712d8e04)")
            raise_if_err(msg)
        assert_that(f, raises(exception.VNXLunExpandSizeError))
    def test_cg_not_found(self):
        def f():
            output = "Cannot find the consistency group."
            raise_if_err(output)
        assert_that(f, raises(exception.VNXConsistencyGroupNotFoundError))
    def test_snap_not_exists(self):
        def f():
            output = "The specified snapshot does not exist."
            raise_if_err(output)
        assert_that(f, raises(exception.VNXSnapNotExistsError))
    def test_pool_lun_not_exists_multi_line(self):
        # The error pattern must match across line breaks.
        def f():
            output = """Could not retrieve the specified (pool lun).
                The (pool lun) may not exist."""
            raise_if_err(output)
        assert_that(f, raises(exception.VNXLunNotFoundError))
    def test_has_error_regular_string_false(self):
        def f():
            output = ("Cannot unbind LUN because "
                      "it's contained in a Storage Group.")
            raise_if_err(output)
        assert_that(f, raises(exception.VNXLunInStorageGroupError))
    def test_has_error_ev_error(self):
        # Errors may also arrive as objects with where/why/who attributes.
        class ForTest(object):
            pass
        error = ForTest()
        error.where = 'EV_ScsiPipe::_sendCommand() - Sense Data'
        error.why = 'SP A: LUN already exists in the specified storage group.'
        error.who = '@(#)libconnect Revision 7.33.6.2.50 on 1/6/2015 21:54:55'
        def f():
            raise_if_err(error)
        assert_that(f, raises(exception.VNXAluAlreadyAttachedError))
    def test_sp_error_not_supported(self):
        def f():
            out = ('Error returned from the target: 10.244.211.32\n'
                   'CLI commands are not supported by the '
                   'target storage system.')
            raise_if_err(out)
        assert_that(f, raises(exception.VNXNotSupportedError))
    def test_sp_error_time_out(self):
        def f():
            out = ("A network error occurred while "
                   "trying to connect: '10.244.211.33'.\n"
                   "Message : select: The connect timed out.")
            raise_if_err(out)
        assert_that(f, raises(exception.VNXSpNotAvailableError))
    def test_raise_if_err_normal(self):
        raise_if_err('')
        # no raises
    def test_raise_if_err_non_empty(self):
        def f():
            raise_if_err('error msg', msg="error received")
        assert_that(f, raises(VNXException, "error received"))
    def test_raise_if_err_lun_not_found(self):
        def f():
            out = ('Could not retrieve the specified (pool lun). '
                   'The (pool lun) may not exist')
            raise_if_err(out)
        assert_that(f, raises(exception.VNXLunNotFoundError))
    def test_raise_if_err_nas_response_input(self):
        # NasXmlResponse objects expose their own raise_if_err.
        def f():
            resp = NasXmlResponse(MockXmlPost.read_file('fs_not_found.xml'))
            resp.raise_if_err()
        assert_that(f, raises(VNXFsNotFoundError, 'not found'))
class VNXProvisionEnumTest(TestCase):
    """CLI option strings generated for each provisioning type."""
    def test_get_opt_dedup(self):
        opt = VNXProvisionEnum.get_opt(VNXProvisionEnum.DEDUPED)
        assert_that(' '.join(opt), equal_to('-type Thin -deduplication on'))
    def test_get_opt_thin(self):
        opt = VNXProvisionEnum.get_opt(VNXProvisionEnum.THIN)
        assert_that(' '.join(opt), equal_to('-type Thin'))
    def test_get_opt_thick(self):
        opt = VNXProvisionEnum.get_opt(VNXProvisionEnum.THICK)
        assert_that(' '.join(opt), equal_to('-type NonThin'))
    def test_get_opt_compressed(self):
        opt = VNXProvisionEnum.get_opt(VNXProvisionEnum.COMPRESSED)
        assert_that(' '.join(opt), equal_to('-type Thin'))
    def test_get_opt_not_available(self):
        # Unknown values must raise rather than produce empty options.
        def f():
            VNXProvisionEnum.get_opt('na')
        assert_that(f, raises(ValueError))
class VNXTieringEnumTest(TestCase):
    """CLI option generation and value parsing for tiering policies."""
    def test_get_opt(self):
        opt = VNXTieringEnum.get_opt(VNXTieringEnum.HIGH_AUTO)
        assert_that(
            ' '.join(opt),
            equal_to('-initialTier highestAvailable -tieringPolicy autoTier'))
    def test_get_opt_not_available(self):
        def f():
            VNXTieringEnum.get_opt('na')
        assert_that(f, raises(ValueError))
    def test_invalid_tier_enum(self):
        def f():
            VNXTieringEnum('abc')
        assert_that(f, raises(ValueError, 'not a valid VNXTieringEnum'))
    def test_valid_tier_enum(self):
        auto = VNXTieringEnum('auto')
        assert_that(auto, equal_to(VNXTieringEnum.AUTO))
class VNXSPEnumTest(TestCase):
    """Parsing of assorted SP name spellings into VNXSPEnum members."""
    def test_from_str(self):
        # Input spelling -> expected member (None for unparseable input).
        data = {
            'spa': VNXSPEnum.SP_A,
            'sp': None,
            'sp_a': VNXSPEnum.SP_A,
            'SP b': VNXSPEnum.SP_B,
            'a': VNXSPEnum.SP_A,
            'b': VNXSPEnum.SP_B,
            'cs': VNXSPEnum.CONTROL_STATION,
            'Celerra_CS0_21111': VNXSPEnum.CONTROL_STATION,
            'VPI-24092B': VNXSPEnum.SP_B
        }
        for k, v in six.iteritems(data):
            assert_that(VNXSPEnum.parse(k), equal_to(v),
                        'input: {}'.format(k))
            # Members must survive a pickle round-trip as the *same*
            # object (identity, not just equality).
            assert_that(pickle.loads(pickle.dumps(VNXSPEnum.parse(k))) is v,
                        equal_to(True),
                        'input: {}'.format(k))
    def test_get_sp_index_err(self):
        def f():
            VNXSPEnum.get_sp_index('abc')
        assert_that(f, raises(ValueError, 'not a valid sp'))
    def test_get_sp_index(self):
        assert_that(VNXSPEnum.get_sp_index('spa'), equal_to('a'))
    def test_sp_value(self):
        assert_that(VNXSPEnum.SP_B.value, equal_to('SP B'))
    def test_index(self):
        assert_that(VNXSPEnum.SP_A.index, equal_to('a'))
        assert_that(VNXSPEnum.SP_B.index, equal_to('b'))
class VNXRaidTypeTest(TestCase):
    """Tests for raid-type parsing and pool raid disk requirements."""

    def test_from_str(self):
        parsed = VNXRaidType.from_str('r5')
        assert_that(parsed, equal_to(VNXRaidType.RAID5))

    def test_disk_requirement(self):
        minimum = VNXPoolRaidType.RAID5.min_disk_requirement
        assert_that(minimum, equal_to(3))
class VNXMigrationRateTest(TestCase):
    """Tests for the JSON text representation of migration rates."""

    def test_text_type(self):
        text = six.text_type(VNXMigrationRate.HIGH)
        assert_that(text,
                    equal_to('{"VNXMigrationRate": {"value": "high"}}'))
class VNXPortTypeTest(TestCase):
    """Tests for deriving the port type from an initiator identifier."""

    def test_parse_iqn(self):
        # iSCSI initiators are identified by an IQN string.
        parsed = VNXPortType.parse('iqn.1992-04.com.emc:c.a.b')
        assert_that(parsed, equal_to(VNXPortType.ISCSI))

    def test_parse_wwn(self):
        # FC initiators are identified by a WWN pair.
        parsed = VNXPortType.parse('50:06:01:60:B6:E0:16:81:'
                                   '50:06:01:68:36:E4:16:81')
        assert_that(parsed, equal_to(VNXPortType.FC))
class VNXControlMethodTest(TestCase):
    """Tests for the CLI options generated by each IO control method."""

    def test_get_no_ctrl(self):
        method = VNXCtrlMethod(method=VNXCtrlMethod.NO_CTRL)
        assert_that(method.get_option(), equal_to(['-noctrl']))

    def test_get_limit_ctrl(self):
        method = VNXCtrlMethod(method=VNXCtrlMethod.LIMIT_CTRL, metric='bw',
                               value=100)
        assert_that(method.get_option(),
                    equal_to(['-ctrlmethod', 'limit', '-gmetric', 'bw',
                              '-gval', 100]))

    def test_get_cruise_ctrl(self):
        method = VNXCtrlMethod(method=VNXCtrlMethod.CRUISE_CTRL, metric='bw',
                               value=100, tolerance=10)
        assert_that(method.get_option(),
                    equal_to(['-ctrlmethod', 'cruise', '-gmetric', 'bw',
                              '-gval', 100, '-gtol', 10]))

    def test_get_fix_ctrl(self):
        method = VNXCtrlMethod(method=VNXCtrlMethod.FIXED_CTRL, value=100)
        assert_that(method.get_option(),
                    equal_to(['-ctrlmethod', 'fixed', '-gval', 100]))

    def test_get_invalid_ctrl(self):
        def invalid_call():
            VNXCtrlMethod(method='invalid').get_option()
        assert_that(invalid_call,
                    raises(ValueError, 'Invalid control method specified.'))
| |
"""Test for the smhi weather entity."""
import asyncio
from datetime import datetime
import logging
from unittest.mock import Mock, patch
from homeassistant.components.smhi import weather as weather_smhi
from homeassistant.components.smhi.const import ATTR_SMHI_CLOUDINESS
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_WEATHER_ATTRIBUTION,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_VISIBILITY,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
DOMAIN as WEATHER_DOMAIN,
)
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
_LOGGER = logging.getLogger(__name__)
TEST_CONFIG = {"name": "test", "longitude": "17.84197", "latitude": "59.32624"}
async def test_setup_hass(hass: HomeAssistant, aioclient_mock) -> None:
    """Test for successfully setting up the smhi platform.

    This test are deeper integrated with the core. Since only
    config_flow is used the component are setup with
    "async_forward_entry_setup". The actual result are tested
    with the entity state rather than "per function" unity tests
    """
    from smhi.smhi_lib import APIURL_TEMPLATE

    forecast_url = APIURL_TEMPLATE.format(
        TEST_CONFIG["longitude"], TEST_CONFIG["latitude"]
    )
    aioclient_mock.get(forecast_url, text=load_fixture("smhi.json"))
    config_entry = MockConfigEntry(domain="smhi", data=TEST_CONFIG)

    await hass.config_entries.async_forward_entry_setup(config_entry, WEATHER_DOMAIN)
    assert aioclient_mock.call_count == 1

    # Inspect the resulting entity state instead of unit-testing
    # each property in isolation.
    state = hass.states.get("weather.smhi_test")

    assert state.state == "sunny"
    assert state.attributes[ATTR_SMHI_CLOUDINESS] == 50
    assert state.attributes[ATTR_WEATHER_ATTRIBUTION].find("SMHI") >= 0
    assert state.attributes[ATTR_WEATHER_HUMIDITY] == 55
    assert state.attributes[ATTR_WEATHER_PRESSURE] == 1024
    assert state.attributes[ATTR_WEATHER_TEMPERATURE] == 17
    assert state.attributes[ATTR_WEATHER_VISIBILITY] == 50
    assert state.attributes[ATTR_WEATHER_WIND_SPEED] == 7
    assert state.attributes[ATTR_WEATHER_WIND_BEARING] == 134
    _LOGGER.error(state.attributes)
    assert len(state.attributes["forecast"]) == 4

    second_forecast = state.attributes["forecast"][1]
    assert second_forecast[ATTR_FORECAST_TIME] == "2018-09-02T12:00:00"
    assert second_forecast[ATTR_FORECAST_TEMP] == 21
    assert second_forecast[ATTR_FORECAST_TEMP_LOW] == 6
    assert second_forecast[ATTR_FORECAST_PRECIPITATION] == 0
    assert second_forecast[ATTR_FORECAST_CONDITION] == "partlycloudy"
async def test_setup_plattform(hass):
    """Test that setup platform does nothing."""
    result = await weather_smhi.async_setup_platform(hass, None, None)
    assert result is None
def test_properties_no_data(hass: HomeAssistant) -> None:
    """Test properties when no API data available."""
    weather = weather_smhi.SmhiWeather("name", "10", "10")
    weather.hass = hass

    assert weather.name == "name"
    assert weather.should_poll is True
    assert weather.temperature_unit == TEMP_CELSIUS
    # Without any fetched forecast every measurement is unknown.
    for measurement in (
        weather.temperature,
        weather.humidity,
        weather.wind_speed,
        weather.wind_bearing,
        weather.visibility,
        weather.pressure,
        weather.cloudiness,
        weather.condition,
        weather.forecast,
    ):
        assert measurement is None
# pylint: disable=protected-access
def test_properties_unknown_symbol() -> None:
    """Test behaviour when unknown symbol from API.

    The original test built three byte-identical Mock forecast entries by
    hand; they only differed in ``valid_time``, so the construction is
    factored into a helper.
    """
    hass = Mock()

    def make_forecast(valid_time: datetime) -> Mock:
        """Return a mocked forecast entry carrying an unknown symbol."""
        entry = Mock()
        entry.temperature = 5
        entry.mean_precipitation = 0.5
        entry.total_precipitation = 1
        entry.humidity = 5
        entry.wind_speed = 10
        entry.wind_direction = 180
        entry.horizontal_visibility = 6
        entry.pressure = 1008
        entry.cloudiness = 52
        entry.symbol = 100  # Faulty symbol
        entry.valid_time = valid_time
        return entry

    testdata = [
        make_forecast(datetime(2018, 1, 1, 0, 1, 2)),
        make_forecast(datetime(2018, 1, 1, 12, 1, 2)),
        make_forecast(datetime(2018, 1, 2, 12, 1, 2)),
    ]

    weather = weather_smhi.SmhiWeather("name", "10", "10")
    weather.hass = hass
    weather._forecasts = testdata

    # An unmapped symbol must not raise; it yields an unknown condition.
    assert weather.condition is None
    forecast = weather.forecast[0]
    assert forecast[ATTR_FORECAST_CONDITION] is None
# pylint: disable=protected-access
async def test_refresh_weather_forecast_exceeds_retries(hass) -> None:
    """Test the refresh weather forecast function."""
    from smhi.smhi_lib import SmhiForecastException

    with patch.object(
        hass.helpers.event, "async_call_later"
    ) as delayed_call, patch.object(
        weather_smhi.SmhiWeather,
        "get_weather_forecast",
        side_effect=SmhiForecastException(),
    ):
        entity = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
        entity.hass = hass
        entity._fail_count = 2

        await entity.async_update()

        # After the retry budget is spent no forecast is kept and no
        # further retry is scheduled.
        assert entity._forecasts is None
        assert not delayed_call.mock_calls
async def test_refresh_weather_forecast_timeout(hass) -> None:
    """Test timeout exception."""
    entity = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
    entity.hass = hass

    with patch.object(
        hass.helpers.event, "async_call_later"
    ) as delayed_call, patch.object(
        weather_smhi.SmhiWeather, "retry_update"
    ), patch.object(
        weather_smhi.SmhiWeather,
        "get_weather_forecast",
        side_effect=asyncio.TimeoutError,
    ):
        await entity.async_update()

        # A timeout schedules exactly one retry, RETRY_TIMEOUT seconds out.
        assert len(delayed_call.mock_calls) == 1
        assert delayed_call.mock_calls[0][1][0] == weather_smhi.RETRY_TIMEOUT
async def test_refresh_weather_forecast_exception() -> None:
    """Test any exception."""
    from smhi.smhi_lib import SmhiForecastException
    hass = Mock()
    weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
    weather.hass = hass
    with patch.object(
        hass.helpers.event, "async_call_later"
    ) as call_later, patch.object(weather_smhi, "async_timeout"), patch.object(
        weather_smhi.SmhiWeather, "retry_update"
    ), patch.object(
        weather_smhi.SmhiWeather,
        "get_weather_forecast",
        side_effect=SmhiForecastException(),
    ):
        hass.async_add_job = Mock()
        # NOTE(review): this rebinds `call_later` to the mock attribute on the
        # mocked hass, shadowing the `patch.object(...) as call_later` target
        # above — presumably intentional since hass itself is a Mock; confirm.
        call_later = hass.helpers.event.async_call_later
        await weather.async_update()
        # A forecast failure schedules exactly one retry callback.
        assert len(call_later.mock_calls) == 1
        # Assert we are going to wait RETRY_TIMEOUT seconds
        assert call_later.mock_calls[0][1][0] == weather_smhi.RETRY_TIMEOUT
async def test_retry_update():
    """Test retry function of refresh forecast."""
    hass = Mock()
    entity = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
    entity.hass = hass

    with patch.object(weather_smhi.SmhiWeather, "async_update") as update:
        await entity.retry_update()
        # retry_update simply delegates to async_update once.
        assert len(update.mock_calls) == 1
def test_condition_class():
    """Test condition class."""

    def get_condition(index: int) -> str:
        """Return condition given index."""
        return [k for k, v in weather_smhi.CONDITION_CLASSES.items() if index in v][0]

    # SMHI symbol number -> expected Home Assistant condition. See
    # http://opendata.smhi.se/apidocs/metfcst/parameters.html
    expected = {
        1: "sunny",  # Clear sky
        2: "sunny",  # Nearly clear sky
        3: "partlycloudy",  # Variable cloudiness
        4: "partlycloudy",  # Halfclear sky
        5: "cloudy",  # Cloudy sky
        6: "cloudy",  # Overcast
        7: "fog",  # Fog
        8: "rainy",  # Light rain showers
        9: "rainy",  # Moderate rain showers
        18: "rainy",  # Light rain
        19: "rainy",  # Moderate rain
        10: "pouring",  # Heavy rain showers
        20: "pouring",  # Heavy rain
        21: "lightning",  # Thunder
        11: "lightning-rainy",  # Thunderstorm
        15: "snowy",  # Light snow showers
        16: "snowy",  # Moderate snow showers
        17: "snowy",  # Heavy snow showers
        25: "snowy",  # Light snowfall
        26: "snowy",  # Moderate snowfall
        27: "snowy",  # Heavy snowfall
        12: "snowy-rainy",  # Light sleet showers
        13: "snowy-rainy",  # Moderate sleet showers
        14: "snowy-rainy",  # Heavy sleet showers
        22: "snowy-rainy",  # Light sleet
        23: "snowy-rainy",  # Moderate sleet
        24: "snowy-rainy",  # Heavy sleet
    }
    for symbol, condition in expected.items():
        assert get_condition(symbol) == condition
| |
# -*- coding: utf-8 -*-
from itertools import chain
from classytags.arguments import Argument, MultiValueArgument, KeywordArgument, MultiKeywordArgument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag, AsTag
from classytags.parser import Parser
from cms.models import Page, Placeholder as PlaceholderModel
from cms.plugin_rendering import render_placeholder
from cms.plugins.utils import get_plugins, assign_plugins
from cms.utils import get_language_from_request, get_cms_setting
from cms.utils.page_resolver import get_page_queryset, use_draft
from cms.utils.placeholder import validate_placeholder_name
from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.mail import mail_managers
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, get_language
import re
from sekizai.helpers import Watcher, get_varname
register = template.Library()
def get_site_id(site):
    """Normalize a site argument to a numeric site id.

    Accepts a ``Site`` instance, an int, or a digit string; anything else
    (including falsy values) falls back to ``settings.SITE_ID``.
    """
    if not site:
        return settings.SITE_ID
    if isinstance(site, Site):
        return site.id
    if isinstance(site, int) or (isinstance(site, basestring) and site.isdigit()):
        return int(site)
    return settings.SITE_ID
def has_permission(page, request):
    """Template filter: may the requesting user change ``page``?"""
    return page.has_change_permission(request)
register.filter(has_permission)
CLEAN_KEY_PATTERN = re.compile(r'[^a-zA-Z0-9_-]')
def _clean_key(key):
return CLEAN_KEY_PATTERN.sub('-', key)
def _get_cache_key(name, page_lookup, lang, site_id):
    """Build a cache key scoped by page lookup, site and language."""
    if isinstance(page_lookup, Page):
        raw_key = str(page_lookup.pk)
    else:
        raw_key = str(page_lookup)
    # Only the page part needs cleaning; site/lang are plain values.
    return '%s__page_lookup:%s_site:%s_lang:%s' % (
        name, _clean_key(raw_key), site_id, lang)
def _get_page_by_untyped_arg(page_lookup, request, site_id):
    """
    The `page_lookup` argument can be of any of the following types:
    - Integer: interpreted as `pk` of the desired page
    - String: interpreted as `reverse_id` of the desired page
    - `dict`: a dictionary containing keyword arguments to find the desired page
    (for instance: `{'pk': 1}`)
    - `Page`: you can also pass a Page object directly, in which case there will be no database lookup.
    - `None`: the current page will be used
    """
    if page_lookup is None:
        return request.current_page
    if isinstance(page_lookup, Page):
        return page_lookup
    # Normalize scalar lookups into a keyword-argument dict.
    if isinstance(page_lookup, basestring):
        page_lookup = {'reverse_id': page_lookup}
    elif isinstance(page_lookup, (int, long)):
        page_lookup = {'pk': page_lookup}
    elif not isinstance(page_lookup, dict):
        raise TypeError('The page_lookup argument can be either a Dictionary, Integer, Page, or String.')
    page_lookup.update({'site': site_id})
    try:
        if 'pk' in page_lookup:
            # pk lookups search all pages, then select the draft or public
            # version to match the edit/live mode of the current request.
            page = Page.objects.all().get(**page_lookup)
            if request and use_draft(request):
                if page.publisher_is_draft:
                    return page
                else:
                    return page.publisher_draft
            else:
                if page.publisher_is_draft:
                    return page.publisher_public
                else:
                    return page
        else:
            return get_page_queryset(request).get(**page_lookup)
    except Page.DoesNotExist:
        # Missing pages are a hard error in DEBUG; in production they are
        # optionally reported to the managers and render as nothing.
        site = Site.objects.get_current()
        subject = _('Page not found on %(domain)s') % {'domain': site.domain}
        body = _("A template tag couldn't find the page with lookup arguments `%(page_lookup)s\n`. "
                 "The URL of the request was: http://%(host)s%(path)s") \
               % {'page_lookup': repr(page_lookup), 'host': site.domain, 'path': request.path}
        if settings.DEBUG:
            raise Page.DoesNotExist(body)
        else:
            if settings.SEND_BROKEN_LINK_EMAILS:
                mail_managers(subject, body, fail_silently=True)
            return None
class PageUrl(InclusionTag):
    """``{% page_url page_lookup [lang] [site] %}``

    Outputs the absolute URL of the page identified by ``page_lookup``,
    caching the result per page/site/language.
    """
    template = 'cms/content.html'
    name = 'page_url'

    options = Options(
        Argument('page_lookup'),
        Argument('lang', required=False, default=None),
        Argument('site', required=False, default=None),
    )

    def get_context(self, context, page_lookup, lang, site):
        request = context.get('request', False)
        # No request (or the admin's placeholder "dummy" page): render nothing.
        if not request or request.current_page == "dummy":
            return {'content': ''}
        if lang is None:
            lang = get_language_from_request(request)
        site_id = get_site_id(site)
        cache_key = _get_cache_key('page_url', page_lookup, lang, site_id) + '_type:absolute_url'
        url = cache.get(cache_key)
        if not url:
            page = _get_page_by_untyped_arg(page_lookup, request, site_id)
            if page:
                url = page.get_absolute_url(language=lang)
                cache.set(cache_key, url,
                          get_cms_setting('CACHE_DURATIONS')['content'])
        return {'content': url} if url else {'content': ''}

register.tag(PageUrl)
register.tag('page_id_url', PageUrl)
def _get_placeholder(current_page, page, context, name):
    """Return placeholder ``name`` of ``page`` (or None), memoized per request.

    The per-page placeholder map is cached on ``current_page`` so repeated
    lookups (e.g. while walking ancestors for inheritance) hit the DB once.
    """
    from cms.utils.plugins import get_placeholders
    cache_map = getattr(current_page, '_tmp_placeholders_cache', {})
    if page.pk not in cache_map:
        slots = get_placeholders(page.get_template())
        placeholders = page.placeholders.filter(slot__in=slots)
        assign_plugins(context['request'], placeholders, get_language())
        page_map = {}
        for placeholder in placeholders:
            placeholder.page = page
            page_map[placeholder.slot] = placeholder
        cache_map[page.pk] = page_map
        current_page._tmp_placeholders_cache = cache_map
    return cache_map[page.pk].get(name, None)
def get_placeholder_content(context, request, current_page, name, inherit):
    """Render placeholder ``name`` for ``current_page``.

    When ``inherit`` is set, walk the ancestor chain and render the first
    placeholder that actually holds plugins. Returns the rendered content
    string (possibly empty).
    """
    # BUG FIX: the 2-arg getattr raised AttributeError whenever a toolbar
    # object without an `edit_mode` attribute was attached to the request;
    # default to False instead.
    edit_mode = (getattr(request, 'toolbar', None) and
                 getattr(request.toolbar, 'edit_mode', False))
    pages = [current_page]
    # don't display inherited plugins in edit mode, so that the user doesn't
    # mistakenly edit/delete them. This is a fix for issue #1303. See the
    # discussion there for possible enhancements
    if inherit and not edit_mode:
        pages = chain([current_page], current_page.get_cached_ancestors(ascending=True))
    for page in pages:
        placeholder = _get_placeholder(current_page, page, context, name)
        if placeholder is None:
            continue
        # Skip pages whose placeholder holds no plugins at all.
        if not get_plugins(request, placeholder):
            continue
        # @modified wej
        # ToDo: Add snippet processing logic
        content = render_placeholder(placeholder, context, name)
        if content:
            return content
    # if we reach this point, we have an empty or non-existant placeholder
    # call _get_placeholder again to get the placeholder properly rendered
    # in frontend editing
    placeholder = _get_placeholder(current_page, current_page, context, name)
    return render_placeholder(placeholder, context, name)
class PlaceholderParser(Parser):
    def parse_blocks(self):
        """Parse the optional fallback block.

        ``{% placeholder %}`` only becomes a block tag (terminated by
        ``{% endplaceholder %}``) when an ``or`` marker is present among its
        extra bits; otherwise nothing further is consumed.

        (This replaced an earlier MultiValueArgument-based version that
        scanned ``extra_bits`` as a list of bits.)
        """
        for bit,value in self.kwargs['extra_bits'].iteritems():
            # 'or' may appear as a bare keyword or as a quoted value.
            if bit == 'or' or value.var.value.strip('"').strip("'") == 'or':
                return super(PlaceholderParser, self).parse_blocks()
        return
class PlaceholderOptions(Options):
    """Options subclass that wires in the custom ``PlaceholderParser``."""
    def get_parser_class(self):
        # Use the parser that understands the optional 'or' fallback block.
        return PlaceholderParser
class Placeholder(Tag):
    """
    This template node is used to output page content and
    is also used in the admin to dynamically generate input fields.

    eg: {% placeholder "placeholder_name" %}

    {% placeholder "sidebar" inherit %}

    {% placeholder "footer" inherit or %}
        <a href="/about/">About us</a>
    {% endplaceholder %}

    Keyword arguments:
    name -- the name of the placeholder
    width -- additional width attribute (integer) which gets added to the plugin context
    (deprecated, use `{% with 320 as width %}{% placeholder "foo"}{% endwith %}`)
    inherit -- optional argument which if given will result in inheriting
    the content of the placeholder with the same name on parent pages
    or -- optional argument which if given will make the template tag a block
    tag whose content is shown if the placeholder is empty
    """
    name = 'placeholder'
    options = PlaceholderOptions(
        Argument('name', resolve=False),
        # from @wej @start-added Tue Jun 11 21:38:48 CST 2013
        MultiKeywordArgument('extra_bits', required=False, resolve=False),
        blocks=[
            ('endplaceholder', 'nodelist'),
        ]
    )

    def render_tag(self, context, name, extra_bits, nodelist=None):
        """Render the placeholder, falling back to ``nodelist`` when empty."""
        validate_placeholder_name(name)
        width = None
        inherit = False
        for bit, value in extra_bits.iteritems():
            if bit == 'inherit':
                inherit = True
            elif bit == 'width' and value.isdigit():
                width = int(value)
                import warnings
                warnings.warn(
                    "The width parameter for the placeholder tag is deprecated.",
                    DeprecationWarning
                )
        if not 'request' in context:
            return ''
        request = context['request']
        if width:
            context.update({'width': width})
        page = request.current_page
        if not page or page == 'dummy':
            if nodelist:
                return nodelist.render(context)
            return ''
        content = get_placeholder_content(context, request, page, name, inherit)
        if not content and nodelist:
            return nodelist.render(context)
        return content

    def get_name(self):
        """Return the placeholder name with surrounding quotes removed."""
        return self.kwargs['name'].var.value.strip('"').strip("'")

    # from @wej modified
    def _extra_value(self, key):
        """Return the unquoted raw value of extra bit ``key``, or None.

        Factors out the lookup loop previously duplicated across
        get_group/get_width/get_height/get_tips.
        """
        for bit, value in self.kwargs['extra_bits'].iteritems():
            if bit == key:
                return value.var.value.strip('"').strip("'")
        return None

    def get_group(self):
        return self._extra_value('group')

    def get_width(self):
        width = self._extra_value('width')
        return int(width) if width is not None else None

    def get_height(self):
        height = self._extra_value('height')
        return int(height) if height is not None else None

    def get_tips(self):
        tips = self._extra_value('tips')
        return _(tips) if tips is not None else None

register.tag(Placeholder)
class RenderPlugin(InclusionTag):
    """``{% render_plugin plugin %}`` -- render a single CMS plugin.

    In frontend edit mode the toolbar plugin processor is added so the
    output gets wrapped with the frontend-editing markup.
    """
    template = 'cms/content.html'
    name = 'render_plugin'
    options = Options(
        Argument('plugin')
    )

    def get_context(self, context, plugin):
        # Prepend frontedit toolbar output if applicable
        request = context['request']
        toolbar = getattr(request, 'toolbar', None)
        page = request.current_page
        # BUG FIX: the toolbar default above is None, but edit_mode was read
        # unconditionally -- guard so requests without a toolbar don't raise
        # AttributeError.
        edit = bool(toolbar) and toolbar.edit_mode and (
            not page or page.has_change_permission(request))
        if edit:
            from cms.middleware.toolbar import toolbar_plugin_processor
            processors = (toolbar_plugin_processor,)
        else:
            processors = None
        return {'content': plugin.render_plugin(context, processors=processors)}

register.tag(RenderPlugin)
class PageAttribute(AsTag):
    """
    This template node is used to output attribute from a page such
    as its title or slug.

    Synopsis
         {% page_attribute "field-name" %}
         {% page_attribute "field-name" as varname %}
         {% page_attribute "field-name" page_lookup %}
         {% page_attribute "field-name" page_lookup as varname %}

    Example
         {# Output current page's page_title attribute: #}
         {% page_attribute "page_title" %}
         {# Output page_title attribute of the page with reverse_id "the_page": #}
         {% page_attribute "page_title" "the_page" %}
         {# Output slug attribute of the page with pk 10: #}
         {% page_attribute "slug" 10 %}
         {# Assign page_title attribute to a variable: #}
         {% page_attribute "page_title" as title %}

    Keyword arguments:
    field-name -- the name of the field to output. Use one of:
    - title
    - menu_title
    - page_title
    - slug
    - meta_description
    - meta_keywords

    page_lookup -- lookup argument for Page, if omitted field-name of current page is returned.
    See _get_page_by_untyped_arg() for detailed information on the allowed types
    and their interpretation for the page_lookup argument.

    varname -- context variable name. Output will be added to template context as this variable.
    This argument is required to follow the 'as' keyword.
    """
    name = 'page_attribute'
    options = Options(
        Argument('name', resolve=False),
        Argument('page_lookup', required=False, default=None),
        'as',
        Argument('varname', required=False, resolve=False)
    )

    valid_attributes = [
        "title",
        "slug",
        "meta_description",
        "meta_keywords",
        "page_title",
        "menu_title"
    ]

    def get_value(self, context, name, page_lookup):
        if not 'request' in context:
            return ''
        request = context['request']
        attribute = name.lower()
        lang = get_language_from_request(request)
        page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
        if page == "dummy":
            return ''
        # Only whitelisted attributes may be read through the template tag.
        if page and attribute in self.valid_attributes:
            getter = getattr(page, "get_%s" % attribute)
            return escape(getter(language=lang, fallback=True))
        return ''

register.tag(PageAttribute)
class CleanAdminListFilter(InclusionTag):
    """Admin list filter that collapses choices sharing a query string."""
    template = 'admin/filter.html'
    name = 'clean_admin_list_filter'

    options = Options(
        Argument('cl'),
        Argument('spec'),
    )

    def get_context(self, context, cl, spec):
        # Sorting groups duplicates together; keep the first of each run.
        ordered = sorted(list(spec.choices(cl)), key=lambda k: k['query_string'])
        unique_choices = []
        last_query_string = None
        for choice in ordered:
            if choice['query_string'] == last_query_string:
                continue
            unique_choices.append(choice)
            last_query_string = choice['query_string']
        return {'title': spec.title(), 'choices': unique_choices}
def _restore_sekizai(context, changes):
    """Replay cached sekizai (css/js) additions into the current context."""
    sekizai_container = context[get_varname()]
    for namespace, values in changes.items():
        bucket = sekizai_container[namespace]
        for value in values:
            bucket.append(value)
def _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=None,
                               site=None, cache_result=True):
    """
    Shows the content of a page with a placeholder name and given lookup
    arguments in the given language.
    This is useful if you want to have some more or less static content that is
    shared among many pages, such as a footer.
    See _get_page_by_untyped_arg() for detailed information on the allowed types
    and their interpretation for the page_lookup argument.
    """
    validate_placeholder_name(placeholder_name)
    request = context.get('request', False)
    site_id = get_site_id(site)
    if not request:
        return {'content': ''}
    if lang is None:
        lang = get_language_from_request(request)
    if cache_result:
        base_key = _get_cache_key('_show_placeholder_for_page', page_lookup, lang, site_id)
        cache_key = _clean_key('%s_placeholder:%s' % (base_key, placeholder_name))
        cached_value = cache.get(cache_key)
        # New-style cache entries also carry the sekizai blocks collected
        # while rendering, which must be replayed into this context.
        if isinstance(cached_value, dict): # new style
            _restore_sekizai(context, cached_value['sekizai'])
            return {'content': mark_safe(cached_value['content'])}
        elif isinstance(cached_value, basestring): # old style
            return {'content': mark_safe(cached_value)}
    page = _get_page_by_untyped_arg(page_lookup, request, site_id)
    if not page:
        return {'content': ''}
    try:
        placeholder = page.placeholders.get(slot=placeholder_name)
    except PlaceholderModel.DoesNotExist:
        # Missing placeholder slot is a hard error only in DEBUG.
        if settings.DEBUG:
            raise
        return {'content': ''}
    # Watch sekizai so css/js added during rendering can be cached as well.
    watcher = Watcher(context)
    content = render_placeholder(placeholder, context, placeholder_name)
    changes = watcher.get_changes()
    if cache_result:
        cache.set(cache_key, {'content': content, 'sekizai': changes}, get_cms_setting('CACHE_DURATIONS')['content'])
    if content:
        return {'content': mark_safe(content)}
    return {'content': ''}
class ShowPlaceholderById(InclusionTag):
    """``{% show_placeholder_by_id name reverse_id [lang] [site] %}``."""
    template = 'cms/content.html'
    name = 'show_placeholder_by_id'

    options = Options(
        Argument('placeholder_name'),
        Argument('reverse_id'),
        Argument('lang', required=False, default=None),
        Argument('site', required=False, default=None),
    )

    def get_context(self, *args, **kwargs):
        return _show_placeholder_for_page(**self.get_kwargs(*args, **kwargs))

    def get_kwargs(self, context, placeholder_name, reverse_id, lang, site):
        return dict(
            context=context,
            placeholder_name=placeholder_name,
            page_lookup=reverse_id,
            lang=lang,
            site=site,
        )

register.tag(ShowPlaceholderById)
register.tag('show_placeholder', ShowPlaceholderById)
class ShowUncachedPlaceholderById(ShowPlaceholderById):
    """Same as ShowPlaceholderById but bypasses the placeholder cache."""
    name = 'show_uncached_placeholder_by_id'

    def get_kwargs(self, *args, **kwargs):
        parent_kwargs = super(ShowUncachedPlaceholderById, self).get_kwargs(*args, **kwargs)
        parent_kwargs['cache_result'] = False
        return parent_kwargs

register.tag(ShowUncachedPlaceholderById)
register.tag('show_uncached_placeholder', ShowUncachedPlaceholderById)
class CMSToolbar(InclusionTag):
    """Renders the CMS frontend toolbar when it should be shown."""
    template = 'cms/toolbar/toolbar.html'
    name = 'cms_toolbar'

    def render(self, context):
        request = context.get('request', None)
        toolbar = getattr(request, 'toolbar', None) if request else None
        # Render nothing unless a visible toolbar is attached to the request.
        if not (toolbar and toolbar.show_toolbar):
            return ''
        return super(CMSToolbar, self).render(context)

    def get_context(self, context):
        context['CMS_TOOLBAR_CONFIG'] = context['request'].toolbar.as_json(context)
        return context

register.tag(CMSToolbar)
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from StringIO import StringIO
import traceback
from appengine_wrappers import (
DeadlineExceededError, IsDevServer, logservice, memcache, urlfetch, webapp)
from branch_utility import BranchUtility
from server_instance import ServerInstance
import svn_constants
import time
# The default channel to serve docs for if no channel is specified.
_DEFAULT_CHANNEL = 'stable'
class Handler(webapp.RequestHandler):
# AppEngine instances should never need to call out to SVN. That should only
# ever be done by the cronjobs, which then write the result into DataStore,
# which is as far as instances look.
#
# Why? SVN is slow and a bit flaky. Cronjobs failing is annoying but
# temporary. Instances failing affects users, and is really bad.
#
# Anyway - to enforce this, we actually don't give instances access to SVN.
# If anything is missing from datastore, it'll be a 404. If the cronjobs
# don't manage to catch everything - uhoh. On the other hand, we'll figure it
# out pretty soon, and it also means that legitimate 404s are caught before a
# round trip to SVN.
#
# However, we can't expect users of preview.py nor the dev server to run a
# cronjob first, so, this is a hack allow that to be online all of the time.
# TODO(kalman): achieve this via proper dependency injection.
ALWAYS_ONLINE = IsDevServer()
def __init__(self, request, response):
super(Handler, self).__init__(request, response)
def _HandleGet(self, path):
channel_name, real_path = BranchUtility.SplitChannelNameFromPath(path)
if channel_name == _DEFAULT_CHANNEL:
self.redirect('/%s' % real_path)
return
if channel_name is None:
channel_name = _DEFAULT_CHANNEL
# TODO(kalman): Check if |path| is a directory and serve path/index.html
# rather than special-casing apps/extensions.
if real_path.strip('/') == 'apps':
real_path = 'apps/index.html'
if real_path.strip('/') == 'extensions':
real_path = 'extensions/index.html'
constructor = (
ServerInstance.CreateOnline if Handler.ALWAYS_ONLINE else
ServerInstance.GetOrCreateOffline)
server_instance = constructor(channel_name)
canonical_path = server_instance.path_canonicalizer.Canonicalize(real_path)
if real_path != canonical_path:
self.redirect(canonical_path)
return
server_instance.Get(real_path, self.request, self.response)
def _HandleCron(self, path):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
class MockResponse(object):
def __init__(self):
self.status = 200
self.out = StringIO()
self.headers = {}
def set_status(self, status):
self.status = status
def clear(self, *args):
pass
class MockRequest(object):
def __init__(self, path):
self.headers = {}
self.path = path
self.url = '//localhost/%s' % path
channel = path.split('/')[-1]
logging.info('cron/%s: starting' % channel)
server_instance = ServerInstance.CreateOnline(channel)
def run_cron_for_dir(d, path_prefix=''):
success = True
start_time = time.time()
files = [f for f in server_instance.content_cache.GetFromFileListing(d)
if not f.endswith('/')]
logging.info('cron/%s: rendering %s files from %s...' % (
channel, len(files), d))
for i, f in enumerate(files):
error = None
path = '%s%s' % (path_prefix, f)
try:
response = MockResponse()
server_instance.Get(path, MockRequest(path), response)
if response.status != 200:
error = 'Got %s response' % response.status
except DeadlineExceededError:
logging.error(
'cron/%s: deadline exceeded rendering %s (%s of %s): %s' % (
channel, path, i + 1, len(files), traceback.format_exc()))
raise
except error:
pass
if error:
logging.error('cron/%s: error rendering %s: %s' % (
channel, path, error))
success = False
logging.info('cron/%s: rendering %s files from %s took %s seconds' % (
channel, len(files), d, time.time() - start_time))
return success
success = True
for path, path_prefix in (
# Note: rendering the public templates will pull in all of the private
# templates.
(svn_constants.PUBLIC_TEMPLATE_PATH, ''),
# Note: rendering the public templates will have pulled in the .js and
# manifest.json files (for listing examples on the API reference pages),
# but there are still images, CSS, etc.
(svn_constants.STATIC_PATH, 'static/'),
(svn_constants.EXAMPLES_PATH, 'extensions/examples/')):
try:
# Note: don't try to short circuit any of this stuff. We want to run
# the cron for all the directories regardless of intermediate failures.
success = run_cron_for_dir(path, path_prefix=path_prefix) and success
except DeadlineExceededError:
success = False
break
if success:
self.response.status = 200
self.response.out.write('Success')
else:
self.response.status = 500
self.response.out.write('Failure')
logging.info('cron/%s: finished' % channel)
def _RedirectSpecialCases(self, path):
  """Redirect a small set of well-known paths to their canonical homes.

  Returns True (after issuing the redirect) when |path| was one of the
  special cases, False otherwise.
  """
  special_targets = {
    '/': 'http://developer.google.com/chrome',
    '/index.html': 'http://developer.google.com/chrome',
    '/apps.html': '/apps/about_apps.html',
  }
  target = special_targets.get(path)
  if target is None:
    return False
  self.redirect(target)
  return True
def _RedirectFromCodeDotGoogleDotCom(self, path):
  """Redirect legacy code.google.com URLs to developer.chrome.com.

  Preserves the request's scheme, drops a leading 'chrome' path segment,
  and moves any recognized channel name to the front of the path.
  Returns True when a redirect was issued, False otherwise.
  """
  request_url = self.request.url
  if not request_url.startswith(('http://code.google.com',
                                 'https://code.google.com')):
    return False
  # Keep https requests on https.
  scheme = 'https' if request_url.startswith('https') else 'http'
  segments = path.split('/')
  if segments and segments[0] == 'chrome':
    del segments[0]
  # Hoist a channel name (e.g. 'beta') to the front of the path.
  for channel in BranchUtility.GetAllBranchNames():
    if channel in segments:
      segments.remove(channel)
      segments.insert(0, channel)
  self.redirect('%s://developer.chrome.com/%s' % (scheme, '/'.join(segments)))
  return True
def get(self):
  """Top-level GET dispatcher.

  404s browser-housekeeping files, applies special-case and
  code.google.com redirects, normalizes extensionless paths to
  directories, dispatches /cron paths, and otherwise renders the
  requested document.
  """
  path = self.request.path
  # |path| arrives with a leading '/', so strip it before comparing.
  # (Bug fix: the comparison previously used the un-stripped path, which
  # could never match, and the 404 branch called the undefined name
  # |response| -- a NameError -- instead of self.response.)
  if path.lstrip('/') in ('favicon.ico', 'robots.txt'):
    self.response.set_status(404)
    return
  if self._RedirectSpecialCases(path):
    return
  if path.startswith('/cron'):
    # Crons often time out, and when they do *and* then eventually try to
    # flush logs they die. Turn off autoflush and manually do so at the end.
    logservice.AUTOFLUSH_ENABLED = False
    try:
      self._HandleCron(path)
    finally:
      logservice.flush()
    return
  # Redirect paths like "directory" to "directory/". This is so relative
  # file paths will know to treat this as a directory.
  if os.path.splitext(path)[1] == '' and path[-1] != '/':
    self.redirect(path + '/')
    return
  path = path.strip('/')
  if self._RedirectFromCodeDotGoogleDotCom(path):
    return
  self._HandleGet(path)
| |
# A VBA parser based on pyole
from pyole import *
class VBABase(OLEBase):
    """Common base class for the VBA stream parsers below.

    Provides decompression of an MS-OVBA-style compressed container
    (the format used for VBA module source code).
    """
    def _decompress(self, data):
        """Decompress a compressed container held in byte string *data*.

        Returns the decompressed byte string, or None when the signature
        byte or a chunk size is malformed.  NOTE(review): operates on
        Python 2 byte strings (indexing yields 1-char strings).
        """
        CompressedCurrent = 0
        DecompressedCurrent = 0
        CompressedRecordEnd = len(data)
        DecompressedBuffer = ''
        # The container must start with the 0x01 signature byte.
        SignatureByte = ord(data[CompressedCurrent])
        if SignatureByte != 0x01:
            self.ole_logger.debug('CompressedContainer.SignatureByte has an abnormal value.')
            return None
        CompressedCurrent += 1
        i = 0
        # Walk the sequence of chunks; each begins with a 2-byte header.
        while CompressedCurrent < CompressedRecordEnd:
            CompressedChunkStart = CompressedCurrent
            CompressedChunkHeader = struct.unpack('<H', data[CompressedChunkStart:CompressedChunkStart+0x02])[0]
            # Low 12 header bits encode (chunk size - 3).
            CompressedChunkSize = (CompressedChunkHeader & 0x0FFF) + 0x03
            self.ole_logger.debug('CompressedChunk[' + str(i) + '].Size: ' + str(hex(CompressedChunkSize)))
            if CompressedChunkSize < 3 or CompressedChunkSize > 4098:
                self.ole_logger.debug('CompressedChunk[' + str(i) + '].Size has an abnormal value.')
                return None
            # Top header bit: 1 = token-compressed chunk, 0 = raw chunk.
            CompressedChunkFlag = (CompressedChunkHeader >> 15)
            self.ole_logger.debug('CompressedChunk[' + str(i) + '].Flag: ' + str(hex(CompressedChunkFlag)))
            # Clamp the chunk end to the end of the record.
            if (CompressedChunkStart + CompressedChunkSize) > CompressedRecordEnd:
                CompressedEnd = CompressedRecordEnd
            else:
                CompressedEnd = CompressedChunkStart + CompressedChunkSize
            DecompressedChunkStart = DecompressedCurrent
            CompressedCurrent = CompressedChunkStart + 0x02
            if CompressedChunkFlag == 0x01:
                # Compressed chunk: groups of 8 tokens, each group preceded
                # by a flag byte (one bit per token).
                while CompressedCurrent < CompressedEnd:
                    FlagByte = ord(data[CompressedCurrent])
                    self.ole_logger.debug('CompressedChunk[' + str(i) + '].Token.FlagByte: ' + str(hex(FlagByte)))
                    CompressedCurrent += 1
                    for j in range(0, 8):
                        if CompressedCurrent < CompressedEnd:
                            FlagBit = (FlagByte >> j) & 0x01
                            self.ole_logger.debug('CompressedChunk[' + str(i) + '].Token[' + str(j) + '].FlagBit: ' + str(hex(FlagBit)))
                            if FlagBit == 0x00:
                                # Literal token: copy one byte through.
                                DecompressedBuffer += data[CompressedCurrent]
                                DecompressedCurrent = len(DecompressedBuffer)
                                CompressedCurrent += 1
                            else:
                                # Copy token: back-reference into the output.
                                CopyToken = struct.unpack('<H', data[CompressedCurrent:CompressedCurrent+0x02])[0]
                                self.ole_logger.debug('CompressedChunk[' + str(i) + '].Token[' + str(j) + '].CopyToken: ' + str(hex(CopyToken)))
                                # The offset/length bit split depends on how
                                # much of the current chunk is decompressed.
                                difference = DecompressedCurrent - DecompressedChunkStart
                                for bitcount in range(1, 13):
                                    if (2 ** bitcount) >= difference:
                                        break
                                if bitcount < 4:
                                    bitcount = 4
                                lengthmask = (0xFFFF >> bitcount)
                                offsetmask = (~lengthmask & 0xFFFF)
                                length = (CopyToken & lengthmask) + 3
                                # ('Lenght' typo below is preserved: it is a runtime log string.)
                                self.ole_logger.debug('CompressedChunk[' + str(i) + '].Token[' + str(j) + '].Lenght: ' + str(hex(length)))
                                offset = ((CopyToken & offsetmask) >> (16 - bitcount)) + 1
                                self.ole_logger.debug('CompressedChunk[' + str(i) + '].Token[' + str(j) + '].Offset: ' + str(hex(offset)))
                                # Byte-at-a-time copy so overlapping
                                # back-references replicate correctly.
                                srcoffset = DecompressedCurrent - offset
                                for index in range(0, length):
                                    DecompressedBuffer += DecompressedBuffer[srcoffset+index]
                                DecompressedCurrent = len(DecompressedBuffer)
                                CompressedCurrent += 2
            else:
                # Raw chunk: 4096 bytes copied verbatim.
                DecompressedBuffer += data[CompressedCurrent:CompressedCurrent+4096]
                DecompressedCurrent = len(DecompressedBuffer)
                CompressedCurrent += 4096
            i += 1
        return DecompressedBuffer
class ProjectStream(VBABase):
    """Parser for the PROJECT stream: three CRLF-CRLF-separated sections
    (properties, host extenders, workspace), each made of key=value lines."""
    Property = dict()
    HostExtenders = dict()
    Workspace = dict()
    def __init__(self, data):
        self.Property = dict()
        self.HostExtenders = dict()
        self.Workspace = dict()
        self.ole_logger.debug('######## VBAProjectProperties ########')
        items = data.split('\r\n\r\n')
        self.Property = self._parse_property(items[0])
        self.HostExtenders = self._parse_property(items[1])
        self.Workspace = self._parse_property(items[2])
    def _parse_property(self, data):
        """Parse CRLF-separated 'key=value' lines into a dict; repeated
        keys have their values joined with commas."""
        # Renamed from 'property' to avoid shadowing the builtin.
        props = dict()
        for item in data.split('\r\n'):
            self.ole_logger.debug(item)
            if '=' in item:
                # Split on the first '=' only so values that themselves
                # contain '=' are preserved intact (the old unbounded
                # split raised ValueError on such lines).  'has_key' was
                # also replaced with 'in' (removed in Python 3).
                key, value = item.split('=', 1)
                if key not in props:
                    props[key] = value
                else:
                    props[key] = props[key] + ',' + value
        return props
class Projectwm(VBABase):
    """Parser for the PROJECTwm stream: a series of (MBCS name, NUL,
    UTF-16 name) records terminated by two NUL bytes."""
    NameMap = list()
    def __init__(self, data):
        self.NameMap = list()
        self.ole_logger.debug('######## PROJECTwmStream ########')
        if len(data) > 0x02 and data[-2:] == '\x00\x00':
            namemaps = data.split('\x00\x00\x00')
            for i in range(0, len(namemaps)-1):
                index = namemaps[i].find('\x00')
                if -1 != index:
                    namemap_mbcs = namemaps[i][0:index]
                    self.ole_logger.debug('PROJECTwm.NameMap[' + str(i) + '].MBCS: ' + namemap_mbcs)
                    namemap_utf16 = namemaps[i][index+1:]+'\x00'
                    self.ole_logger.debug('PROJECTwm.NameMap[' + str(i) + '].UTF16: ' + namemap_utf16)
                    # Bug fix: the parsed pair was previously logged but
                    # never stored, leaving NameMap permanently empty.
                    self.NameMap.append((namemap_mbcs, namemap_utf16))
                else:
                    self._raise_exception('PROJECTwm.NameMap[' + str(i) + '] has an abnormal values.')
        else:
            self._raise_exception('PROJECTwm stream contains abnormal values.')
class VBAProject(VBABase):
    """Parser for the _VBA_PROJECT stream: a small fixed header followed
    by a version-dependent performance cache blob."""
    Reserved1 = 0
    Version = 0
    Reserved2 = 0
    Reserved3 = 0
    PerformanceCache = ''
    def __init__(self, data):
        self.Reserved1 = 0
        self.Version = 0
        self.Reserved2 = 0
        self.Reserved3 = 0
        self.PerformanceCache = ''
        self.ole_logger.debug('######## _VBA_PROJECTStream ########')
        self.Reserved1 = struct.unpack_from('<H', data, 0x00)[0]
        self.ole_logger.debug('_VBA_PROJECT.Reserved1: ' + str(hex(self.Reserved1)))
        if self.Reserved1 != 0x61CC:
            self._raise_exception('_VBA_PROJECT.Reserved1 has an abnormal values.')
        self.Version = struct.unpack_from('<H', data, 0x02)[0]
        self.ole_logger.debug('_VBA_PROJECT.Version: ' + str(hex(self.Version)))
        self.Reserved2 = ord(data[0x04])
        self.ole_logger.debug('_VBA_PROJECT.Reserved2: ' + str(hex(self.Reserved2)))
        if self.Reserved2 != 0x00:
            self._raise_exception('_VBA_PROJECT.Reserved2 has an abnormal values.')
        self.Reserved3 = struct.unpack_from('<H', data, 0x05)[0]
        self.ole_logger.debug('_VBA_PROJECT.Reserved3: ' + str(hex(self.Reserved3)))
        # Everything after the 7-byte header is the performance cache.
        self.PerformanceCache = data[0x07:]
class ProjectSysKindRecord(VBABase):
    """Parser for the PROJECTSYSKIND record (Id 0x01) of the dir stream."""
    Id = 0
    Size = 0
    SysKind = 0
    def __init__(self, data):
        self.Id = 0
        self.Size = 0
        self.SysKind = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ProjectSysKindRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x01:
            self._raise_exception('ProjectSysKindRecord.Id has an abnormal value.')
        self.Size = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ProjectSysKindRecord.Size: ' + str(hex(self.Size)))
        if self.Size != 0x04:
            self._raise_exception('ProjectSysKindRecord.Size has an abnormal value.')
        self.SysKind = struct.unpack('<I', data[0x06:0x0A])[0]
        # Map the SysKind value to its platform description for logging.
        platforms = {
            0x00: '16-bit Windows Platforms',
            0x01: '32-bit Windows Platforms',
            0x02: 'Macintosh Platforms',
            0x03: '64-bit Windows Platforms',
        }
        if self.SysKind in platforms:
            self.ole_logger.debug('ProjectSysKindRecord.SysKind: ' + str(hex(self.SysKind)) + ' (' + platforms[self.SysKind] + ')')
        else:
            self._raise_exception('ProjectSysKindRecord.SysKind has an abnormal value.')
class ProjectLcidRecord(VBABase):
    """Parser for the PROJECTLCID record (Id 0x02) of the dir stream."""
    Id = 0
    Size = 0
    Lcid = 0
    def __init__(self, data):
        self.Id = self.Size = self.Lcid = 0
        self.Id = struct.unpack_from('<H', data, 0x00)[0]
        self.ole_logger.debug('ProjectLcidRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x02:
            self._raise_exception('ProjectLcidRecord.Id has an abnormal value.')
        self.Size = struct.unpack_from('<I', data, 0x02)[0]
        self.ole_logger.debug('ProjectLcidRecord.Size: ' + str(hex(self.Size)))
        if self.Size != 0x04:
            self._raise_exception('ProjectLcidRecord.Size has an abnormal value.')
        self.Lcid = struct.unpack_from('<I', data, 0x06)[0]
        self.ole_logger.debug('ProjectLcidRecord.Lcid: ' + str(hex(self.Lcid)))
        if self.Lcid != 0x409:
            self._raise_exception('ProjectLcidRecord.Lcid has an abnormal value.')
class ProjectLcidInvokeRecord(VBABase):
    """Parser for the PROJECTLCIDINVOKE record (Id 0x14) of the dir stream."""
    Id = 0
    Size = 0
    LcidInvoke = 0
    def __init__(self, data):
        self.Id = self.Size = self.LcidInvoke = 0
        self.Id = struct.unpack_from('<H', data, 0x00)[0]
        self.ole_logger.debug('ProjectLcidInvokeRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x14:
            self._raise_exception('ProjectLcidInvokeRecord.Id has an abnormal value.')
        self.Size = struct.unpack_from('<I', data, 0x02)[0]
        self.ole_logger.debug('ProjectLcidInvokeRecord.Size: ' + str(hex(self.Size)))
        if self.Size != 0x04:
            self._raise_exception('ProjectLcidInvokeRecord.Size has an abnormal value.')
        self.LcidInvoke = struct.unpack_from('<I', data, 0x06)[0]
        self.ole_logger.debug('ProjectLcidInvokeRecord.LcidInvoke: ' + str(hex(self.LcidInvoke)))
        if self.LcidInvoke != 0x409:
            self._raise_exception('ProjectLcidInvokeRecord.LcidInvoke has an abnormal value.')
class ProjectCodePageRecord(VBABase):
    """Parser for the PROJECTCODEPAGE record (Id 0x03) of the dir stream."""
    Id = 0
    Size = 0
    CodePage = 0
    def __init__(self, data):
        self.Id = self.Size = self.CodePage = 0
        record_id = struct.unpack('<H', data[0x00:0x02])[0]
        self.Id = record_id
        self.ole_logger.debug('ProjectCodePageRecord.Id: ' + str(hex(self.Id)))
        if record_id != 0x03:
            self._raise_exception('ProjectCodePageRecord.Id has an abnormal value.')
        record_size = struct.unpack('<I', data[0x02:0x06])[0]
        self.Size = record_size
        self.ole_logger.debug('ProjectCodePageRecord.Size: ' + str(hex(self.Size)))
        if record_size != 0x02:
            self._raise_exception('ProjectCodePageRecord.Size has an abnormal value.')
        self.CodePage = struct.unpack('<H', data[0x06:0x08])[0]
        self.ole_logger.debug('ProjectCodePageRecord.CodePage: ' + str(hex(self.CodePage)))
class ProjectNameRecord(VBABase):
    """Parser for the PROJECTNAME record (Id 0x04) of the dir stream."""
    Id = 0
    Size = 0
    # Consistency fix: ProjectName holds a byte string, so default it to ''
    # like the other string fields (it was previously the integer 0).
    ProjectName = ''
    def __init__(self, data):
        self.Id = 0
        self.Size = 0
        self.ProjectName = ''
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ProjectNameRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x04:
            self._raise_exception('ProjectNameRecord.Id has an abnormal value.')
        self.Size = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ProjectNameRecord.Size: ' + str(hex(self.Size)))
        # The name must be 1..128 bytes long.
        if self.Size < 0x01 or self.Size > 0x80:
            self._raise_exception('ProjectNameRecord.Size has an abnormal value.')
        self.ProjectName = data[0x06:0x06+self.Size]
        self.ole_logger.debug('ProjectNameRecord.ProjectName: ' + self.ProjectName)
class ProjectDocStringRecord(VBABase):
    """Parser for the PROJECTDOCSTRING record (Id 0x05): an MBCS doc
    string plus its UTF-16 twin, each prefixed with a 32-bit length."""
    Id = 0
    SizeOfDocString = 0
    DocString = ''
    Reserved = 0
    SizeOfDocStringUnicode = 0
    DocStringUnicode = ''
    def __init__(self, data):
        self.Id = 0
        self.SizeOfDocString = 0
        self.DocString = ''
        self.Reserved = 0
        self.SizeOfDocStringUnicode = 0
        self.DocStringUnicode = ''
        pos = 0
        self.Id = struct.unpack('<H', data[pos:pos+0x02])[0]
        self.ole_logger.debug('ProjectDocStringRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x05:
            self._raise_exception('ProjectDocStringRecord.Id has an abnormal value.')
        pos += 0x02
        self.SizeOfDocString = struct.unpack('<I', data[pos:pos+0x04])[0]
        self.ole_logger.debug('ProjectDocStringRecord.SizeOfDocString: ' + str(hex(self.SizeOfDocString)))
        if self.SizeOfDocString > 2000:
            self._raise_exception('ProjectDocStringRecord.SizeOfDocString has an abnormal value.')
        pos += 0x04
        self.DocString = data[pos:pos+self.SizeOfDocString]
        self.ole_logger.debug('ProjectDocStringRecord.DocString: ' + self.DocString)
        pos += self.SizeOfDocString
        self.Reserved = struct.unpack('<H', data[pos:pos+0x02])[0]
        self.ole_logger.debug('ProjectDocStringRecord.Reserved: ' + str(hex(self.Reserved)))
        if self.Reserved != 0x40:
            self._raise_exception('ProjectDocStringRecord.Reserved has an abnormal value.')
        pos += 0x02
        self.SizeOfDocStringUnicode = struct.unpack('<I', data[pos:pos+0x04])[0]
        self.ole_logger.debug('ProjectDocStringRecord.SizeOfDocStringUnicode: ' + str(hex(self.SizeOfDocStringUnicode)))
        if self.SizeOfDocStringUnicode > 2000*2:
            self._raise_exception('ProjectDocStringRecord.SizeOfDocStringUnicode has an abnormal value.')
        pos += 0x04
        self.DocStringUnicode = data[pos:pos+self.SizeOfDocStringUnicode]
        self.ole_logger.debug('ProjectDocStringRecord.DocStringUnicode: ' + self.DocStringUnicode)
class ProjectHelpFilePathRecord(VBABase):
    """Parser for the PROJECTHELPFILEPATH record (Id 0x06): two help-file
    paths, each prefixed with a 32-bit length."""
    Id = 0
    SizeOfHelpFile1 = 0
    HelpFile1 = ''
    Reserved = 0
    SizeOfHelpFile2 = 0
    HelpFile2 = ''
    def __init__(self, data):
        self.Id = 0
        self.SizeOfHelpFile1 = 0
        self.HelpFile1 = ''
        self.Reserved = 0
        self.SizeOfHelpFile2 = 0
        self.HelpFile2 = ''
        pos = 0
        self.Id = struct.unpack('<H', data[pos:pos+0x02])[0]
        self.ole_logger.debug('ProjectHelpFilePathRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x06:
            self._raise_exception('ProjectHelpFilePathRecord.Id has an abnormal value.')
        pos += 0x02
        self.SizeOfHelpFile1 = struct.unpack('<I', data[pos:pos+0x04])[0]
        self.ole_logger.debug('ProjectHelpFilePathRecord.SizeOfHelpFile1: ' + str(hex(self.SizeOfHelpFile1)))
        if self.SizeOfHelpFile1 > 260:
            self._raise_exception('ProjectHelpFilePathRecord.SizeOfHelpFile1 has an abnormal value.')
        pos += 0x04
        self.HelpFile1 = data[pos:pos+self.SizeOfHelpFile1]
        self.ole_logger.debug('ProjectHelpFilePathRecord.HelpFile1: ' + self.HelpFile1)
        pos += self.SizeOfHelpFile1
        self.Reserved = struct.unpack('<H', data[pos:pos+0x02])[0]
        self.ole_logger.debug('ProjectHelpFilePathRecord.Reserved: ' + str(hex(self.Reserved)))
        if self.Reserved != 0x3D:
            self._raise_exception('ProjectHelpFilePathRecord.Reserved has an abnormal value.')
        pos += 0x02
        self.SizeOfHelpFile2 = struct.unpack('<I', data[pos:pos+0x04])[0]
        self.ole_logger.debug('ProjectHelpFilePathRecord.SizeOfHelpFile2: ' + str(hex(self.SizeOfHelpFile2)))
        if self.SizeOfHelpFile2 > 260:
            self._raise_exception('ProjectHelpFilePathRecord.SizeOfHelpFile2 has an abnormal value.')
        pos += 0x04
        self.HelpFile2 = data[pos:pos+self.SizeOfHelpFile2]
        self.ole_logger.debug('ProjectHelpFilePathRecord.HelpFile2: ' + self.HelpFile2)
class ProjectHelpContextRecord(VBABase):
    """Parser for the PROJECTHELPCONTEXT record (Id 0x07) of the dir stream."""
    Id = 0
    Size = 0
    HelpContext = 0
    def __init__(self, data):
        self.Id = self.Size = self.HelpContext = 0
        record_id = struct.unpack('<H', data[0x00:0x02])[0]
        self.Id = record_id
        self.ole_logger.debug('ProjectHelpContextRecord.Id: ' + str(hex(self.Id)))
        if record_id != 0x07:
            self._raise_exception('ProjectHelpContextRecord.Id has an abnormal value.')
        record_size = struct.unpack('<I', data[0x02:0x06])[0]
        self.Size = record_size
        self.ole_logger.debug('ProjectHelpContextRecord.Size: ' + str(hex(self.Size)))
        if record_size != 0x04:
            self._raise_exception('ProjectHelpContextRecord.Size has an abnormal value.')
        self.HelpContext = struct.unpack('<I', data[0x06:0x0A])[0]
        self.ole_logger.debug('ProjectHelpContextRecord.HelpContext: ' + str(hex(self.HelpContext)))
class ProjectLibFlagsRecord(VBABase):
    """Parser for the PROJECTLIBFLAGS record (Id 0x08) of the dir stream."""
    Id = 0
    Size = 0
    LibFlags = 0
    def __init__(self, data):
        self.Id = self.Size = self.LibFlags = 0
        self.Id = struct.unpack_from('<H', data, 0x00)[0]
        self.ole_logger.debug('ProjectLibFlagsRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x8:
            self._raise_exception('ProjectLibFlagsRecord.Id has an abnormal value.')
        self.Size = struct.unpack_from('<I', data, 0x02)[0]
        self.ole_logger.debug('ProjectLibFlagsRecord.Size: ' + str(hex(self.Size)))
        if self.Size != 0x04:
            self._raise_exception('ProjectLibFlagsRecord.Size has an abnormal value.')
        self.LibFlags = struct.unpack_from('<I', data, 0x06)[0]
        self.ole_logger.debug('ProjectLibFlagsRecord.LibFlags: ' + str(hex(self.LibFlags)))
        if self.LibFlags != 0x00:
            self._raise_exception('ProjectLibFlagsRecord.LibFlags has an abnormal value.')
class ProjectVersionRecord(VBABase):
    """Parser for the PROJECTVERSION record (Id 0x09) of the dir stream."""
    Id = 0
    # Consistency fix: __init__ assigns self.Reserved, but the class-level
    # default was declared as 'Size'. Declare Reserved so instances always
    # expose the attribute __init__ sets; 'Size' is retained (always 0)
    # for backward compatibility with any existing readers.
    Size = 0
    Reserved = 0
    MajorVersion = 0
    MinorVersion = 0
    def __init__(self, data):
        self.Id = 0
        self.Reserved = 0
        self.MajorVersion = 0
        self.MinorVersion = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ProjectVersionRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x9:
            self._raise_exception('ProjectVersionRecord.Id has an abnormal value.')
        self.Reserved = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ProjectVersionRecord.Reserved: ' + str(hex(self.Reserved)))
        if self.Reserved != 0x04:
            self._raise_exception('ProjectVersionRecord.Reserved has an abnormal value.')
        self.MajorVersion = struct.unpack('<I', data[0x06:0x0A])[0]
        self.ole_logger.debug('ProjectVersionRecord.MajorVersion: ' + str(hex(self.MajorVersion)))
        self.MinorVersion = struct.unpack('<H', data[0x0A:0x0C])[0]
        self.ole_logger.debug('ProjectVersionRecord.MinorVersion: ' + str(hex(self.MinorVersion)))
class ProjectConstantsRecord(VBABase):
    """Parser for the PROJECTCONSTANTS record (Id 0x0C): an MBCS constants
    string plus its UTF-16 twin, each prefixed with a 32-bit length."""
    Id = 0
    SizeOfConstants = 0
    Constants = ''
    Reserved = 0
    SizeOfConstantsUnicode = 0
    ConstantsUnicode = ''
    def __init__(self, data):
        self.Id = 0
        self.SizeOfConstants = 0
        self.Constants = ''
        self.Reserved = 0
        self.SizeOfConstantsUnicode = 0
        self.ConstantsUnicode = ''
        pos = 0
        self.Id = struct.unpack('<H', data[pos:pos+0x02])[0]
        self.ole_logger.debug('ProjectConstantsRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x0C:
            self._raise_exception('ProjectConstantsRecord.Id has an abnormal value.')
        pos += 0x02
        self.SizeOfConstants = struct.unpack('<I', data[pos:pos+0x04])[0]
        self.ole_logger.debug('ProjectConstantsRecord.SizeOfConstants: ' + str(hex(self.SizeOfConstants)))
        if self.SizeOfConstants > 1015:
            self._raise_exception('ProjectConstantsRecord.SizeOfConstants has an abnormal value.')
        pos += 0x04
        self.Constants = data[pos:pos+self.SizeOfConstants]
        self.ole_logger.debug('ProjectConstantsRecord.Constants: ' + self.Constants)
        pos += self.SizeOfConstants
        self.Reserved = struct.unpack('<H', data[pos:pos+0x02])[0]
        self.ole_logger.debug('ProjectConstantsRecord.Reserved: ' + str(hex(self.Reserved)))
        if self.Reserved != 0x3C:
            self._raise_exception('ProjectConstantsRecord.Reserved has an abnormal value.')
        pos += 0x02
        self.SizeOfConstantsUnicode = struct.unpack('<I', data[pos:pos+0x04])[0]
        self.ole_logger.debug('ProjectConstantsRecord.SizeOfConstantsUnicode: ' + str(hex(self.SizeOfConstantsUnicode)))
        if self.SizeOfConstantsUnicode > 1015*2:
            self._raise_exception('ProjectConstantsRecord.SizeOfConstantsUnicode has an abnormal value.')
        pos += 0x04
        self.ConstantsUnicode = data[pos:pos+self.SizeOfConstantsUnicode]
        self.ole_logger.debug('ProjectConstantsRecord.ConstantsUnicode: ' + self.ConstantsUnicode)
class ProjectInformationRecord(VBABase):
    """Parser for the PROJECTINFORMATION portion of the dir stream: a fixed
    sequence of sub-records, tracked with a running byte offset."""
    SysKindRecord = None
    LcidRecord = None
    LcidInvokeRecord = None
    CodePageRecord = None
    NameRecord = None
    DocStringRecord = None
    HelpFilePathRecord = None
    HelpContextRecord = None
    LibFlagsRecord = None
    VersionRecord = None
    ConstantsRecord = None
    Size = 0
    def __init__(self, data):
        self.SysKindRecord = None
        self.LcidRecord = None
        self.LcidInvokeRecord = None
        # Bug fix: CodePageRecord was missing from this per-instance
        # reset, unlike every other field.
        self.CodePageRecord = None
        self.NameRecord = None
        self.DocStringRecord = None
        self.HelpFilePathRecord = None
        self.HelpContextRecord = None
        self.LibFlagsRecord = None
        self.VersionRecord = None
        self.ConstantsRecord = None
        self.Size = 0
        # Fixed-size records live at fixed offsets.
        self.SysKindRecord = ProjectSysKindRecord(data[0x00:0x0A])
        self.LcidRecord = ProjectLcidRecord(data[0x0A:0x14])
        self.LcidInvokeRecord = ProjectLcidInvokeRecord(data[0x14:0x1E])
        self.CodePageRecord = ProjectCodePageRecord(data[0x1E:0x26])
        # Variable-size records: advance past each record's 6-byte header
        # plus its payload length(s). 0x0C covers header + Reserved +
        # unicode length prefix for the two-string records.
        self.NameRecord = ProjectNameRecord(data[0x26:])
        current = 0x26 + 0x06 + self.NameRecord.Size
        self.DocStringRecord = ProjectDocStringRecord(data[current:])
        current = current + 0x0C + self.DocStringRecord.SizeOfDocString + self.DocStringRecord.SizeOfDocStringUnicode
        self.HelpFilePathRecord = ProjectHelpFilePathRecord(data[current:])
        current = current + 0x0C + self.HelpFilePathRecord.SizeOfHelpFile1 + self.HelpFilePathRecord.SizeOfHelpFile2
        self.HelpContextRecord = ProjectHelpContextRecord(data[current:])
        current = current + 0x0A
        self.LibFlagsRecord = ProjectLibFlagsRecord(data[current:])
        current = current + 0x0A
        self.VersionRecord = ProjectVersionRecord(data[current:])
        current = current + 0x0C
        self.ConstantsRecord = ProjectConstantsRecord(data[current:])
        self.Size = current + 0x0C + self.ConstantsRecord.SizeOfConstants + self.ConstantsRecord.SizeOfConstantsUnicode
class ReferenceNameRecord(VBABase):
    """Parser for the REFERENCENAME record (Id 0x16): an MBCS name plus
    its UTF-16 twin, each prefixed with a 32-bit length."""
    Id = 0
    SizeOfName = 0
    Name = ''
    Reserved = 0
    SizeOfNameUnicode = 0
    NameUnicode = ''
    def __init__(self, data):
        self.Id = 0
        self.SizeOfName = 0
        self.Name = ''
        self.Reserved = 0
        self.SizeOfNameUnicode = 0
        self.NameUnicode = ''
        pos = 0
        self.Id = struct.unpack('<H', data[pos:pos+0x02])[0]
        self.ole_logger.debug('ReferenceNameRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x16:
            self._raise_exception('ReferenceNameRecord.Id has an abnormal value.')
        pos += 0x02
        self.SizeOfName = struct.unpack('<I', data[pos:pos+0x04])[0]
        self.ole_logger.debug('ReferenceNameRecord.SizeOfName: ' + str(hex(self.SizeOfName)))
        pos += 0x04
        self.Name = data[pos:pos+self.SizeOfName]
        self.ole_logger.debug('ReferenceNameRecord.Name: ' + self.Name)
        pos += self.SizeOfName
        self.Reserved = struct.unpack('<H', data[pos:pos+0x02])[0]
        self.ole_logger.debug('ReferenceNameRecord.Reserved: ' + str(hex(self.Reserved)))
        # An unexpected marker here is only warned about, not fatal.
        if self.Reserved != 0x3E:
            self.ole_logger.warn('ReferenceNameRecord.Reserved has an abnormal value.')
        pos += 0x02
        self.SizeOfNameUnicode = struct.unpack('<I', data[pos:pos+0x04])[0]
        self.ole_logger.debug('ReferenceNameRecord.SizeOfNameUnicode: ' + str(hex(self.SizeOfNameUnicode)))
        pos += 0x04
        self.NameUnicode = data[pos:pos+self.SizeOfNameUnicode]
        self.ole_logger.debug('ReferenceNameRecord.NameUnicode: ' + self.NameUnicode)
class ReferenceOriginalRecord(VBABase):
    """Parser for the REFERENCEORIGINAL record (Id 0x33)."""
    Id = 0
    SizeOfLibidOriginal = 0
    LibidOriginal = ''
    def __init__(self, data):
        self.Id = 0
        self.SizeOfLibidOriginal = 0
        self.LibidOriginal = ''
        # Bug fix: this previously assigned to 'self.id' (lowercase),
        # leaving the documented 'Id' attribute stuck at the class
        # default of 0; also adds the Id debug log every sibling
        # record class emits.
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ReferenceOriginalRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x33:
            self._raise_exception('ReferenceOriginalRecord.Id has an abnormal value.')
        self.SizeOfLibidOriginal = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ReferenceOriginalRecord.SizeOfLibidOriginal: ' + str(hex(self.SizeOfLibidOriginal)))
        self.LibidOriginal = data[0x06:0x06+self.SizeOfLibidOriginal]
        self.ole_logger.debug('ReferenceOriginalRecord.LibidOriginal: ' + self.LibidOriginal)
class ReferenceControlRecord(VBABase):
    """Parser for the REFERENCECONTROL record (Id 0x2F), optionally
    preceded by a REFERENCEORIGINAL record (Id 0x33) and optionally
    containing an extended REFERENCENAME record (Id 0x16)."""
    OriginalRecord = None
    Id = 0
    SizeTwiddled = 0
    SizeOfLibidTwiddled = 0
    LibidTwiddled = ''
    Reserved1 = 0
    Reserved2 = 0
    NameRecordExtended = None
    Reserved3 = 0
    SizeExtended = 0
    SizeOfLibidExtended = 0
    LibidExtended = ''
    Reserved4 = 0
    Reserved5 = 0
    OriginalTypeLib = ''
    Cookie = 0
    Size = 0
    def __init__(self, data):
        self.OriginalRecord = None
        self.Id = 0
        self.SizeTwiddled = 0
        self.SizeOfLibidTwiddled = 0
        self.LibidTwiddled = ''
        self.Reserved1 = 0
        self.Reserved2 = 0
        self.NameRecordExtended = None
        self.Reserved3 = 0
        self.SizeExtended = 0
        self.SizeOfLibidExtended = 0
        self.LibidExtended = ''
        self.Reserved4 = 0
        self.Reserved5 = 0
        self.OriginalTypeLib = ''
        self.Cookie = 0
        self.Size = 0
        current = 0
        # If a REFERENCEORIGINAL record (0x33) leads, parse it first and
        # skip past its 6-byte header plus payload.
        id = struct.unpack('<H', data[current:current+0x02])[0]
        if id == 0x33:
            self.OriginalRecord = ReferenceOriginalRecord(data)
            current = current + 0x06 + self.OriginalRecord.SizeOfLibidOriginal
        self.Id = struct.unpack('<H', data[current:current+0x02])[0]
        self.ole_logger.debug('ReferenceControlRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x2F:
            self._raise_exception('ReferenceControlRecord.Id has an abnormal value.')
        current = current + 0x02
        self.SizeTwiddled = struct.unpack('<I', data[current:current+0x04])[0]
        self.ole_logger.debug('ReferenceControlRecord.SizeTwiddled: ' + str(hex(self.SizeTwiddled)))
        current = current + 0x04
        self.SizeOfLibidTwiddled = struct.unpack('<I', data[current:current+0x04])[0]
        self.ole_logger.debug('ReferenceControlRecord.SizeOfLibidTwiddled: ' + str(hex(self.SizeOfLibidTwiddled)))
        current = current + 0x04
        self.LibidTwiddled = data[current:current+self.SizeOfLibidTwiddled]
        self.ole_logger.debug('ReferenceControlRecord.LibidTwiddled: ' + self.LibidTwiddled)
        current = current + self.SizeOfLibidTwiddled
        self.Reserved1 = struct.unpack('<I', data[current:current+0x04])[0]
        self.ole_logger.debug('ReferenceControlRecord.Reserved1: ' + str(hex(self.Reserved1)))
        if self.Reserved1 != 0x00:
            self._raise_exception('ReferenceControlRecord.Reserved1 has an abnormal value.')
        current = current + 0x04
        self.Reserved2 = struct.unpack('<H', data[current:current+0x02])[0]
        self.ole_logger.debug('ReferenceControlRecord.Reserved2: ' + str(hex(self.Reserved2)))
        if self.Reserved2 != 0x00:
            self._raise_exception('ReferenceControlRecord.Reserved2 has an abnormal value.')
        current = current + 0x02
        # Optional extended name record (0x16); 0x0C covers its header,
        # Reserved marker and unicode length prefix.
        id = struct.unpack('<H', data[current:current+0x02])[0]
        if id == 0x16:
            self.NameRecordExtended = ReferenceNameRecord(data[current:])
            current = current + 0x0C + self.NameRecordExtended.SizeOfName + self.NameRecordExtended.SizeOfNameUnicode
        self.Reserved3 = struct.unpack('<H', data[current:current+0x02])[0]
        self.ole_logger.debug('ReferenceControlRecord.Reserved3: ' + str(hex(self.Reserved3)))
        if self.Reserved3 != 0x30:
            self._raise_exception('ReferenceControlRecord.Reserved3 has an abnormal value.')
        current = current + 0x02
        self.SizeExtended = struct.unpack('<I', data[current:current+0x04])[0]
        self.ole_logger.debug('ReferenceControlRecord.SizeExtended: ' + str(hex(self.SizeExtended)))
        current = current + 0x04
        self.SizeOfLibidExtended = struct.unpack('<I', data[current:current+0x04])[0]
        self.ole_logger.debug('ReferenceControlRecord.SizeOfLibidExtended: ' + str(hex(self.SizeOfLibidExtended)))
        current = current + 0x04
        self.LibidExtended = data[current:current+self.SizeOfLibidExtended]
        self.ole_logger.debug('ReferenceControlRecord.LibidExtended: ' + self.LibidExtended)
        current = current + self.SizeOfLibidExtended
        self.Reserved4 = struct.unpack('<I', data[current:current+0x04])[0]
        self.ole_logger.debug('ReferenceControlRecord.Reserved4: ' + str(hex(self.Reserved4)))
        if self.Reserved4 != 0x00:
            self._raise_exception('ReferenceControlRecord.Reserved4 has an abnormal value.')
        current = current + 0x04
        self.Reserved5 = struct.unpack('<H', data[current:current+0x02])[0]
        self.ole_logger.debug('ReferenceControlRecord.Reserved5: ' + str(hex(self.Reserved5)))
        if self.Reserved5 != 0x00:
            self._raise_exception('ReferenceControlRecord.Reserved5 has an abnormal value.')
        current = current + 0x02
        # 16-byte GUID of the original type library, logged as hex.
        self.OriginalTypeLib = data[current:current+0x10]
        self.ole_logger.debug('ReferenceControlRecord.OriginalTypeLib: ' + self.OriginalTypeLib.encode('hex'))
        current = current + 0x10
        self.Cookie = struct.unpack('<I', data[current:current+0x04])[0]
        self.ole_logger.debug('ReferenceControlRecord.Cookie: ' + str(hex(self.Cookie)))
        # Total bytes consumed; used by ProjectReferencesRecord to advance.
        self.Size = current + 0x04
class ReferenceRegisteredRecord(VBABase):
    """Parser for the REFERENCEREGISTERED record (Id 0x0D)."""
    Id = 0
    Size = 0
    SizeOfLibid = 0
    Libid = ''
    Reserved1 = 0
    Reserved2 = 0
    def __init__(self, data):
        self.Id = 0
        self.Size = 0
        self.SizeOfLibid = 0
        self.Libid = ''
        self.Reserved1 = 0
        self.Reserved2 = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ReferenceRegisteredRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x0D:
            self._raise_exception('ReferenceRegisteredRecord.Id has an abnormal value.')
        self.Size = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ReferenceRegisteredRecord.Size: ' + str(hex(self.Size)))
        self.SizeOfLibid = struct.unpack('<I', data[0x06:0x0A])[0]
        self.ole_logger.debug('ReferenceRegisteredRecord.SizeOfLibid: ' + str(hex(self.SizeOfLibid)))
        # The Libid payload starts right after the fixed 10-byte prefix.
        libid_end = 0x0A + self.SizeOfLibid
        self.Libid = data[0x0A:libid_end]
        self.ole_logger.debug('ReferenceRegisteredRecord.Libid: ' + self.Libid)
        self.Reserved1 = struct.unpack('<I', data[libid_end:libid_end+0x04])[0]
        self.ole_logger.debug('ReferenceRegisteredRecord.Reserved1: ' + str(hex(self.Reserved1)))
        if self.Reserved1 != 0x00:
            self._raise_exception('ReferenceRegisteredRecord.Reserved1 has an abnormal value.')
        self.Reserved2 = struct.unpack('<H', data[libid_end+0x04:libid_end+0x06])[0]
        self.ole_logger.debug('ReferenceRegisteredRecord.Reserved2: ' + str(hex(self.Reserved2)))
        if self.Reserved2 != 0x00:
            self._raise_exception('ReferenceRegisteredRecord.Reserved2 has an abnormal value.')
class ReferenceProjectRecord(VBABase):
    """Parser for the REFERENCEPROJECT record (Id 0x0E): absolute and
    relative Libid paths plus the referenced project's version."""
    Id = 0
    Size = 0
    SizeOfLibidAbsolute = 0
    LibidAbsolute = ''
    SizeOfLibidRelative = 0
    LibidRelative = ''
    MajorVersion = 0
    MinorVersion = 0
    def __init__(self, data):
        self.Id = 0
        self.Size = 0
        self.SizeOfLibidAbsolute = 0
        self.LibidAbsolute = ''
        self.SizeOfLibidRelative = 0
        self.LibidRelative = ''
        self.MajorVersion = 0
        self.MinorVersion = 0
        offset = 0
        self.Id = struct.unpack('<H', data[offset:offset+0x02])[0]
        self.ole_logger.debug('ReferenceProjectRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x0E:
            self._raise_exception('ReferenceProjectRecord.Id has an abnormal value.')
        offset += 0x02
        self.Size = struct.unpack('<I', data[offset:offset+0x04])[0]
        self.ole_logger.debug('ReferenceProjectRecord.Size: ' + str(hex(self.Size)))
        offset += 0x04
        self.SizeOfLibidAbsolute = struct.unpack('<I', data[offset:offset+0x04])[0]
        self.ole_logger.debug('ReferenceProjectRecord.SizeOfLibidAbsolute: ' + str(hex(self.SizeOfLibidAbsolute)))
        offset += 0x04
        self.LibidAbsolute = data[offset:offset+self.SizeOfLibidAbsolute]
        self.ole_logger.debug('ReferenceProjectRecord.LibidAbsolute: ' + self.LibidAbsolute)
        offset += self.SizeOfLibidAbsolute
        self.SizeOfLibidRelative = struct.unpack('<I', data[offset:offset+0x04])[0]
        self.ole_logger.debug('ReferenceProjectRecord.SizeOfLibidRelative: ' + str(hex(self.SizeOfLibidRelative)))
        offset += 0x04
        self.LibidRelative = data[offset:offset+self.SizeOfLibidRelative]
        self.ole_logger.debug('ReferenceProjectRecord.LibidRelative: ' + self.LibidRelative)
        offset += self.SizeOfLibidRelative
        self.MajorVersion = struct.unpack('<I', data[offset:offset+0x04])[0]
        self.ole_logger.debug('ReferenceProjectRecord.MajorVersion: ' + str(hex(self.MajorVersion)))
        offset += 0x04
        self.MinorVersion = struct.unpack('<H', data[offset:offset+0x02])[0]
        self.ole_logger.debug('ReferenceProjectRecord.MinorVersion: ' + str(hex(self.MinorVersion)))
class ProjectReferencesRecord(VBABase):
    """Parser for the array of REFERENCE records in the dir stream.

    Collects [NameRecord, payload-record] pairs into ReferenceArray until
    the PROJECTMODULES record id (0x0F) terminates the sequence.
    """
    ReferenceArray = list()
    Size = 0
    def __init__(self, data):
        self.ReferenceArray = list()
        self.Size = 0
        current = 0
        NameRecord = None
        ControlRecord = None
        RegisteredRecord = None
        ProjectRecord = None
        while True:
            # Peek the next record id to dispatch on its type.
            id = struct.unpack('<H', data[current:current+2])[0]
            if id == 0x0F:
                # 0x0F marks the start of the modules section: stop here.
                self.Size = current
                break
            elif id == 0x16:
                # An optional name record precedes the payload record.
                NameRecord = ReferenceNameRecord(data[current:])
                current = current + 0x0C + NameRecord.SizeOfName + NameRecord.SizeOfNameUnicode
                id = struct.unpack('<H', data[current:current+2])[0]
                if id == 0x2F or id == 0x33:
                    ControlRecord = ReferenceControlRecord(data[current:])
                    current = current + ControlRecord.Size
                    self.ReferenceArray.append([NameRecord, ControlRecord])
                elif id == 0x0D:
                    RegisteredRecord = ReferenceRegisteredRecord(data[current:])
                    current = current + 0x06 + RegisteredRecord.Size
                    self.ReferenceArray.append([NameRecord, RegisteredRecord])
                elif id == 0x0E:
                    ProjectRecord = ReferenceProjectRecord(data[current:])
                    current = current + 0x06 + ProjectRecord.Size
                    self.ReferenceArray.append([NameRecord, ProjectRecord])
                else:
                    # NOTE(review): this branch warns but does not advance
                    # 'current', so an unrecognized id after a name record
                    # appears to loop forever -- confirm intended behavior.
                    self.ole_logger.warn('ReferencesRecord.Id has an abnormal value.')
            elif id == 0x2F or id == 0x33:
                # Payload record without a preceding name record.
                ControlRecord = ReferenceControlRecord(data[current:])
                current = current + ControlRecord.Size
                self.ReferenceArray.append([None, ControlRecord])
            elif id == 0x0D:
                RegisteredRecord = ReferenceRegisteredRecord(data[current:])
                current = current + 0x06 + RegisteredRecord.Size
                self.ReferenceArray.append([None, RegisteredRecord])
            elif id == 0x0E:
                ProjectRecord = ReferenceProjectRecord(data[current:])
                current = current + 0x06 + ProjectRecord.Size
                self.ReferenceArray.append([None, ProjectRecord])
            else:
                self._raise_exception('ReferencesRecord.Id has an abnormal value.')
class ProjectCookieRecord(VBABase):
    """PROJECTCOOKIE record (id 0x13): a fixed 2-byte cookie value."""
    Id = 0
    Size = 0
    Cookie = 0

    def __init__(self, data):
        self.Id = 0
        self.Size = 0
        # BUGFIX: this used to zero a stray 'CodePage' attribute instead of
        # the 'Cookie' attribute declared on the class and read below.
        self.Cookie = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ProjectCookieRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x13:
            self._raise_exception('ProjectCookieRecord.Id has an abnormal value.')
        self.Size = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ProjectCookieRecord.Size: ' + str(hex(self.Size)))
        # The payload is fixed-length; Size must always be 2.
        if self.Size != 0x02:
            self._raise_exception('ProjectCookieRecord.Size has an abnormal value.')
        self.Cookie = struct.unpack('<H', data[0x06:0x08])[0]
        self.ole_logger.debug('ProjectCookieRecord.Cookie: ' + str(hex(self.Cookie)))
class ModuleNameRecord(VBABase):
    """MODULENAME record (id 0x19): the module's name as a byte string.

    Layout: 2-byte id, 4-byte name length, then the name bytes.
    """
    Id = 0
    SizeOfModuleName = 0
    ModuleName = ''
    def __init__(self, data):
        self.Id = 0
        self.SizeOfModuleName = 0
        self.ModuleName = ''
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModuleNameRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x19:
            self._raise_exception('ModuleNameRecord.Id has an abnormal value.')
        self.SizeOfModuleName = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModuleNameRecord.Size: ' + str(hex(self.SizeOfModuleName)))
        # The name is the raw byte payload following the 6-byte header.
        self.ModuleName = data[0x06:0x06+self.SizeOfModuleName]
        self.ole_logger.debug('ModuleNameRecord.ModuleName: ' + self.ModuleName)
class ModuleNameUnicodeRecord(VBABase):
    """MODULENAMEUNICODE record (id 0x47): the module name's Unicode variant.

    Layout: 2-byte id, 4-byte length, then the name bytes.
    """
    Id = 0
    SizeOfModuleNameUnicode = 0
    ModuleNameUnicode = ''

    def __init__(self, data):
        self.Id = 0
        # BUGFIX: this used to zero 'SizeOfModuleName' (an attribute of the
        # non-unicode record) instead of 'SizeOfModuleNameUnicode'.
        self.SizeOfModuleNameUnicode = 0
        self.ModuleNameUnicode = ''
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModuleNameUnicodeRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x47:
            self._raise_exception('ModuleNameUnicodeRecord.Id has an abnormal value.')
        self.SizeOfModuleNameUnicode = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModuleNameUnicodeRecord.SizeOfModuleNameUnicode: ' + str(hex(self.SizeOfModuleNameUnicode)))
        self.ModuleNameUnicode = data[0x06:0x06+self.SizeOfModuleNameUnicode]
        # BUGFIX: the debug label used to say '.ModuleName', mislabeling the
        # field actually logged here.
        self.ole_logger.debug('ModuleNameUnicodeRecord.ModuleNameUnicode: ' + self.ModuleNameUnicode)
class ModuleStreamNameRecord(VBABase):
    """MODULESTREAMNAME record (id 0x1A): name of the stream that holds the
    module, stored twice -- as a byte string and as a Unicode variant,
    separated by a reserved marker that must be 0x32.
    """
    Id = 0
    SizeOfStreamName = 0
    StreamName = ''
    Reserved = 0
    SizeOfStreamNameUnicode = 0
    StreamNameUnicode = ''
    def __init__(self, data):
        self.Id = 0
        self.SizeOfStreamName = 0
        self.StreamName = ''
        self.Reserved = 0
        self.SizeOfStreamNameUnicode = 0
        self.StreamNameUnicode = ''
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModuleStreamNameRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x1A:
            self._raise_exception('ModuleStreamNameRecord.Id has an abnormal value.')
        self.SizeOfStreamName = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModuleStreamNameRecord.SizeOfStreamName: ' + str(hex(self.SizeOfStreamName)))
        self.StreamName = data[0x06:0x06+self.SizeOfStreamName]
        self.ole_logger.debug('ModuleStreamNameRecord.StreamName: ' + self.StreamName)
        # The Unicode variant follows immediately after the first name.
        current = 0x06 + self.SizeOfStreamName
        self.Reserved = struct.unpack('<H', data[current:current+0x02])[0]
        self.ole_logger.debug('ModuleStreamNameRecord.Reserved: ' + str(hex(self.Reserved)))
        if self.Reserved != 0x32:
            self._raise_exception('ModuleStreamNameRecord.Reserved has an abnormal value.')
        current = current + 0x02
        self.SizeOfStreamNameUnicode = struct.unpack('<I', data[current:current+0x04])[0]
        self.ole_logger.debug('ModuleStreamNameRecord.SizeOfStreamNameUnicode: ' + str(hex(self.SizeOfStreamNameUnicode)))
        current = current + 0x04
        self.StreamNameUnicode = data[current:current+self.SizeOfStreamNameUnicode]
        self.ole_logger.debug('ModuleStreamNameRecord.StreamNameUnicode: ' + self.StreamNameUnicode)
class ModuleDocStringRecord(VBABase):
    """MODULEDOCSTRING record (id 0x1C): the module's doc string, stored as a
    byte string and a Unicode variant, separated by a reserved marker that
    must be 0x48.
    """
    Id = 0
    SizeOfDocString = 0
    DocString = ''
    Reserved = 0
    SizeOfDocStringUnicode = 0
    DocStringUnicode = ''
    def __init__(self, data):
        self.Id = 0
        self.SizeOfDocString = 0
        self.DocString = ''
        self.Reserved = 0
        self.SizeOfDocStringUnicode = 0
        self.DocStringUnicode = ''
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModuleDocStringRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x1C:
            self._raise_exception('ModuleDocStringRecord.Id has an abnormal value.')
        self.SizeOfDocString = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModuleDocStringRecord.SizeOfDocString: ' + str(hex(self.SizeOfDocString)))
        self.DocString = data[0x06:0x06+self.SizeOfDocString]
        self.ole_logger.debug('ModuleDocStringRecord.DocString: ' + self.DocString)
        # The Unicode variant follows immediately after the first string.
        current = 0x06 + self.SizeOfDocString
        self.Reserved = struct.unpack('<H', data[current:current+0x02])[0]
        self.ole_logger.debug('ModuleDocStringRecord.Reserved: ' + str(hex(self.Reserved)))
        if self.Reserved != 0x48:
            self._raise_exception('ModuleDocStringRecord.Reserved has an abnormal value.')
        current = current + 0x02
        self.SizeOfDocStringUnicode = struct.unpack('<I', data[current:current+0x04])[0]
        self.ole_logger.debug('ModuleDocStringRecord.SizeOfDocStringUnicode: ' + str(hex(self.SizeOfDocStringUnicode)))
        current = current + 0x04
        self.DocStringUnicode = data[current:current+self.SizeOfDocStringUnicode]
        self.ole_logger.debug('ModuleDocStringRecord.DocStringUnicode: ' + self.DocStringUnicode)
class ModuleOffsetRecord(VBABase):
    """MODULEOFFSET record (id 0x31): offset of the module's text within its
    stream; the payload is a fixed 4-byte value.
    """
    Id = 0
    Size = 0
    TextOffset = 0
    def __init__(self, data):
        self.Id = 0
        self.Size = 0
        self.TextOffset = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModuleOffsetRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x31:
            self._raise_exception('ModuleOffsetRecord.Id has an abnormal value.')
        self.Size = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModuleOffsetRecord.Size: ' + str(hex(self.Size)))
        # Fixed-length payload; Size must always be 4.
        if self.Size != 0x04:
            self._raise_exception('ModuleOffsetRecord.Size has an abnormal value.')
        self.TextOffset = struct.unpack('<I', data[0x06:0x0A])[0]
        self.ole_logger.debug('ModuleOffsetRecord.TextOffset: ' + str(hex(self.TextOffset)))
class ModuleHelpContextRecord(VBABase):
    """MODULEHELPCONTEXT record (id 0x1E): the module's help context id; the
    payload is a fixed 4-byte value.
    """
    Id = 0
    Size = 0
    HelpContext = 0
    def __init__(self, data):
        self.Id = 0
        self.Size = 0
        self.HelpContext = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModuleHelpContextRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x1E:
            self._raise_exception('ModuleHelpContextRecord.Id has an abnormal value.')
        self.Size = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModuleHelpContextRecord.Size: ' + str(hex(self.Size)))
        # Fixed-length payload; Size must always be 4.
        if self.Size != 0x04:
            self._raise_exception('ModuleHelpContextRecord.Size has an abnormal value.')
        self.HelpContext = struct.unpack('<I', data[0x06:0x0A])[0]
        self.ole_logger.debug('ModuleHelpContextRecord.HelpContext: ' + str(hex(self.HelpContext)))
class ModuleCookieRecord(VBABase):
    """MODULECOOKIE record (id 0x2C): a fixed 2-byte cookie value."""
    Id = 0
    Size = 0
    Cookie = 0
    def __init__(self, data):
        self.Id = 0
        self.Size = 0
        self.Cookie = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModuleCookieRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x2C:
            self._raise_exception('ModuleCookieRecord.Id has an abnormal value.')
        self.Size = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModuleCookieRecord.Size: ' + str(hex(self.Size)))
        # Fixed-length payload; Size must always be 2.
        if self.Size != 0x02:
            self._raise_exception('ModuleCookieRecord.Size has an abnormal value.')
        self.Cookie = struct.unpack('<H', data[0x06:0x08])[0]
        self.ole_logger.debug('ModuleCookieRecord.Cookie: ' + str(hex(self.Cookie)))
class ModuleTypeRecord(VBABase):
    """MODULETYPE record: id 0x21 or 0x22, followed by a 4-byte reserved
    field that must be zero.
    """
    Id = 0
    Reserved = 0

    def __init__(self, data):
        self.Id = 0
        # BUGFIX: __init__ used to zero a stray 'Size' attribute; the field
        # read below (and declared on the class) is 'Reserved'.
        self.Reserved = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModuleTypeRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x21 and self.Id != 0x22:
            self._raise_exception('ModuleTypeRecord.Id has an abnormal value.')
        self.Reserved = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModuleTypeRecord.Reserved: ' + str(hex(self.Reserved)))
        if self.Reserved != 0x00:
            self._raise_exception('ModuleTypeRecord.Reserved has an abnormal value.')
class ModuleReadOnlyRecord(VBABase):
    """MODULEREADONLY record (id 0x25): marker record with a 4-byte reserved
    field that must be zero.
    """
    Id = 0
    Reserved = 0

    def __init__(self, data):
        self.Id = 0
        # BUGFIX: __init__ used to zero a stray 'Size' attribute; the field
        # read below (and declared on the class) is 'Reserved'.
        self.Reserved = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModuleReadOnlyRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x25:
            self._raise_exception('ModuleReadOnlyRecord.Id has an abnormal value.')
        self.Reserved = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModuleReadOnlyRecord.Reserved: ' + str(hex(self.Reserved)))
        if self.Reserved != 0x00:
            self._raise_exception('ModuleReadOnlyRecord.Reserved has an abnormal value.')
class ModulePrivateRecord(VBABase):
    """MODULEPRIVATE record (id 0x28): marker record with a 4-byte reserved
    field that must be zero.
    """
    Id = 0
    Reserved = 0

    def __init__(self, data):
        self.Id = 0
        # BUGFIX: __init__ used to zero a stray 'Size' attribute; the field
        # read below (and declared on the class) is 'Reserved'.
        self.Reserved = 0
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModulePrivateRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x28:
            self._raise_exception('ModulePrivateRecord.Id has an abnormal value.')
        self.Reserved = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModulePrivateRecord.Reserved: ' + str(hex(self.Reserved)))
        if self.Reserved != 0x00:
            self._raise_exception('ModulePrivateRecord.Reserved has an abnormal value.')
class ModuleRecord(VBABase):
    """A complete MODULE record: a fixed sequence of sub-records describing
    one VBA module, optionally followed by read-only (0x25) / private (0x28)
    marker records, a terminator id (0x2B) and a 4-byte reserved field.
    Size is the total number of bytes consumed from *data*.
    """
    NameRecord = None
    NameUnicodeRecord = None
    StreamNameRecord = None
    DocStringRecord = None
    OffsetRecord = None
    HelpContextRecord = None
    CookieRecord = None
    TypeRecord = None
    ReadOnlyRecord = None
    PrivateRecord = None
    Terminator = 0
    Reserved = 0
    Size = 0

    def __init__(self, data):
        self.NameRecord = None
        self.NameUnicodeRecord = None
        self.StreamNameRecord = None
        self.DocStringRecord = None
        self.OffsetRecord = None
        self.HelpContextRecord = None
        self.CookieRecord = None
        self.TypeRecord = None
        self.ReadOnlyRecord = None
        self.PrivateRecord = None
        self.Terminator = 0
        self.Reserved = 0
        self.Size = 0
        current = 0
        # Mandatory sub-records in fixed order; each step advances 'current'
        # by the record's header bytes plus its variable-length payload.
        self.NameRecord = ModuleNameRecord(data)
        current = current + 0x06 + self.NameRecord.SizeOfModuleName
        self.NameUnicodeRecord = ModuleNameUnicodeRecord(data[current:])
        current = current + 0x06 + self.NameUnicodeRecord.SizeOfModuleNameUnicode
        self.StreamNameRecord = ModuleStreamNameRecord(data[current:])
        current = current + 0x0C + self.StreamNameRecord.SizeOfStreamName + self.StreamNameRecord.SizeOfStreamNameUnicode
        self.DocStringRecord = ModuleDocStringRecord(data[current:])
        current = current + 0x0C + self.DocStringRecord.SizeOfDocString + self.DocStringRecord.SizeOfDocStringUnicode
        self.OffsetRecord = ModuleOffsetRecord(data[current:current+0x0A])
        current = current + 0x0A
        self.HelpContextRecord = ModuleHelpContextRecord(data[current:current+0x0A])
        current = current + 0x0A
        self.CookieRecord = ModuleCookieRecord(data[current:current+0x08])
        current = current + 0x08
        self.TypeRecord = ModuleTypeRecord(data[current:current+0x06])
        # Optional 6-byte marker records until the terminator id 0x2B.
        while True:
            current = current + 0x06
            id = struct.unpack('<H', data[current:current+0x02])[0]
            if id == 0x25:
                self.ReadOnlyRecord = ModuleReadOnlyRecord(data[current:current+0x06])
            elif id == 0x28:
                self.PrivateRecord = ModulePrivateRecord(data[current:current+0x06])
            elif id == 0x2B:
                self.Terminator = struct.unpack('<H', data[current:current+0x02])[0]
                break
            else:
                self._raise_exception('ModuleRecord contains an abnormal record id.')
        current = current + 0x02
        self.Reserved = struct.unpack('<I', data[current:current+0x04])[0]
        # BUGFIX: this check used to test self.Size (always 0 at this point)
        # instead of the Reserved value just read, so a non-zero Reserved
        # field was never detected.
        if self.Reserved != 0x00:
            self._raise_exception('ModuleRecord.Reserved has an abnormal value.')
        self.Size = current + 0x04
class ProjectModulesRecord(VBABase):
    """PROJECTMODULES record (id 0x0F): the module count, a cookie record,
    then one ModuleRecord per module.
    """
    Id = 0
    Size = 0
    Count = 0
    CookieRecord = None
    ModuleArray = list()
    def __init__(self, data):
        self.Id = 0
        self.Size = 0
        self.Count = 0
        self.CookieRecord = None
        self.ModuleArray = list()
        self.Id = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('ModulesRecord.Id: ' + str(hex(self.Id)))
        if self.Id != 0x0F:
            self._raise_exception('ModulesRecord.Id has an abnormal value.')
        self.Size = struct.unpack('<I', data[0x02:0x06])[0]
        self.ole_logger.debug('ModulesRecord.Size: ' + str(hex(self.Size)))
        # Size covers only the fixed 2-byte Count field.
        if self.Size != 0x02:
            self._raise_exception('ModulesRecord.Size has an abnormal value.')
        self.Count = struct.unpack('<H', data[0x06:0x08])[0]
        self.ole_logger.debug('ModulesRecord.Count: ' + str(hex(self.Count)))
        # An 8-byte cookie record sits between the count and the modules.
        self.CookieRecord = ProjectCookieRecord(data[0x08:0x10])
        current = 0x10
        for i in range(0, self.Count):
            Module = ModuleRecord(data[current:])
            self.ModuleArray.append(Module)
            current = current + Module.Size
class DirStream(VBABase):
    """Parser for the compressed 'dir' stream: project information,
    project references and the module records, in that order.
    """
    InformationRecord = None
    ReferencesRecord = None
    ModulesRecord = None
    def __init__(self, data):
        self.InformationRecord = None
        self.ReferencesRecord = None
        self.ModulesRecord = None
        self.ole_logger.debug('######## dirStream ########')
        # The dir stream is stored compressed; inflate it before parsing.
        data = self._decompress(data)
        self.InformationRecord = ProjectInformationRecord(data)
        current = self.InformationRecord.Size
        self.ReferencesRecord = ProjectReferencesRecord(data[current:])
        current = current + self.ReferencesRecord.Size
        self.ModulesRecord = ProjectModulesRecord(data[current:])
class VBAFile(VBABase):
    """Top-level parser for a VBA project embedded in an OLE file.

    Loads the PROJECT, PROJECTwm, _VBA_PROJECT and dir streams when present.
    Only the dir stream is mandatory; missing optional streams are logged.
    """
    OLE = None
    PROJECT = None
    PROJECTwm = None
    VBA_PROJECT = None
    dir = None
    def __init__(self, filename):
        self.OLE = None
        self.PROJECT = None
        self.PROJECTwm = None
        self.VBA_PROJECT = None
        self.dir = None
        self.OLE = OLEFile(filename)
        project_data = self.OLE.find_object_by_name('PROJECT')
        if project_data is not None:
            self.PROJECT = ProjectStream(project_data)
        else:
            self.ole_logger.warn('VBA project does not contain the PROJECT stream.')
        # PROJECTwm is optional and its absence is not even warned about.
        projectwm_data = self.OLE.find_object_by_name('PROJECTwm')
        if projectwm_data is not None:
            self.PROJECTwm = Projectwm(projectwm_data)
        vba_project_data = self.OLE.find_object_by_name('_VBA_PROJECT')
        if vba_project_data is not None:
            self.VBA_PROJECT = VBAProject(vba_project_data)
        else:
            self.ole_logger.warn('VBA project does not contain the _VBA_PROJECT stream.')
        # The dir stream is required; fail hard if it is missing.
        dir_data = self.OLE.find_object_by_name('dir')
        if dir_data is not None:
            self.dir = DirStream(dir_data)
        else:
            self._raise_exception('VBA project does not contain the dir stream.')
if __name__ == '__main__':
    # Simple smoke test: parse a sample document with debug logging enabled.
    init_logging(True)
    #init_logging(False)
    try:
        vba = VBAFile('oletest1.doc')
    except Exception as e:
        # BUGFIX: use the function form of print -- valid in Python 2 as a
        # parenthesized expression and required for Python 3.
        print(e)
| |
"""
Tests the cli
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import unittest
import mock
import ga4gh.cli as cli
import ga4gh.protocol as protocol
import ga4gh.client as client
class TestNoInput(unittest.TestCase):
    """
    Test the cli as if there was no input; print_help should be called.
    The development server is a special case here, as it works without
    arguments.
    """
    def setUp(self):
        # Each test gets a fresh stub parser that records call counts.
        self.parser = StubArgumentParser(self)
    def run(self, *args, **kwargs):
        # Wrap TestCase.run so the shared verification below happens after
        # every test method in this class.
        super(TestNoInput, self).run(*args, **kwargs)
        self.verifyInput()
    def verifyInput(self):
        # With no input each main() should parse args once and print
        # the help text once.
        self.parser.assertParseArgsCountEquals(1)
        self.parser.assertHelpCountEquals(1)
    def testGa2VcfNoInput(self):
        cli.ga2vcf_main(self.parser)
    def testGa2SamNoInput(self):
        cli.ga2sam_main(self.parser)
    def testCliNoInput(self):
        cli.client_main(self.parser)
class TestGa2VcfArguments(unittest.TestCase):
    """
    Tests the ga2vcf cli can parse all arguments it is supposed to
    """
    def testVariantsSearchArguments(self):
        # One invocation exercising every ga2vcf flag plus the two
        # positional arguments (base url and variant set id); whitespace
        # is irrelevant because the stub parser splits the string.
        cliInput = """--workarounds WORK,AROUND
            --key KEY -O --outputFile /dev/null
            --referenceName REFERENCENAME
            --variantName VARIANTNAME --callSetIds CALL,SET,IDS --start 0
            --end 1 --pageSize 2 BASEURL VARIANTSETID"""
        stubConverterModule = StubConverterModuleVcf(self)
        # Substitute a counting stub for the real VCF converter.
        with mock.patch(
                'ga4gh.converters.VcfConverter',
                stubConverterModule):
            parser = StubArgumentParserCli(self, cliInput)
            cli.ga2vcf_main(parser)
            parser.assertParseArgsCountEquals(1)
            stubConverterModule.assertVcfConvertCountEquals(1)
class TestGa2SamArguments(unittest.TestCase):
    """
    Tests the ga2sam cli can parse all arguments it is supposed to
    """
    def testReadsSearchArguments(self):
        # One invocation exercising every ga2sam flag plus the positional
        # base url argument.
        cliInput = """--workarounds WORK,AROUND --key KEY -O
            --pageSize 1 --start 2 --end 3 --outputFile OUT.SAM
            --readGroupIds READ,GROUP,IDS --referenceId REFERENCEID
            --binaryOutput BASEURL"""
        stubConverterModule = StubConverterModuleSam(self)
        # Substitute a counting stub for the real SAM converter.
        with mock.patch(
                'ga4gh.converters.SamConverter',
                stubConverterModule):
            parser = StubArgumentParserCli(self, cliInput)
            cli.ga2sam_main(parser)
            parser.assertParseArgsCountEquals(1)
            stubConverterModule.assertSamConvertCountEquals(1)
class TestClientArguments(unittest.TestCase):
    """
    Tests the client cli can parse all arguments it is supposed to
    and can initialize the runner in preparation for a request

    Each test method only assigns self.cliInput; the overridden run()
    then performs the shared parse-and-verify step in verifyInput().
    """
    def setUp(self):
        # initialize the client parser
        self.parser = StubArgumentParser(self)
        cli.client_main(self.parser)
    def verifyInput(self):
        # include arguments common to all commands
        inputStr = "--verbose --workarounds WORK,AROUND --key KEY {0} URL"
        cliInput = inputStr.format(self.cliInput)
        splits = cliInput.split()
        # parse the arguments
        args = self.parser.parser.parse_args(splits)
        # invoke the initializer of the runner (which also parses args)
        runner = args.runner(args)
        # ensure the correct attributes on the runner are set
        if hasattr(runner, '_request'):
            self.assertIsInstance(runner._request, protocol.ProtocolElement)
        self.assertIsInstance(runner._httpClient, client.HttpClient)
    def run(self, *args, **kwargs):
        # Wrap TestCase.run so verifyInput() follows every test method.
        super(TestClientArguments, self).run(*args, **kwargs)
        self.verifyInput()
    def testVariantsSearchArguments(self):
        self.cliInput = """variants-search --referenceName REFERENCENAME
            --variantName VARIANTNAME --callSetIds CALL,SET,IDS --start 0
            --end 1 --pageSize 2 --variantSetId VARIANTSETIDS"""
    def testVariantSetsSearchArguments(self):
        self.cliInput = """variantsets-search --pageSize 1 --datasetId
            DATASETID"""
    def testReferenceSetsSearchArguments(self):
        self.cliInput = """referencesets-search --pageSize 1 --accessions
            ACC,ESS,IONS --md5checksums MD5,CHECKSUMS --assemblyId ASSEMBLYID"""
    def testReferencesSearchArguments(self):
        self.cliInput = """references-search --pageSize 1 --accessions
            ACC,ESS,IONS --md5checksums MD5,CHECKSUMS"""
    def testReadGroupSetsSearchArguments(self):
        self.cliInput = """readgroupsets-search --pageSize 1 --datasetId
            DATASETID --name NAME"""
    def testCallSetsSearchArguments(self):
        self.cliInput = """callsets-search --pageSize 1 --name NAME
            --variantSetId VARIANTSETID"""
    def testReadsSearchArguments(self):
        self.cliInput = """reads-search --pageSize 1 --start 2 --end 3
            --readGroupIds READ,GROUP,IDS --referenceId REFERENCEID"""
    def testDatasetsSearchArguments(self):
        self.cliInput = """datasets-search"""
    def testReferenceSetGetArguments(self):
        self.cliInput = """referencesets-get ID"""
    def testReferenceGetArguments(self):
        self.cliInput = """references-get ID"""
    def testReadGroupSetGetArguments(self):
        self.cliInput = """readgroupsets-get ID"""
    def testReadGroupGetArguments(self):
        self.cliInput = """readgroups-get ID"""
    def testCallSetGetArguments(self):
        self.cliInput = """callsets-get ID"""
    def testReferenceBasesListArguments(self):
        self.cliInput = """references-list-bases ID
            --start 1 --end 2"""
class StubArgumentParser(object):
    """
    A stand-in object for an ArgumentParser that intercepts calls
    to parse_args and print_help, but otherwise provides normal
    behavior via passing through calls to an attribute ArgumentParser
    """
    def __init__(self, currentTest):
        self.parser = argparse.ArgumentParser(description="Stub")
        self.helpCount = 0
        self.parseArgsCount = 0
        # Expose the intercepted entry points under the ArgumentParser names.
        self.parse_args = self._parseArgs
        self.print_help = self._help
        self.currentTest = currentTest
    def _help(self):
        # Count the help invocation instead of printing anything.
        self.helpCount += 1
    def _parseArgs(self):
        self.parseArgsCount += 1
        return ()
    def assertParseArgsCountEquals(self, parseArgsCount):
        # BUGFIX: assertEquals is a deprecated unittest alias; use assertEqual.
        self.currentTest.assertEqual(self.parseArgsCount, parseArgsCount)
    def assertHelpCountEquals(self, helpCount):
        self.currentTest.assertEqual(self.helpCount, helpCount)
    def __getattr__(self, name):
        # Anything not intercepted is delegated to the real parser.
        return getattr(self.parser, name)
class StubArgumentParserCli(StubArgumentParser):
    """
    Like StubArgumentParser, but feeds a caller-supplied command line
    through the real parser instead of returning an empty tuple.
    """
    def __init__(self, currentTest, cliInput):
        super(StubArgumentParserCli, self).__init__(currentTest)
        self.cliInput = cliInput
    def _parseArgs(self):
        self.parseArgsCount += 1
        return self.parser.parse_args(self.cliInput.split())
class StubConverterModule(object):
    """
    A stand-in for the converter module: hands out dummy converter
    objects and can assert on how often each one converted.
    """
    def __init__(self, currentTest):
        self.currentTest = currentTest
        self.VcfConverter = StubConverter(currentTest)
        self.SamConverter = StubConverter(currentTest)
    def assertVcfConvertCountEquals(self, convertCount):
        self.VcfConverter.assertConvertCountEquals(convertCount)
    def assertSamConvertCountEquals(self, convertCount):
        self.SamConverter.assertConvertCountEquals(convertCount)
class StubConverterModuleSam(StubConverterModule):
    """
    StubConverterModule variant whose call operator yields the SAM stub.
    """
    def __call__(self, *ignored):
        return self.SamConverter
class StubConverterModuleVcf(StubConverterModule):
    """
    StubConverterModule variant whose call operator yields the VCF stub.
    """
    def __call__(self, *ignored):
        return self.VcfConverter
class StubConverter(object):
    """
    A stand-in object for a converter that does nothing beyond counting
    how many times convert() was invoked.
    """
    def __init__(self, currentTest):
        self.currentTest = currentTest
        self.convertCount = 0
    def convert(self):
        self.convertCount += 1
    def assertConvertCountEquals(self, convertCount):
        # BUGFIX: assertEquals is a deprecated unittest alias; use assertEqual.
        self.currentTest.assertEqual(self.convertCount, convertCount)
| |
# -*- coding: utf-8 -*-
"""
sphinx.util
~~~~~~~~~~~
Utility functions for Sphinx.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import fnmatch
import tempfile
import posixpath
import traceback
import unicodedata
from os import path
from codecs import open, BOM_UTF8
from collections import deque
from six import iteritems, text_type, binary_type
from six.moves import range
import docutils
from docutils.utils import relative_path
import jinja2
import sphinx
from sphinx.errors import PycodeError, SphinxParallelError, ExtensionError
from sphinx.util.console import strip_colors
from sphinx.util.osutil import fs_encoding
# import other utilities; partly for backwards compatibility, so don't
# prune unused ones indiscriminately
from sphinx.util.osutil import ( # noqa
SEP, os_path, relative_uri, ensuredir, walk, mtimes_of_files, movefile,
copyfile, copytimes, make_filename, ustrftime)
from sphinx.util.nodes import ( # noqa
nested_parse_with_titles, split_explicit_title, explicit_title_re,
caption_ref_re)
from sphinx.util.matching import patfilter # noqa
# Generally useful regular expressions.
ws_re = re.compile(r'\s+')                   # any run of whitespace
url_re = re.compile(r'(?P<schema>.+)://.*')  # "<schema>://..." style URLs
# High-level utility functions.
def docname_join(basedocname, docname):
    """Resolve *docname* relative to *basedocname*, collapsing '..' parts."""
    anchored = posixpath.join('/' + basedocname, '..', docname)
    return posixpath.normpath(anchored)[1:]
def path_stabilize(filepath):
    "normalize the path separator and the unicode form of a path string"
    stabilized = filepath.replace(os.path.sep, SEP)
    if isinstance(stabilized, text_type):
        stabilized = unicodedata.normalize('NFC', stabilized)
    return stabilized
def get_matching_files(dirname, exclude_matchers=()):
    """Get all file names in a directory, recursively.

    Exclude files and dirs matching some matcher in *exclude_matchers*.
    Yields paths relative to *dirname*, using '/' as the separator.
    """
    # dirname is a normalized absolute path.
    dirname = path.normpath(path.abspath(dirname))
    dirlen = len(dirname) + 1  # exclude final os.path.sep
    for root, dirs, files in walk(dirname, followlinks=True):
        relativeroot = root[dirlen:]
        # Pair each entry with its index so the original ordering can be
        # recovered after the exclusion filtering below.
        qdirs = enumerate(path_stabilize(path.join(relativeroot, dn))
                          for dn in dirs)
        qfiles = enumerate(path_stabilize(path.join(relativeroot, fn))
                           for fn in files)
        for matcher in exclude_matchers:
            qdirs = [entry for entry in qdirs if not matcher(entry[1])]
            qfiles = [entry for entry in qfiles if not matcher(entry[1])]
        # Prune excluded directories in place so walk() does not descend.
        dirs[:] = sorted(dirs[i] for (i, _) in qdirs)
        for i, filename in sorted(qfiles):
            yield filename
def get_matching_docs(dirname, suffixes, exclude_matchers=()):
    """Get all file names (without suffixes) matching a suffix in a directory,
    recursively.

    Exclude files and dirs matching a pattern in *exclude_patterns*.
    """
    suffixpatterns = ['*' + s for s in suffixes]
    for filename in get_matching_files(dirname, exclude_matchers):
        for suffixpattern in suffixpatterns:
            if fnmatch.fnmatch(filename, suffixpattern):
                # Strip exactly the suffix: len(suffixpattern)-1 trailing
                # characters (the leading '*' is not part of the suffix).
                yield filename[:-len(suffixpattern)+1]
                break
class FilenameUniqDict(dict):
    """
    A dictionary that automatically generates unique names for its keys,
    interpreted as filenames, and keeps track of a set of docnames they
    appear in.  Used for images and downloadable files in the environment.

    Maps filename -> (set of referencing docnames, unique basename).
    """
    def __init__(self):
        self._existing = set()

    def add_file(self, docname, newfile):
        """Register *newfile* for *docname*; return its unique basename."""
        if newfile in self:
            docnames, uniquename = self[newfile]
            docnames.add(docname)
            return uniquename
        base, ext = path.splitext(path.basename(newfile))
        uniquename = base + ext
        counter = 0
        # Append a numeric suffix until the basename is unique.
        while uniquename in self._existing:
            counter += 1
            uniquename = '%s%s%s' % (base, counter, ext)
        self[newfile] = (set([docname]), uniquename)
        self._existing.add(uniquename)
        return uniquename

    def purge_doc(self, docname):
        """Forget *docname*; drop files it alone referenced."""
        for filename, (docs, unique) in list(self.items()):
            docs.discard(docname)
            if not docs:
                del self[filename]
                self._existing.discard(unique)

    def merge_other(self, docnames, other):
        """Merge entries of *other* referenced by any doc in *docnames*."""
        for filename, (docs, unique) in other.items():
            for doc in docs & docnames:
                self.add_file(doc, filename)

    def __getstate__(self):
        return self._existing

    def __setstate__(self, state):
        self._existing = state
def copy_static_entry(source, targetdir, builder, context=None,
                      exclude_matchers=(), level=0):
    """Copy a HTML builder static_path entry from source to targetdir.

    Handles all possible cases of files, directories and subdirectories.
    Entries matching one of *exclude_matchers* are skipped; files ending in
    '_t' are rendered through the builder's template engine with *context*.
    """
    # BUGFIX: 'context' used to default to a shared mutable {}; normalize a
    # None default here instead (backward compatible for all callers).
    if context is None:
        context = {}
    if exclude_matchers:
        relpath = relative_path(path.join(builder.srcdir, 'dummy'), source)
        for matcher in exclude_matchers:
            if matcher(relpath):
                return
    if path.isfile(source):
        target = path.join(targetdir, path.basename(source))
        if source.lower().endswith('_t') and builder.templates:
            # templated!  Render and drop the '_t' suffix from the target.
            fsrc = open(source, 'r', encoding='utf-8')
            fdst = open(target[:-2], 'w', encoding='utf-8')
            fdst.write(builder.templates.render_string(fsrc.read(), context))
            fsrc.close()
            fdst.close()
        else:
            copyfile(source, target)
    elif path.isdir(source):
        if not path.isdir(targetdir):
            os.mkdir(targetdir)
        for entry in os.listdir(source):
            # Skip hidden entries such as .svn/.git directories.
            if entry.startswith('.'):
                continue
            newtarget = targetdir
            if path.isdir(path.join(source, entry)):
                newtarget = path.join(targetdir, entry)
            copy_static_entry(path.join(source, entry), newtarget,
                              builder, context, level=level+1,
                              exclude_matchers=exclude_matchers)
# Header template for the traceback log written by save_traceback(); the %s
# slots are filled with version information and the app's last log messages.
_DEBUG_HEADER = '''\
# Sphinx version: %s
# Python version: %s (%s)
# Docutils version: %s %s
# Jinja2 version: %s
# Last messages:
%s
# Loaded extensions:
'''
def save_traceback(app):
    """Save the current exception's traceback in a temporary file.

    Returns the path of the written log file.  *app* may be None; when
    given, its last log messages and loaded extensions are included.
    """
    import platform
    exc = sys.exc_info()[1]
    if isinstance(exc, SphinxParallelError):
        exc_format = '(Error in parallel process)\n' + exc.traceback
    else:
        exc_format = traceback.format_exc()
    # BUGFIX: the local used to be named 'path', shadowing the module-level
    # 'from os import path' import within this function.
    fd, logpath = tempfile.mkstemp('.log', 'sphinx-err-')
    last_msgs = ''
    if app is not None:
        last_msgs = '\n'.join(
            '# %s' % strip_colors(force_decode(s, 'utf-8')).strip()
            for s in app.messagelog)
    os.write(fd, (_DEBUG_HEADER %
                  (sphinx.__display_version__,
                   platform.python_version(),
                   platform.python_implementation(),
                   docutils.__version__, docutils.__version_details__,
                   jinja2.__version__,
                   last_msgs)).encode('utf-8'))
    if app is not None:
        # List each loaded extension with its version and module file.
        for extname, extmod in iteritems(app._extensions):
            modfile = getattr(extmod, '__file__', 'unknown')
            if isinstance(modfile, bytes):
                modfile = modfile.decode(fs_encoding, 'replace')
            os.write(fd, ('# %s (%s) from %s\n' % (
                extname, app._extension_metadata[extname]['version'],
                modfile)).encode('utf-8'))
    os.write(fd, exc_format.encode('utf-8'))
    os.close(fd)
    return logpath
def get_module_source(modname):
    """Try to find the source code for a module.

    Can return ('file', 'filename') in which case the source is in the given
    file, or ('string', 'source') in which case the source is the string.
    Raises PycodeError when the module cannot be imported or no source can
    be located.
    """
    if modname not in sys.modules:
        try:
            __import__(modname)
        except Exception as err:
            raise PycodeError('error importing %r' % modname, err)
    mod = sys.modules[modname]
    filename = getattr(mod, '__file__', None)
    loader = getattr(mod, '__loader__', None)
    if loader and getattr(loader, 'get_filename', None):
        # Prefer the loader's idea of the filename when available.
        try:
            filename = loader.get_filename(modname)
        except Exception as err:
            raise PycodeError('error getting filename for %r' % filename, err)
    if filename is None and loader:
        # No file at all -- ask the loader for the source text instead.
        try:
            return 'string', loader.get_source(modname)
        except Exception as err:
            raise PycodeError('error getting source for %r' % modname, err)
    if filename is None:
        raise PycodeError('no source found for module %r' % modname)
    filename = path.normpath(path.abspath(filename))
    lfilename = filename.lower()
    if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'):
        # Map compiled files back to their source, falling back to .pyw.
        filename = filename[:-1]
        if not path.isfile(filename) and path.isfile(filename + 'w'):
            filename += 'w'
    elif not (lfilename.endswith('.py') or lfilename.endswith('.pyw')):
        raise PycodeError('source is not a .py file: %r' % filename)
    if not path.isfile(filename):
        raise PycodeError('source file is not present: %r' % filename)
    return 'file', filename
def get_full_modname(modname, attribute):
    """Import *modname* and return the ``__module__`` of *attribute* in it.

    *attribute* may be dotted; empty parts (from repeated dots) are ignored.
    Returns None when the resolved object has no ``__module__``.
    """
    __import__(modname)
    target = sys.modules[modname]
    for part in filter(None, attribute.split('.')):
        target = getattr(target, part)
    return getattr(target, '__module__', None)
# a regex to recognize coding cookies (PEP 263), e.g. "# -*- coding: utf-8 -*-"
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def detect_encoding(readline):
    """Like tokenize.detect_encoding() from Py3k, but a bit simplified."""

    def read_or_stop():
        # Map StopIteration from an exhausted readline to None.
        try:
            return readline()
        except StopIteration:
            return None

    def get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only the first 12 characters matter for the comparison.
        enc = orig_enc[:12].lower().replace('_', '-')
        if enc == 'utf-8' or enc.startswith('utf-8-'):
            return 'utf-8'
        if enc in ('latin-1', 'iso-8859-1', 'iso-latin-1') or \
           enc.startswith(('latin-1-', 'iso-8859-1-', 'iso-latin-1-')):
            return 'iso-8859-1'
        return orig_enc

    def find_cookie(line):
        # Coding cookies must be pure ASCII to count.
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None
        matches = _coding_re.findall(line_string)
        if not matches:
            return None
        return get_normal_name(matches[0])

    default = sys.getdefaultencoding()
    first = read_or_stop()
    if first and first.startswith(BOM_UTF8):
        # A UTF-8 BOM both strips the marker and changes the fallback.
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default
    cookie = find_cookie(first)
    if cookie:
        return cookie
    # A cookie may also appear on the second line only.
    second = read_or_stop()
    if not second:
        return default
    return find_cookie(second) or default
# Low-level utility functions and classes.
class Tee(object):
    """
    File-like object writing to two streams.
    """
    def __init__(self, stream1, stream2):
        self.stream1 = stream1
        self.stream2 = stream2

    def write(self, text):
        # Mirror every write to both underlying streams.
        for stream in (self.stream1, self.stream2):
            stream.write(text)

    def flush(self):
        # Flush only the streams that actually support flushing.
        for stream in (self.stream1, self.stream2):
            if hasattr(stream, 'flush'):
                stream.flush()
def parselinenos(spec, total):
    """Parse a line number spec (such as "1,2,4-6") and return a list of
    wanted line numbers (zero-based).

    Open-ended ranges are supported: "-4" means up to line 4 and "7-" means
    from line 7 through *total*.  Raises ValueError for a malformed spec.
    """
    items = list()
    parts = spec.split(',')
    for part in parts:
        try:
            begend = part.strip().split('-')
            if len(begend) > 2:
                raise ValueError
            if len(begend) == 1:
                items.append(int(begend[0])-1)
            else:
                # BUGFIX: this used to use the "cond and a or b" idiom, which
                # still evaluated int('') (and raised) whenever a bound was
                # empty, wrongly rejecting open-ended specs like "-4".
                start = 0 if begend[0] == '' else int(begend[0])-1
                end = total if begend[1] == '' else int(begend[1])
                items.extend(range(start, end))
        except Exception:
            raise ValueError('invalid line number spec: %r' % spec)
    return items
def force_decode(string, encoding):
    """Forcibly get a unicode string out of a bytestring."""
    if not isinstance(string, binary_type):
        return string
    try:
        # Prefer the caller-supplied encoding; otherwise assume UTF-8,
        # which should only succeed for genuinely UTF-8 data.
        return string.decode(encoding if encoding else 'utf-8')
    except UnicodeError:
        # last resort -- can't fail
        return string.decode('latin1')
class attrdict(dict):
    """A dict whose entries are also readable/writable as attributes.

    Note: missing attribute access raises KeyError (dict semantics), not
    AttributeError, so hasattr()/getattr()-with-default behave unusually.
    """
    def __getattr__(self, key):
        return self[key]
    def __setattr__(self, key, val):
        self[key] = val
    def __delattr__(self, key):
        del self[key]
def rpartition(s, t):
    """Similar to str.rpartition from 2.5, but doesn't return the separator."""
    index = s.rfind(t)
    if index == -1:
        # Separator absent: everything lands in the second element.
        return '', s
    return s[:index], s[index + len(t):]
def split_into(n, type, value):
    """Split an index entry into a given number of parts at semicolons."""
    parts = [chunk.strip() for chunk in value.split(';', n - 1)]
    nonempty = sum(1 for part in parts if part)
    # Fewer non-empty parts than requested means the entry is malformed.
    if nonempty < n:
        raise ValueError('invalid %s index entry %r' % (type, value))
    return parts
def split_index_msg(type, value):
    # new entry types must be listed in directives/other.py!
    # Maps entry type -> (number of parts, label used in error messages).
    arities = {'pair': 2, 'triple': 3, 'see': 2, 'seealso': 2}
    result = []
    try:
        if type == 'single':
            # 'single' accepts either "target; extra" or just "target".
            try:
                result = split_into(2, 'single', value)
            except ValueError:
                result = split_into(1, 'single', value)
        elif type in arities:
            # 'seealso' reports errors under the 'see' label, as before.
            label = 'see' if type == 'seealso' else type
            result = split_into(arities[type], label, value)
    except ValueError:
        # Malformed entries yield an empty result rather than raising.
        pass
    return result
def format_exception_cut_frames(x=1):
    """Format an exception with traceback, but only the last x frames.

    Must be called while an exception is being handled, since it reads
    sys.exc_info().
    """
    typ, val, tb = sys.exc_info()
    # Keep only the innermost x traceback frames, then the exception line.
    frames = traceback.format_tb(tb)[-x:]
    return ''.join(frames + traceback.format_exception_only(typ, val))
class PeekableIterator(object):
    """
    An iterator which wraps any iterable and makes it possible to peek to see
    what's the next item.
    """
    def __init__(self, iterable):
        # Pushed-back (or peeked) items wait here and are served before the
        # wrapped iterator is consulted again.
        self.remaining = deque()
        self._iterator = iter(iterable)
    def __iter__(self):
        return self
    def __next__(self):
        """Return the next item from the iterator."""
        try:
            return self.remaining.popleft()
        except IndexError:
            return next(self._iterator)
    next = __next__  # Python 2 compatibility
    def push(self, item):
        """Push the `item` on the internal stack, it will be returned on the
        next :meth:`next` call.
        """
        self.remaining.append(item)
    def peek(self):
        """Return the next item without changing the state of the iterator."""
        upcoming = next(self)
        self.push(upcoming)
        return upcoming
def get_figtype(node):
    """Return figtype for given node."""
    from docutils import nodes

    def has_child(node, cls):
        return any(isinstance(child, cls) for child in node)

    if isinstance(node, nodes.figure):
        return 'figure'
    if isinstance(node, nodes.image) and isinstance(node.parent, nodes.figure):
        # bare image node is not supported because it doesn't have caption and
        # no-caption-target isn't a numbered figure.
        return 'figure'
    if isinstance(node, nodes.table):
        return 'table'
    if isinstance(node, nodes.container) and has_child(node, nodes.literal_block):
        return 'code-block'
    # Anything else is not a numberable figure-like node.
    return None
def import_object(objname, source=None):
    """Import and return the object named by the dotted path *objname*.

    *source* is an optional description appended to error messages.
    Raises ExtensionError when the name is not dotted, the module cannot
    be imported, or the attribute is missing.
    """
    # Suffix added to every error message; empty when no source is given.
    suffix = source and ' (needed for %s)' % source or ''
    try:
        module, name = objname.rsplit('.', 1)
    except ValueError as err:
        raise ExtensionError('Invalid full object name %s' % objname + suffix,
                             err)
    try:
        return getattr(__import__(module, None, None, [name]), name)
    except ImportError as err:
        raise ExtensionError('Could not import %s' % module + suffix,
                             err)
    except AttributeError as err:
        raise ExtensionError('Could not find %s' % objname + suffix,
                             err)
| |
"""The test for the Template sensor platform."""
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.setup import setup_component, async_setup_component
from tests.common import get_test_home_assistant, assert_setup_component
from homeassistant.const import STATE_UNAVAILABLE, STATE_ON, STATE_OFF
class TestTemplateSensor:
    """Test the Template sensor."""

    # A fresh hass instance is created per test in setup_method and torn
    # down in teardown_method.
    hass = None
    # pylint: disable=invalid-name
    def setup_method(self, method):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()
    def test_template(self):
        """Test template."""
        with assert_setup_component(1):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test_template_sensor": {
                                "value_template": "It {{ states.sensor.test_state.state }}."
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        # Referenced entity does not exist yet, so the template renders empty.
        assert state.state == "It ."
        self.hass.states.set("sensor.test_state", "Works")
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.state == "It Works."
    def test_icon_template(self):
        """Test icon template."""
        with assert_setup_component(1):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test_template_sensor": {
                                "value_template": "{{ states.sensor.test_state.state }}",
                                "icon_template": "{% if states.sensor.test_state.state == "
                                "'Works' %}"
                                "mdi:check"
                                "{% endif %}",
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        # Icon stays empty until the template condition matches.
        assert state.attributes.get("icon") == ""
        self.hass.states.set("sensor.test_state", "Works")
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.attributes["icon"] == "mdi:check"
    def test_entity_picture_template(self):
        """Test entity_picture template."""
        with assert_setup_component(1):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test_template_sensor": {
                                "value_template": "{{ states.sensor.test_state.state }}",
                                "entity_picture_template": "{% if states.sensor.test_state.state == "
                                "'Works' %}"
                                "/local/sensor.png"
                                "{% endif %}",
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.attributes.get("entity_picture") == ""
        self.hass.states.set("sensor.test_state", "Works")
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.attributes["entity_picture"] == "/local/sensor.png"
    def test_friendly_name_template(self):
        """Test friendly_name template."""
        with assert_setup_component(1):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test_template_sensor": {
                                "value_template": "{{ states.sensor.test_state.state }}",
                                "friendly_name_template": "It {{ states.sensor.test_state.state }}.",
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.attributes.get("friendly_name") == "It ."
        self.hass.states.set("sensor.test_state", "Works")
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.attributes["friendly_name"] == "It Works."
    def test_friendly_name_template_with_unknown_state(self):
        """Test friendly_name template with an unknown value_template."""
        with assert_setup_component(1):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test_template_sensor": {
                                "value_template": "{{ states.fourohfour.state }}",
                                "friendly_name_template": "It {{ states.sensor.test_state.state }}.",
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        # friendly_name renders even though the value template is broken.
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.attributes["friendly_name"] == "It ."
        self.hass.states.set("sensor.test_state", "Works")
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.attributes["friendly_name"] == "It Works."
    def test_attribute_templates(self):
        """Test attribute_templates template."""
        with assert_setup_component(1):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test_template_sensor": {
                                "value_template": "{{ states.sensor.test_state.state }}",
                                "attribute_templates": {
                                    "test_attribute": "It {{ states.sensor.test_state.state }}."
                                },
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.attributes.get("test_attribute") == "It ."
        self.hass.states.set("sensor.test_state", "Works")
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.attributes["test_attribute"] == "It Works."
    def test_template_syntax_error(self):
        """Test templating syntax error."""
        # assert_setup_component(0): the broken config must be rejected.
        with assert_setup_component(0):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test_template_sensor": {
                                "value_template": "{% if rubbish %}"
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        assert self.hass.states.all() == []
    def test_template_attribute_missing(self):
        """Test missing attribute template."""
        with assert_setup_component(1):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test_template_sensor": {
                                "value_template": "It {{ states.sensor.test_state"
                                ".attributes.missing }}."
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        # Rendering fails at runtime, so the sensor reports unavailable.
        state = self.hass.states.get("sensor.test_template_sensor")
        assert state.state == STATE_UNAVAILABLE
    def test_invalid_name_does_not_create(self):
        """Test invalid name."""
        with assert_setup_component(0):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test INVALID sensor": {
                                "value_template": "{{ states.sensor.test_state.state }}"
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        assert self.hass.states.all() == []
    def test_invalid_sensor_does_not_create(self):
        """Test invalid sensor."""
        with assert_setup_component(0):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {"test_template_sensor": "invalid"},
                    }
                },
            )
        self.hass.start()
        assert self.hass.states.all() == []
    def test_no_sensors_does_not_create(self):
        """Test no sensors."""
        with assert_setup_component(0):
            assert setup_component(
                self.hass, "sensor", {"sensor": {"platform": "template"}}
            )
        self.hass.start()
        self.hass.block_till_done()
        assert self.hass.states.all() == []
    def test_missing_template_does_not_create(self):
        """Test missing template."""
        with assert_setup_component(0):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test_template_sensor": {
                                "not_value_template": "{{ states.sensor.test_state.state }}"
                            }
                        },
                    }
                },
            )
        self.hass.start()
        self.hass.block_till_done()
        assert self.hass.states.all() == []
    def test_setup_invalid_device_class(self):
        """Test setup with invalid device_class."""
        with assert_setup_component(0):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test": {
                                "value_template": "{{ states.sensor.test_sensor.state }}",
                                "device_class": "foobarnotreal",
                            }
                        },
                    }
                },
            )
    def test_setup_valid_device_class(self):
        """Test setup with valid device_class."""
        with assert_setup_component(1):
            assert setup_component(
                self.hass,
                "sensor",
                {
                    "sensor": {
                        "platform": "template",
                        "sensors": {
                            "test1": {
                                "value_template": "{{ states.sensor.test_sensor.state }}",
                                "device_class": "temperature",
                            },
                            "test2": {
                                "value_template": "{{ states.sensor.test_sensor.state }}"
                            },
                        },
                    }
                },
            )
        self.hass.block_till_done()
        state = self.hass.states.get("sensor.test1")
        assert state.attributes["device_class"] == "temperature"
        # device_class is optional and must not leak onto sensors without it.
        state = self.hass.states.get("sensor.test2")
        assert "device_class" not in state.attributes
async def test_available_template_with_entities(hass):
    """Test availability templates with values from other entities."""
    hass.states.async_set("sensor.availability_sensor", STATE_OFF)
    with assert_setup_component(1, "sensor"):
        assert await async_setup_component(
            hass,
            "sensor",
            {
                "sensor": {
                    "platform": "template",
                    "sensors": {
                        "test_template_sensor": {
                            "value_template": "{{ states.sensor.test_sensor.state }}",
                            "availability_template": "{{ is_state('sensor.availability_sensor', 'on') }}",
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # When the availability template returns true...
    hass.states.async_set("sensor.availability_sensor", STATE_ON)
    await hass.async_block_till_done()
    # ...the device state should not be unavailable.
    assert hass.states.get("sensor.test_template_sensor").state != STATE_UNAVAILABLE
    # When the availability template returns false...
    hass.states.async_set("sensor.availability_sensor", STATE_OFF)
    await hass.async_block_till_done()
    # ...the device state should be unavailable.
    assert hass.states.get("sensor.test_template_sensor").state == STATE_UNAVAILABLE
async def test_invalid_attribute_template(hass, caplog):
    """Test that errors are logged if rendering template fails."""
    hass.states.async_set("sensor.test_sensor", "startup")
    await async_setup_component(
        hass,
        "sensor",
        {
            "sensor": {
                "platform": "template",
                "sensors": {
                    "invalid_template": {
                        "value_template": "{{ states.sensor.test_sensor.state }}",
                        "attribute_templates": {
                            # References an entity that never exists.
                            "test_attribute": "{{ states.sensor.unknown.attributes.picture }}"
                        },
                    }
                },
            }
        },
    )
    await hass.async_block_till_done()
    # Two states: the source sensor plus the template sensor itself.
    assert len(hass.states.async_all()) == 2
    await hass.helpers.entity_component.async_update_entity("sensor.invalid_template")
    assert ("Error rendering attribute test_attribute") in caplog.text
async def test_invalid_availability_template_keeps_component_available(hass, caplog):
    """Test that an invalid availability keeps the device available."""
    await async_setup_component(
        hass,
        "sensor",
        {
            "sensor": {
                "platform": "template",
                "sensors": {
                    "my_sensor": {
                        "value_template": "{{ states.sensor.test_state.state }}",
                        # 'x' is undefined on purpose; rendering must fail.
                        "availability_template": "{{ x - 12 }}",
                    }
                },
            }
        },
    )
    await hass.async_start()
    await hass.async_block_till_done()
    # A broken availability template must not mark the entity unavailable.
    assert hass.states.get("sensor.my_sensor").state != STATE_UNAVAILABLE
    assert ("UndefinedError: 'x' is undefined") in caplog.text
async def test_no_template_match_all(hass, caplog):
    """Test that we do not allow sensors that match on all."""
    hass.states.async_set("sensor.test_sensor", "startup")
    await async_setup_component(
        hass,
        "sensor",
        {
            "sensor": {
                "platform": "template",
                "sensors": {
                    # Each sensor below has one template ("{{ 1 + 1 }}") with
                    # no extractable entities, which would otherwise force a
                    # match-all listener.
                    "invalid_state": {"value_template": "{{ 1 + 1 }}"},
                    "invalid_icon": {
                        "value_template": "{{ states.sensor.test_sensor.state }}",
                        "icon_template": "{{ 1 + 1 }}",
                    },
                    "invalid_entity_picture": {
                        "value_template": "{{ states.sensor.test_sensor.state }}",
                        "entity_picture_template": "{{ 1 + 1 }}",
                    },
                    "invalid_friendly_name": {
                        "value_template": "{{ states.sensor.test_sensor.state }}",
                        "friendly_name_template": "{{ 1 + 1 }}",
                    },
                    "invalid_attribute": {
                        "value_template": "{{ states.sensor.test_sensor.state }}",
                        "attribute_templates": {"test_attribute": "{{ 1 + 1 }}"},
                    },
                },
            }
        },
    )
    # Before startup the sensors stay "unknown".
    assert hass.states.get("sensor.invalid_state").state == "unknown"
    assert hass.states.get("sensor.invalid_icon").state == "unknown"
    assert hass.states.get("sensor.invalid_entity_picture").state == "unknown"
    assert hass.states.get("sensor.invalid_friendly_name").state == "unknown"
    await hass.async_block_till_done()
    # 5 template sensors + the source test_sensor.
    assert len(hass.states.async_all()) == 6
    assert (
        "Template sensor invalid_state has no entity ids "
        "configured to track nor were we able to extract the entities to "
        "track from the value template"
    ) in caplog.text
    assert (
        "Template sensor invalid_icon has no entity ids "
        "configured to track nor were we able to extract the entities to "
        "track from the icon template"
    ) in caplog.text
    assert (
        "Template sensor invalid_entity_picture has no entity ids "
        "configured to track nor were we able to extract the entities to "
        "track from the entity_picture template"
    ) in caplog.text
    assert (
        "Template sensor invalid_friendly_name has no entity ids "
        "configured to track nor were we able to extract the entities to "
        "track from the friendly_name template"
    ) in caplog.text
    assert (
        "Template sensor invalid_attribute has no entity ids "
        "configured to track nor were we able to extract the entities to "
        "track from the test_attribute template"
    ) in caplog.text
    assert hass.states.get("sensor.invalid_state").state == "unknown"
    assert hass.states.get("sensor.invalid_icon").state == "unknown"
    assert hass.states.get("sensor.invalid_entity_picture").state == "unknown"
    assert hass.states.get("sensor.invalid_friendly_name").state == "unknown"
    assert hass.states.get("sensor.invalid_attribute").state == "unknown"
    # The sensors render once at startup...
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    assert hass.states.get("sensor.invalid_state").state == "2"
    assert hass.states.get("sensor.invalid_icon").state == "startup"
    assert hass.states.get("sensor.invalid_entity_picture").state == "startup"
    assert hass.states.get("sensor.invalid_friendly_name").state == "startup"
    assert hass.states.get("sensor.invalid_attribute").state == "startup"
    # ...but do NOT update on state changes, since no entities are tracked.
    hass.states.async_set("sensor.test_sensor", "hello")
    await hass.async_block_till_done()
    assert hass.states.get("sensor.invalid_state").state == "2"
    assert hass.states.get("sensor.invalid_icon").state == "startup"
    assert hass.states.get("sensor.invalid_entity_picture").state == "startup"
    assert hass.states.get("sensor.invalid_friendly_name").state == "startup"
    assert hass.states.get("sensor.invalid_attribute").state == "startup"
    # Forced updates still re-render the value templates.
    await hass.helpers.entity_component.async_update_entity("sensor.invalid_state")
    await hass.helpers.entity_component.async_update_entity("sensor.invalid_icon")
    await hass.helpers.entity_component.async_update_entity(
        "sensor.invalid_entity_picture"
    )
    await hass.helpers.entity_component.async_update_entity(
        "sensor.invalid_friendly_name"
    )
    await hass.helpers.entity_component.async_update_entity("sensor.invalid_attribute")
    assert hass.states.get("sensor.invalid_state").state == "2"
    assert hass.states.get("sensor.invalid_icon").state == "hello"
    assert hass.states.get("sensor.invalid_entity_picture").state == "hello"
    assert hass.states.get("sensor.invalid_friendly_name").state == "hello"
    assert hass.states.get("sensor.invalid_attribute").state == "hello"
| |
"""Tests for arcam fmj receivers."""
from math import isclose
from arcam.fmj import DecodeMode2CH, DecodeModeMCH, IncomingAudioFormat, SourceCodes
import pytest
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
MEDIA_TYPE_MUSIC,
SERVICE_SELECT_SOURCE,
)
from homeassistant.const import ATTR_ENTITY_ID
from .conftest import MOCK_HOST, MOCK_NAME, MOCK_PORT, MOCK_UUID
from tests.async_mock import ANY, MagicMock, Mock, PropertyMock, patch
# Service payload used to emulate an external turn-on switch for the player.
# NOTE(review): unused in this chunk — presumably consumed by conftest/other
# tests; confirm before removing.
MOCK_TURN_ON = {
    "service": "switch.turn_on",
    "data": {"entity_id": "switch.test"},
}
async def update(player, force_refresh=False):
    """Force an update of the player and return its current state data."""
    await player.async_update_ha_state(force_refresh=force_refresh)
    return player.hass.states.get(player.entity_id)
async def test_properties(player, state):
    """Test standard properties."""
    assert player.unique_id == f"{MOCK_UUID}-1"
    assert player.device_info == {
        "name": f"Arcam FMJ ({MOCK_HOST})",
        "identifiers": {("arcam_fmj", MOCK_UUID), ("arcam_fmj", MOCK_HOST, MOCK_PORT)},
        "model": "Arcam FMJ AVR",
        "manufacturer": "Arcam",
    }
    # Entity is push-based (dispatcher signals), so polling must be off.
    assert not player.should_poll
async def test_powered_off(hass, player, state):
    """Test properties in powered off state."""
    state.get_source.return_value = None
    state.get_power.return_value = None
    data = await update(player)
    assert "source" not in data.attributes
    assert data.state == "off"
async def test_powered_on(player, state):
    """Test properties in powered on state."""
    state.get_source.return_value = SourceCodes.PVR
    state.get_power.return_value = True
    data = await update(player)
    assert data.attributes["source"] == "PVR"
    assert data.state == "on"
async def test_supported_features(player, state):
    """Test supported features."""
    data = await update(player)
    # 200588 is the combined SUPPORT_* bitmask the entity exposes; a change
    # here means the advertised feature set changed.
    assert data.attributes["supported_features"] == 200588
async def test_turn_on(player, state):
    """Test turn on service."""
    # Unknown power state: no command should be sent to the device.
    state.get_power.return_value = None
    await player.async_turn_on()
    state.set_power.assert_not_called()
    state.get_power.return_value = False
    await player.async_turn_on()
    state.set_power.assert_called_with(True)
async def test_turn_off(player, state):
    """Test command to turn off."""
    await player.async_turn_off()
    state.set_power.assert_called_with(False)
@pytest.mark.parametrize("mute", [True, False])
async def test_mute_volume(player, state, mute):
    """Test mute functionality."""
    await player.async_mute_volume(mute)
    state.set_mute.assert_called_with(mute)
    player.async_write_ha_state.assert_called_with()
async def test_name(player):
    """Test name."""
    assert player.name == f"{MOCK_NAME} - Zone: 1"
async def test_update(player, state):
    """Test update."""
    await update(player, force_refresh=True)
    state.update.assert_called_with()
@pytest.mark.parametrize(
    "fmt, result",
    [
        (None, True),
        (IncomingAudioFormat.PCM, True),
        (IncomingAudioFormat.ANALOGUE_DIRECT, True),
        (IncomingAudioFormat.DOLBY_DIGITAL, False),
    ],
)
async def test_2ch(player, state, fmt, result):
    """Test selection of 2ch mode."""
    # PCM/analogue (or unknown) input counts as two-channel; encoded
    # multichannel formats do not.
    state.get_incoming_audio_format.return_value = (fmt, None)
    assert player._get_2ch() == result  # pylint: disable=W0212
@pytest.mark.parametrize(
    "source, value",
    [("PVR", SourceCodes.PVR), ("BD", SourceCodes.BD), ("INVALID", None)],
)
async def test_select_source(hass, player_setup, state, source, value):
    """Test selection of source."""
    await hass.services.async_call(
        "media_player",
        SERVICE_SELECT_SOURCE,
        service_data={ATTR_ENTITY_ID: player_setup, ATTR_INPUT_SOURCE: source},
        blocking=True,
    )
    if value:
        state.set_source.assert_called_with(value)
    else:
        # Unknown source names must be ignored rather than forwarded.
        state.set_source.assert_not_called()
async def test_source_list(player, state):
    """Test source list."""
    state.get_source_list.return_value = [SourceCodes.BD]
    data = await update(player)
    assert data.attributes["source_list"] == ["BD"]
@pytest.mark.parametrize(
    "mode, mode_sel, mode_2ch, mode_mch",
    [
        ("STEREO", True, DecodeMode2CH.STEREO, None),
        # BUG FIX: the original list contained ("STEREO", False, None, None)
        # twice — an exact duplicate case. Replace one copy with the
        # multi-channel path, mirroring test_sound_mode's parametrization,
        # so the MCH decoder dispatch is actually exercised.
        ("STEREO_DOWNMIX", False, None, DecodeModeMCH.STEREO_DOWNMIX),
        ("STEREO", False, None, None),
    ],
)
async def test_select_sound_mode(player, state, mode, mode_sel, mode_2ch, mode_mch):
    """Test that selecting a sound mode dispatches to the right decoder setter."""
    player._get_2ch = Mock(return_value=mode_sel)  # pylint: disable=W0212
    await player.async_select_sound_mode(mode)
    if mode_2ch:
        state.set_decode_mode_2ch.assert_called_with(mode_2ch)
    else:
        state.set_decode_mode_2ch.assert_not_called()
    if mode_mch:
        state.set_decode_mode_mch.assert_called_with(mode_mch)
    else:
        state.set_decode_mode_mch.assert_not_called()
async def test_volume_up(player, state):
    """Test volume up functionality."""
    await player.async_volume_up()
    state.inc_volume.assert_called_with()
    player.async_write_ha_state.assert_called_with()
async def test_volume_down(player, state):
    """Test volume down functionality."""
    await player.async_volume_down()
    state.dec_volume.assert_called_with()
    player.async_write_ha_state.assert_called_with()
@pytest.mark.parametrize(
    "mode, mode_sel, mode_2ch, mode_mch",
    [
        ("STEREO", True, DecodeMode2CH.STEREO, None),
        ("STEREO_DOWNMIX", False, None, DecodeModeMCH.STEREO_DOWNMIX),
        (None, False, None, None),
    ],
)
async def test_sound_mode(player, state, mode, mode_sel, mode_2ch, mode_mch):
    """Test selection sound mode."""
    player._get_2ch = Mock(return_value=mode_sel)  # pylint: disable=W0212
    state.get_decode_mode_2ch.return_value = mode_2ch
    state.get_decode_mode_mch.return_value = mode_mch
    assert player.sound_mode == mode
async def test_sound_mode_list(player, state):
    """Test sound mode list."""
    # The advertised list follows the active channel mode (2ch vs MCH).
    player._get_2ch = Mock(return_value=True)  # pylint: disable=W0212
    assert sorted(player.sound_mode_list) == sorted([x.name for x in DecodeMode2CH])
    player._get_2ch = Mock(return_value=False)  # pylint: disable=W0212
    assert sorted(player.sound_mode_list) == sorted([x.name for x in DecodeModeMCH])
async def test_sound_mode_zone_x(player, state):
    """Test second zone sound mode."""
    # Decode modes only apply to zone 1; other zones report nothing.
    state.zn = 2
    assert player.sound_mode is None
    assert player.sound_mode_list is None
async def test_is_volume_muted(player, state):
    """Test muted."""
    state.get_mute.return_value = True
    assert player.is_volume_muted is True  # pylint: disable=singleton-comparison
    state.get_mute.return_value = False
    assert player.is_volume_muted is False  # pylint: disable=singleton-comparison
    state.get_mute.return_value = None
    assert player.is_volume_muted is None
async def test_volume_level(player, state):
    """Test volume."""
    # Device volume is 0-99; HA's volume_level is normalized to 0.0-1.0.
    state.get_volume.return_value = 0
    assert isclose(player.volume_level, 0.0)
    state.get_volume.return_value = 50
    assert isclose(player.volume_level, 50.0 / 99)
    state.get_volume.return_value = 99
    assert isclose(player.volume_level, 1.0)
    state.get_volume.return_value = None
    assert player.volume_level is None
@pytest.mark.parametrize("volume, call", [(0.0, 0), (0.5, 50), (1.0, 99)])
async def test_set_volume_level(player, state, volume, call):
    """Test setting volume."""
    await player.async_set_volume_level(volume)
    state.set_volume.assert_called_with(call)
@pytest.mark.parametrize(
    "source, media_content_type",
    [
        (SourceCodes.DAB, MEDIA_TYPE_MUSIC),
        (SourceCodes.FM, MEDIA_TYPE_MUSIC),
        (SourceCodes.PVR, None),
        (None, None),
    ],
)
async def test_media_content_type(player, state, source, media_content_type):
    """Test content type deduction."""
    # Only the radio sources (DAB/FM) report as music.
    state.get_source.return_value = source
    assert player.media_content_type == media_content_type
@pytest.mark.parametrize(
    "source, dab, rds, channel",
    [
        (SourceCodes.DAB, "dab", "rds", "dab"),
        (SourceCodes.DAB, None, None, None),
        (SourceCodes.FM, "dab", "rds", "rds"),
        (SourceCodes.FM, None, None, None),
        (SourceCodes.PVR, "dab", "rds", None),
    ],
)
async def test_media_channel(player, state, source, dab, rds, channel):
    """Test media channel."""
    # DAB uses the DAB station name; FM uses RDS info; others have none.
    state.get_dab_station.return_value = dab
    state.get_rds_information.return_value = rds
    state.get_source.return_value = source
    assert player.media_channel == channel
@pytest.mark.parametrize(
    "source, dls, artist",
    [
        (SourceCodes.DAB, "dls", "dls"),
        (SourceCodes.FM, "dls", None),
        (SourceCodes.DAB, None, None),
    ],
)
async def test_media_artist(player, state, source, dls, artist):
    """Test media artist."""
    # DLS programme text is only exposed as artist for DAB.
    state.get_dls_pdt.return_value = dls
    state.get_source.return_value = source
    assert player.media_artist == artist
@pytest.mark.parametrize(
    "source, channel, title",
    [
        (SourceCodes.DAB, "channel", "DAB - channel"),
        (SourceCodes.DAB, None, "DAB"),
        (None, None, None),
    ],
)
async def test_media_title(player, state, source, channel, title):
    """Test media title."""
    from homeassistant.components.arcam_fmj.media_player import ArcamFmj
    state.get_source.return_value = source
    # Patch media_channel so the title composition is tested in isolation.
    with patch.object(
        ArcamFmj, "media_channel", new_callable=PropertyMock
    ) as media_channel:
        media_channel.return_value = channel
        data = await update(player)
        if title is None:
            assert "media_title" not in data.attributes
        else:
            assert data.attributes["media_title"] == title
async def test_added_to_hass(player, state):
    """Test that adding to hass starts the client and hooks dispatcher signals."""
    from homeassistant.components.arcam_fmj.const import (
        SIGNAL_CLIENT_DATA,
        SIGNAL_CLIENT_STARTED,
        SIGNAL_CLIENT_STOPPED,
    )
    connectors = {}
    def _connect(signal, fun):
        connectors[signal] = fun
    player.hass = MagicMock()
    # BUG FIX: Mock's hook attribute is ``side_effect`` (singular).  The
    # original assigned to ``side_effects``, which just creates an unused
    # attribute on the mock, so _connect was never invoked and ``connectors``
    # silently stayed empty.
    player.hass.helpers.dispatcher.async_dispatcher_connect.side_effect = _connect
    await player.async_added_to_hass()
    state.start.assert_called_with()
    player.hass.helpers.dispatcher.async_dispatcher_connect.assert_any_call(
        SIGNAL_CLIENT_DATA, ANY
    )
    player.hass.helpers.dispatcher.async_dispatcher_connect.assert_any_call(
        SIGNAL_CLIENT_STARTED, ANY
    )
    player.hass.helpers.dispatcher.async_dispatcher_connect.assert_any_call(
        SIGNAL_CLIENT_STOPPED, ANY
    )
| |
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import google.appengine.ext.ndb as ndb
class GithubResource(ndb.Model):
    # A key holder used to define an entitygroup for
    # each Issue/PR, for easy ancestor queries.
    @staticmethod
    def make_key(repo, number):
        # Key id is the repo name and the issue/PR number joined by a space.
        return ndb.Key(GithubResource, '%s %s' % (repo, number))
def shrink(body):
    """Recursively remove Github API urls from an object to make it more human-readable."""
    # Collect keys first; mutating a dict while iterating it is not allowed.
    doomed = []
    for key, value in body.iteritems():
        if isinstance(value, basestring):
            if key.endswith('url') and value.startswith(
                    ('https://api.github.com/',
                     'https://avatars.githubusercontent.com')):
                doomed.append(key)
        elif isinstance(value, dict):
            shrink(value)
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, dict):
                    shrink(element)
    for key in doomed:
        body.pop(key)
    return body
class GithubWebhookRaw(ndb.Model):
    # Raw webhook payload as received from Github, stored compressed.
    repo = ndb.StringProperty()
    number = ndb.IntegerProperty(indexed=False)
    event = ndb.StringProperty()
    timestamp = ndb.DateTimeProperty(auto_now_add=True)
    body = ndb.TextProperty(compressed=True)
    def to_tuple(self):
        # (event type, shrunken payload dict, unix timestamp with microseconds)
        return (self.event, shrink(json.loads(self.body)), float(self.timestamp.strftime('%s.%f')))
def from_iso8601(t):
    """Parse an ISO-8601 UTC timestamp string; falsy inputs pass through unchanged."""
    if not t:
        # Preserve the original short-circuit: None stays None, '' stays ''.
        return t
    return datetime.datetime.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
def make_kwargs(body, fields):
    """Extract ``fields`` from ``body`` into a kwargs dict.

    Values for fields ending in '_at' are parsed as ISO-8601 timestamps.
    """
    kwargs = {}
    for field in fields:
        value = body[field]
        if field.endswith('_at'):
            value = from_iso8601(value)
        kwargs[field] = value
    return kwargs
class GHStatus(ndb.Model):
    # Key: {repo}\t{sha}\t{context}
    state = ndb.StringProperty(indexed=False)
    target_url = ndb.StringProperty(indexed=False)
    description = ndb.TextProperty()
    created_at = ndb.DateTimeProperty(indexed=False)
    updated_at = ndb.DateTimeProperty(indexed=False)
    @staticmethod
    def make_key(repo, sha, context):
        return ndb.Key(GHStatus, '%s\t%s\t%s' % (repo, sha, context))
    @staticmethod
    def make(repo, sha, context, **kwargs):
        return GHStatus(key=GHStatus.make_key(repo, sha, context), **kwargs)
    @staticmethod
    def query_for_sha(repo, sha):
        """Return a query for every status on (repo, sha), any context."""
        # '' and '\x7f' bracket all printable context strings in key order.
        before = GHStatus.make_key(repo, sha, '')
        after = GHStatus.make_key(repo, sha, '\x7f')
        return GHStatus.query(GHStatus.key > before, GHStatus.key < after)
    @staticmethod
    def from_json(body):
        """Build a GHStatus from a Github status-event payload."""
        kwargs = make_kwargs(body,
            'sha context state target_url description '
            'created_at updated_at'.split())
        kwargs['repo'] = body['name']
        return GHStatus.make(**kwargs)
    # repo/sha/context are recovered from the tab-separated key id.
    @property
    def repo(self):
        return self.key.id().split('\t', 1)[0]
    @property
    def sha(self):
        return self.key.id().split('\t', 2)[1]
    @property
    def context(self):
        return self.key.id().split('\t', 2)[2]
class GHIssueDigest(ndb.Model):
# Key: {repo} {number}
is_pr = ndb.BooleanProperty()
is_open = ndb.BooleanProperty()
involved = ndb.StringProperty(repeated=True)
xref = ndb.StringProperty(repeated=True)
payload = ndb.JsonProperty()
updated_at = ndb.DateTimeProperty()
head = ndb.StringProperty()
@staticmethod
def make_key(repo, number):
return ndb.Key(GHIssueDigest, '%s %s' % (repo, number))
@staticmethod
def make(repo, number, is_pr, is_open, involved, payload, updated_at):
return GHIssueDigest(key=GHIssueDigest.make_key(repo, number),
is_pr=is_pr, is_open=is_open, involved=involved, payload=payload,
updated_at=updated_at, head=payload.get('head'),
xref=payload.get('xrefs', []))
@staticmethod
def get(repo, number):
return GHIssueDigest.make_key(repo, number).get()
@property
def repo(self):
return self.key.id().split()[0]
@property
def number(self):
return int(self.key.id().split()[1])
@property
def url(self):
return 'https://github.com/%s/issues/%s' % tuple(self.key.id().split())
@property
def title(self):
return self.payload.get('title', '')
@staticmethod
def find_head(repo, head):
return GHIssueDigest.query(GHIssueDigest.key > GHIssueDigest.make_key(repo, ''),
GHIssueDigest.key < GHIssueDigest.make_key(repo, '~'),
GHIssueDigest.head == head)
@staticmethod
@ndb.tasklet
def find_xrefs_async(xref):
issues = yield GHIssueDigest.query(GHIssueDigest.xref == xref).fetch_async()
raise ndb.Return(list(issues))
@staticmethod
@ndb.tasklet
def find_xrefs_multi_async(xrefs):
"""
Given a list of xrefs to search for, return a dict of lists
of result values. Xrefs that have no corresponding issues are
not represented in the dictionary.
"""
# The IN operator does multiple sequential queries and ORs them
# together. This is slow here-- a range query is faster, since
# this is used to get xrefs for a set of contiguous builds.
if not xrefs: # nothing => nothing
raise ndb.Return({})
xrefs = set(xrefs)
issues = yield GHIssueDigest.query(
GHIssueDigest.xref >= min(xrefs),
GHIssueDigest.xref <= max(xrefs)).fetch_async(batch_size=500)
refs = {}
for issue in issues:
for xref in issue.xref:
if xref in xrefs:
refs.setdefault(xref, []).append(issue)
raise ndb.Return(refs)
@staticmethod
def find_open_prs():
    """Query all open pull requests across every repository."""
    # pylint: disable=singleton-comparison
    # `== True` is required: ndb builds the query filter from the
    # comparison expression, so `is True` or truthiness won't work.
    return GHIssueDigest.query(GHIssueDigest.is_pr == True,
                               GHIssueDigest.is_open == True)
@staticmethod
def find_open_prs_for_repo(repo):
    """Open PRs restricted to one repository via a key-range filter."""
    lower = GHIssueDigest.make_key(repo, '')
    upper = GHIssueDigest.make_key(repo, '~')
    open_prs = GHIssueDigest.find_open_prs()
    return open_prs.filter(GHIssueDigest.key > lower,
                           GHIssueDigest.key < upper)
class GHUserState(ndb.Model):
    """Per-user acknowledgement state for issue notifications."""
    # Key: {github username}
    acks = ndb.JsonProperty()  # dict of issue keys => ack time (seconds since epoch)

    @staticmethod
    def make_key(user):
        # Key id is simply the GitHub username.
        return ndb.Key(GHUserState, user)

    @staticmethod
    def make(user, acks=None):
        # `acks or {}` ensures the stored value is always a dict, never None.
        return GHUserState(key=GHUserState.make_key(user), acks=acks or {})
@ndb.transactional
def save_if_newer(obj):
    """Transactionally store *obj* unless a fresher copy already exists.

    Returns True when *obj* was written; False when the stored entity
    carries a strictly newer `updated_at` timestamp.
    """
    assert obj.updated_at is not None
    stored = obj.key.get()
    # Write when nothing is stored yet, when the stored copy lacks a
    # timestamp, or when obj is at least as recent as the stored copy.
    if (stored is None or stored.updated_at is None
            or obj.updated_at >= stored.updated_at):
        obj.put()
        return True
    return False
| |
"""
util tests
"""
import os
import stat
import sys
import time
import shutil
import tempfile
import pytest
from mock import Mock, patch
from pip.exceptions import BadCommand
from pip.utils import (egg_link_path, Inf, get_installed_distributions,
find_command, untar_file, unzip_file, rmtree)
from pip.operations.freeze import freeze_excludes
class Tests_EgglinkPath:
    """util.egg_link_path() tests.

    egg_link_path() must honor virtualenv isolation (no-global) and give
    the usersite egg-link precedence outside a virtualenv.
    """

    def setup(self):
        # Fake distribution 'foo' plus the two egg-link paths that
        # egg_link_path() may probe for it.
        project = 'foo'

        self.mock_dist = Mock(project_name=project)
        self.site_packages = 'SITE_PACKAGES'
        self.user_site = 'USER_SITE'
        self.user_site_egglink = os.path.join(
            self.user_site,
            '%s.egg-link' % project
        )
        self.site_packages_egglink = os.path.join(
            self.site_packages,
            '%s.egg-link' % project,
        )

        # patches
        # Replace pip.utils module globals and os.path.isfile with mocks;
        # the saved originals are restored in teardown().
        from pip import utils
        self.old_site_packages = utils.site_packages
        self.mock_site_packages = utils.site_packages = 'SITE_PACKAGES'
        self.old_running_under_virtualenv = utils.running_under_virtualenv
        self.mock_running_under_virtualenv = utils.running_under_virtualenv = \
            Mock()
        self.old_virtualenv_no_global = utils.virtualenv_no_global
        self.mock_virtualenv_no_global = utils.virtualenv_no_global = Mock()
        self.old_user_site = utils.user_site
        self.mock_user_site = utils.user_site = self.user_site

        from os import path
        self.old_isfile = path.isfile
        self.mock_isfile = path.isfile = Mock()

    def teardown(self):
        # Undo every patch made in setup(), in the same attribute pairs.
        from pip import utils
        utils.site_packages = self.old_site_packages
        utils.running_under_virtualenv = self.old_running_under_virtualenv
        utils.virtualenv_no_global = self.old_virtualenv_no_global
        utils.user_site = self.old_user_site
        from os import path
        path.isfile = self.old_isfile

    def eggLinkInUserSite(self, egglink):
        # isfile side effect: only the usersite egg-link exists.
        return egglink == self.user_site_egglink

    def eggLinkInSitePackages(self, egglink):
        # isfile side effect: only the site-packages egg-link exists.
        return egglink == self.site_packages_egglink

    # ####################### #
    # # egglink in usersite # #
    # ####################### #
    def test_egglink_in_usersite_notvenv(self):
        self.mock_virtualenv_no_global.return_value = False
        self.mock_running_under_virtualenv.return_value = False
        self.mock_isfile.side_effect = self.eggLinkInUserSite
        assert egg_link_path(self.mock_dist) == self.user_site_egglink

    def test_egglink_in_usersite_venv_noglobal(self):
        # Isolated virtualenv: the usersite egg-link must be ignored.
        self.mock_virtualenv_no_global.return_value = True
        self.mock_running_under_virtualenv.return_value = True
        self.mock_isfile.side_effect = self.eggLinkInUserSite
        assert egg_link_path(self.mock_dist) is None

    def test_egglink_in_usersite_venv_global(self):
        self.mock_virtualenv_no_global.return_value = False
        self.mock_running_under_virtualenv.return_value = True
        self.mock_isfile.side_effect = self.eggLinkInUserSite
        assert egg_link_path(self.mock_dist) == self.user_site_egglink

    # ####################### #
    # # egglink in sitepkgs # #
    # ####################### #
    def test_egglink_in_sitepkgs_notvenv(self):
        self.mock_virtualenv_no_global.return_value = False
        self.mock_running_under_virtualenv.return_value = False
        self.mock_isfile.side_effect = self.eggLinkInSitePackages
        assert egg_link_path(self.mock_dist) == self.site_packages_egglink

    def test_egglink_in_sitepkgs_venv_noglobal(self):
        self.mock_virtualenv_no_global.return_value = True
        self.mock_running_under_virtualenv.return_value = True
        self.mock_isfile.side_effect = self.eggLinkInSitePackages
        assert egg_link_path(self.mock_dist) == self.site_packages_egglink

    def test_egglink_in_sitepkgs_venv_global(self):
        self.mock_virtualenv_no_global.return_value = False
        self.mock_running_under_virtualenv.return_value = True
        self.mock_isfile.side_effect = self.eggLinkInSitePackages
        assert egg_link_path(self.mock_dist) == self.site_packages_egglink

    # ################################## #
    # # egglink in usersite & sitepkgs # #
    # ################################## #
    def test_egglink_in_both_notvenv(self):
        # Outside a virtualenv, the usersite egg-link wins.
        self.mock_virtualenv_no_global.return_value = False
        self.mock_running_under_virtualenv.return_value = False
        self.mock_isfile.return_value = True
        assert egg_link_path(self.mock_dist) == self.user_site_egglink

    def test_egglink_in_both_venv_noglobal(self):
        self.mock_virtualenv_no_global.return_value = True
        self.mock_running_under_virtualenv.return_value = True
        self.mock_isfile.return_value = True
        assert egg_link_path(self.mock_dist) == self.site_packages_egglink

    def test_egglink_in_both_venv_global(self):
        self.mock_virtualenv_no_global.return_value = False
        self.mock_running_under_virtualenv.return_value = True
        self.mock_isfile.return_value = True
        assert egg_link_path(self.mock_dist) == self.site_packages_egglink

    # ############## #
    # # no egglink # #
    # ############## #
    def test_noegglink_in_sitepkgs_notvenv(self):
        self.mock_virtualenv_no_global.return_value = False
        self.mock_running_under_virtualenv.return_value = False
        self.mock_isfile.return_value = False
        assert egg_link_path(self.mock_dist) is None

    def test_noegglink_in_sitepkgs_venv_noglobal(self):
        self.mock_virtualenv_no_global.return_value = True
        self.mock_running_under_virtualenv.return_value = True
        self.mock_isfile.return_value = False
        assert egg_link_path(self.mock_dist) is None

    def test_noegglink_in_sitepkgs_venv_global(self):
        self.mock_virtualenv_no_global.return_value = False
        self.mock_running_under_virtualenv.return_value = True
        self.mock_isfile.return_value = False
        assert egg_link_path(self.mock_dist) is None
def test_Inf_greater():
    """Test Inf compares greater than any other object."""
    assert Inf > object()
def test_Inf_equals_Inf():
    # Docstring fixed: it previously said "compares greater" (copy-paste
    # from the test above), but this test checks equality.
    """Test Inf compares equal to Inf."""
    assert Inf == Inf
@patch('pip.utils.dist_in_usersite')
@patch('pip.utils.dist_is_local')
@patch('pip.utils.dist_is_editable')
class Tests_get_installed_distributions:
    """test util.get_installed_distributions"""
    # Class-level @patch decorators apply bottom-up, so every test method
    # receives (mock_dist_is_editable, mock_dist_is_local,
    # mock_dist_in_usersite) in that order.

    # Fake working set covering one dist of each kind.
    workingset = [
        Mock(test_name="global"),
        Mock(test_name="editable"),
        Mock(test_name="normal"),
        Mock(test_name="user"),
    ]

    # Stdlib-backport dists (wsgiref is stdlib-only on py2.7+).
    workingset_stdlib = [
        Mock(test_name='normal', key='argparse'),
        Mock(test_name='normal', key='wsgiref')
    ]

    # Dists that `pip freeze` excludes by default.
    workingset_freeze = [
        Mock(test_name='normal', key='pip'),
        Mock(test_name='normal', key='setuptools'),
        Mock(test_name='normal', key='distribute')
    ]

    def dist_is_editable(self, dist):
        return dist.test_name == "editable"

    def dist_is_local(self, dist):
        return dist.test_name != "global" and dist.test_name != 'user'

    def dist_in_usersite(self, dist):
        return dist.test_name == "user"

    @patch('pip._vendor.pkg_resources.working_set', workingset)
    def test_editables_only(self, mock_dist_is_editable,
                            mock_dist_is_local,
                            mock_dist_in_usersite):
        mock_dist_is_editable.side_effect = self.dist_is_editable
        mock_dist_is_local.side_effect = self.dist_is_local
        mock_dist_in_usersite.side_effect = self.dist_in_usersite
        dists = get_installed_distributions(editables_only=True)
        assert len(dists) == 1, dists
        assert dists[0].test_name == "editable"

    @patch('pip._vendor.pkg_resources.working_set', workingset)
    def test_exclude_editables(self, mock_dist_is_editable,
                               mock_dist_is_local,
                               mock_dist_in_usersite):
        mock_dist_is_editable.side_effect = self.dist_is_editable
        mock_dist_is_local.side_effect = self.dist_is_local
        mock_dist_in_usersite.side_effect = self.dist_in_usersite
        dists = get_installed_distributions(include_editables=False)
        assert len(dists) == 1
        assert dists[0].test_name == "normal"

    @patch('pip._vendor.pkg_resources.working_set', workingset)
    def test_include_globals(self, mock_dist_is_editable,
                             mock_dist_is_local,
                             mock_dist_in_usersite):
        mock_dist_is_editable.side_effect = self.dist_is_editable
        mock_dist_is_local.side_effect = self.dist_is_local
        mock_dist_in_usersite.side_effect = self.dist_in_usersite
        dists = get_installed_distributions(local_only=False)
        assert len(dists) == 4

    @patch('pip._vendor.pkg_resources.working_set', workingset)
    def test_user_only(self, mock_dist_is_editable,
                       mock_dist_is_local,
                       mock_dist_in_usersite):
        mock_dist_is_editable.side_effect = self.dist_is_editable
        mock_dist_is_local.side_effect = self.dist_is_local
        mock_dist_in_usersite.side_effect = self.dist_in_usersite
        dists = get_installed_distributions(local_only=False,
                                            user_only=True)
        assert len(dists) == 1
        assert dists[0].test_name == "user"

    @pytest.mark.skipif("sys.version_info >= (2,7)")
    @patch('pip._vendor.pkg_resources.working_set', workingset_stdlib)
    def test_py26_excludes(self, mock_dist_is_editable,
                           mock_dist_is_local,
                           mock_dist_in_usersite):
        # On py2.6, wsgiref is skipped but the argparse backport is kept.
        mock_dist_is_editable.side_effect = self.dist_is_editable
        mock_dist_is_local.side_effect = self.dist_is_local
        mock_dist_in_usersite.side_effect = self.dist_in_usersite
        dists = get_installed_distributions()
        assert len(dists) == 1
        assert dists[0].key == 'argparse'

    @pytest.mark.skipif("sys.version_info < (2,7)")
    @patch('pip._vendor.pkg_resources.working_set', workingset_stdlib)
    def test_gte_py27_excludes(self, mock_dist_is_editable,
                               mock_dist_is_local,
                               mock_dist_in_usersite):
        # On py2.7+ both stdlib backports are skipped.
        mock_dist_is_editable.side_effect = self.dist_is_editable
        mock_dist_is_local.side_effect = self.dist_is_local
        mock_dist_in_usersite.side_effect = self.dist_in_usersite
        dists = get_installed_distributions()
        assert len(dists) == 0

    @patch('pip._vendor.pkg_resources.working_set', workingset_freeze)
    def test_freeze_excludes(self, mock_dist_is_editable,
                             mock_dist_is_local,
                             mock_dist_in_usersite):
        mock_dist_is_editable.side_effect = self.dist_is_editable
        mock_dist_is_local.side_effect = self.dist_is_local
        mock_dist_in_usersite.side_effect = self.dist_in_usersite
        dists = get_installed_distributions(skip=freeze_excludes)
        assert len(dists) == 0
def test_find_command_folder_in_path(tmpdir):
    """
    If a folder named e.g. 'git' is in PATH, and find_command is looking for
    the 'git' executable, it should not match the folder, but rather keep
    looking.
    """
    # path_one/foo is a directory decoy; path_two/foo is a real file.
    tmpdir.join("path_one").mkdir()
    path_one = tmpdir / 'path_one'
    path_one.join("foo").mkdir()
    tmpdir.join("path_two").mkdir()
    path_two = tmpdir / 'path_two'
    path_two.join("foo").write("# nothing")
    found_path = find_command('foo', map(str, [path_one, path_two]))
    assert found_path == path_two / 'foo'
def test_does_not_find_command_because_there_is_no_path():
    """
    Test calling `pip.utils.find_command` when there is no PATH env variable
    """
    # Blank out the environment so find_command has no PATH to search;
    # the real environ is restored no matter what happens.
    environ_before = os.environ
    os.environ = {}
    try:
        try:
            find_command('anycommand')
        except BadCommand as e:
            # `except ... as e` replaces the dated sys.exc_info()[1]
            # idiom (a py2.5 compatibility leftover).
            assert e.args == ("Cannot find command 'anycommand'",)
        else:
            raise AssertionError("`find_command` should raise `BadCommand`")
    finally:
        os.environ = environ_before
@patch('os.pathsep', ':')
@patch('pip.utils.get_pathext')
@patch('os.path.isfile')
def test_find_command_trys_all_pathext(mock_isfile, getpath_mock):
    """
    If no pathext should check default list of extensions, if file does not
    exist.
    """
    # @patch applies bottom-up: os.path.isfile -> mock_isfile,
    # pip.utils.get_pathext -> getpath_mock.
    mock_isfile.return_value = False
    getpath_mock.return_value = os.pathsep.join([".COM", ".EXE"])

    # Expected isfile probes: each pathext variant, then the bare name.
    paths = [
        os.path.join('path_one', f) for f in ['foo.com', 'foo.exe', 'foo']
    ]
    expected = [((p,),) for p in paths]

    with pytest.raises(BadCommand):
        find_command("foo", "path_one")

    assert (
        mock_isfile.call_args_list == expected
    ), "Actual: %s\nExpected %s" % (mock_isfile.call_args_list, expected)
    assert getpath_mock.called, "Should call get_pathext"
@patch('os.pathsep', ':')
@patch('pip.utils.get_pathext')
@patch('os.path.isfile')
def test_find_command_trys_supplied_pathext(mock_isfile, getpath_mock):
    """
    If pathext supplied find_command should use all of its list of extensions
    to find file.
    """
    mock_isfile.return_value = False
    # get_pathext's value must be ignored when pathext is passed explicitly.
    getpath_mock.return_value = ".FOO"

    pathext = os.pathsep.join([".RUN", ".CMD"])

    paths = [
        os.path.join('path_one', f) for f in ['foo.run', 'foo.cmd', 'foo']
    ]
    expected = [((p,),) for p in paths]

    with pytest.raises(BadCommand):
        find_command("foo", "path_one", pathext)

    assert (
        mock_isfile.call_args_list == expected
    ), "Actual: %s\nExpected %s" % (mock_isfile.call_args_list, expected)
    assert not getpath_mock.called, "Should not call get_pathext"
class TestUnpackArchives(object):
    """
    test_tar.tgz/test_tar.zip have content as follows engineered to confirm 3
    things:
    1) confirm that reg files, dirs, and symlinks get unpacked
    2) permissions are not preserved (and go by the 022 umask)
    3) reg files with *any* execute perms, get chmod +x

    file.txt         600 regular file
    symlink.txt      777 symlink to file.txt
    script_owner.sh  700 script where owner can execute
    script_group.sh  610 script where group can execute
    script_world.sh  601 script where world can execute
    dir              744 directory
    dir/dirfile      622 regular file
    """

    def setup(self):
        self.tempdir = tempfile.mkdtemp()
        # Pin the umask so the expected modes below are deterministic.
        self.old_mask = os.umask(0o022)
        self.symlink_expected_mode = None

    def teardown(self):
        os.umask(self.old_mask)
        shutil.rmtree(self.tempdir, ignore_errors=True)

    def mode(self, path):
        # Permission bits only (file-type bits stripped).
        return stat.S_IMODE(os.stat(path).st_mode)

    def confirm_files(self):
        # expectations based on 022 umask set above and the unpack logic
        # that sets execute permissions, not preservation
        for fname, expected_mode, test in [
                ('file.txt', 0o644, os.path.isfile),
                ('symlink.txt', 0o644, os.path.isfile),
                ('script_owner.sh', 0o755, os.path.isfile),
                ('script_group.sh', 0o755, os.path.isfile),
                ('script_world.sh', 0o755, os.path.isfile),
                ('dir', 0o755, os.path.isdir),
                (os.path.join('dir', 'dirfile'), 0o644, os.path.isfile)]:
            path = os.path.join(self.tempdir, fname)
            if path.endswith('symlink.txt') and sys.platform == 'win32':
                # no symlinks created on windows
                continue
            assert test(path), path
            if sys.platform == 'win32':
                # the permissions tests below don't apply in windows
                # due to os.chmod being a noop
                continue
            mode = self.mode(path)
            assert mode == expected_mode, (
                "mode: %s, expected mode: %s" % (mode, expected_mode)
            )

    def test_unpack_tgz(self, data):
        """
        Test unpacking a *.tgz, and setting execute permissions
        """
        test_file = data.packages.join("test_tar.tgz")
        untar_file(test_file, self.tempdir)
        self.confirm_files()

    def test_unpack_zip(self, data):
        """
        Test unpacking a *.zip, and setting execute permissions
        """
        test_file = data.packages.join("test_zip.zip")
        unzip_file(test_file, self.tempdir)
        self.confirm_files()
class Failer:
    """Callable helper whose .call raises OSError for *duration* seconds."""

    def __init__(self, duration=1):
        # Timestamp after which calls stop failing.
        self.succeed_after = time.time() + duration

    def call(self, *args, **kw):
        """Raise OSError while the failure window is still open."""
        if time.time() < self.succeed_after:
            raise OSError("Failed")
def test_rmtree_retries(tmpdir, monkeypatch):
    """
    Test pip.utils.rmtree will retry failures
    """
    # shutil.rmtree fails for ~1 second; pip's rmtree must retry through
    # the failures and eventually succeed without raising.
    monkeypatch.setattr(shutil, 'rmtree', Failer(duration=1).call)
    rmtree('foo')
def test_rmtree_retries_for_3sec(tmpdir, monkeypatch):
    """
    Test pip.utils.rmtree will retry failures for no more than 3 sec
    """
    # The failure window (5s) outlives the retry budget, so the final
    # OSError must propagate.
    monkeypatch.setattr(shutil, 'rmtree', Failer(duration=5).call)
    with pytest.raises(OSError):
        rmtree('foo')
| |
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# Non-centered, sparse ground-truth centers used throughout this module to
# check both the dense and the CSR code paths.
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [1.0, 1.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
# Shared blob dataset, in dense (X) and sparse CSR (X_csr) form.
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
    # Fitting/predicting on uint8 data: predict() is expected to raise a
    # RuntimeWarning (dtype-related — TODO confirm against k_means_ source)
    # but must still agree with the fitted labels_.
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 2))
    X = (X * 10).astype(np.uint8)
    km = KMeans(n_init=1).fit(X)
    pred_x = assert_warns(RuntimeWarning, km.predict, X)
    assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
    """Check _labels_inertia against a plain-numpy reference.

    A brute-force nearest-center assignment is computed with numpy only
    and then compared with the library's dense and sparse (CSR) paths.
    """
    # pure numpy implementation as easily auditable reference gold
    # implementation
    rng = np.random.RandomState(42)
    noisy_centers = centers + rng.normal(size=centers.shape)
    # The np.int / np.infty aliases were removed from modern NumPy; use
    # the builtin int dtype and np.inf instead (same behavior).
    labels_gold = - np.ones(n_samples, dtype=int)
    mindist = np.empty(n_samples)
    mindist.fill(np.inf)
    for center_id in range(n_clusters):
        dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
        labels_gold[dist < mindist] = center_id
        mindist = np.minimum(dist, mindist)
    inertia_gold = mindist.sum()
    assert_true((mindist >= 0.0).all())
    assert_true((labels_gold != -1).all())

    # perform label assignment using the dense array input
    x_squared_norms = (X ** 2).sum(axis=1)
    labels_array, inertia_array = _labels_inertia(
        X, x_squared_norms, noisy_centers)
    assert_array_almost_equal(inertia_array, inertia_gold)
    assert_array_equal(labels_array, labels_gold)

    # perform label assignment using the sparse CSR input
    x_squared_norms_from_csr = row_norms(X_csr, squared=True)
    labels_csr, inertia_csr = _labels_inertia(
        X_csr, x_squared_norms_from_csr, noisy_centers)
    assert_array_almost_equal(inertia_csr, inertia_gold)
    assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
    """Check that dense and sparse minibatch update give the same results"""
    rand = np.random.RandomState(42)
    old_centers = centers + rand.normal(size=centers.shape)

    new_centers = old_centers.copy()
    new_centers_csr = old_centers.copy()

    counts = np.zeros(new_centers.shape[0], dtype=np.int32)
    counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)

    x_squared_norms = (X ** 2).sum(axis=1)
    x_squared_norms_csr = row_norms(X_csr, squared=True)

    # Scratch buffers handed to _mini_batch_step (renamed so they do not
    # shadow the py2 builtin `buffer`).
    update_buffer = np.zeros(centers.shape[1], dtype=np.double)
    update_buffer_csr = np.zeros(centers.shape[1], dtype=np.double)

    # extract a small minibatch
    X_mb = X[:10]
    X_mb_csr = X_csr[:10]
    x_mb_squared_norms = x_squared_norms[:10]
    x_mb_squared_norms_csr = x_squared_norms_csr[:10]

    # step 1: compute the dense minibatch update
    old_inertia, incremental_diff = _mini_batch_step(
        X_mb, x_mb_squared_norms, new_centers, counts,
        update_buffer, 1, None, random_reassign=False)
    assert_greater(old_inertia, 0.0)

    # compute the new inertia on the same batch to check that it decreased
    labels, new_inertia = _labels_inertia(
        X_mb, x_mb_squared_norms, new_centers)
    assert_greater(new_inertia, 0.0)
    assert_less(new_inertia, old_inertia)

    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers - old_centers) ** 2)
    assert_almost_equal(incremental_diff, effective_diff)

    # step 2: compute the sparse minibatch update
    old_inertia_csr, incremental_diff_csr = _mini_batch_step(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
        update_buffer_csr, 1, None, random_reassign=False)
    assert_greater(old_inertia_csr, 0.0)

    # compute the new inertia on the same batch to check that it decreased
    labels_csr, new_inertia_csr = _labels_inertia(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
    assert_greater(new_inertia_csr, 0.0)
    assert_less(new_inertia_csr, old_inertia_csr)

    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
    assert_almost_equal(incremental_diff_csr, effective_diff)

    # step 3: check that sparse and dense updates lead to the same results
    assert_array_equal(labels, labels_csr)
    assert_array_almost_equal(new_centers, new_centers_csr)
    assert_almost_equal(incremental_diff, incremental_diff_csr)
    assert_almost_equal(old_inertia, old_inertia_csr)
    assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # Shared assertions for any estimator fitted on the module-level blobs.
    # check that the number of clusters centers and distinct labels match
    # the expectation
    centers = km.cluster_centers_
    assert_equal(centers.shape, (n_clusters, n_features))

    labels = km.labels_
    assert_equal(np.unique(labels).shape[0], n_clusters)

    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(km.inertia_, 0.0)

    # check error on dataset being too small
    assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
    """KMeans with k-means++ init must recover the true clustering."""
    km = KMeans(init="k-means++", n_clusters=n_clusters,
                random_state=42).fit(X)
    _check_fitted_model(km)


def test_k_means_new_centers():
    # Explore the part of the code where a new center is reassigned
    X = np.array([[0, 0, 1, 1],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0]])
    labels = [0, 1, 2, 1, 1, 2]
    bad_centers = np.array([[+0, 1, 0, 0],
                            [.2, 0, .2, .2],
                            [+0, 0, 0, 0]])

    km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
                random_state=1)
    for this_X in (X, sp.coo_matrix(X)):
        km.fit(this_X)
        this_labels = km.labels_
        # Reorder the labels so that the first instance is in cluster 0,
        # the second in cluster 1, ...
        this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
        np.testing.assert_array_equal(this_labels, labels)


def _has_blas_lib(libname):
    # True when numpy's optimized BLAS info lists *libname*.
    from numpy.distutils.system_info import get_info
    return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
    if _has_blas_lib('openblas'):
        raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
    km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
                random_state=42).fit(X)
    _check_fitted_model(km)


def test_k_means_precompute_distances_flag():
    # check that a warning is raised if the precompute_distances flag is not
    # supported
    km = KMeans(precompute_distances="wrong")
    assert_raises(ValueError, km.fit, X)


def test_k_means_plus_plus_init_sparse():
    km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
    km.fit(X_csr)
    _check_fitted_model(km)


def test_k_means_random_init():
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    km.fit(X)
    _check_fitted_model(km)


def test_k_means_random_init_sparse():
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    km.fit(X_csr)
    _check_fitted_model(km)


def test_k_means_plus_plus_init_not_precomputed():
    km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
                precompute_distances=False).fit(X)
    _check_fitted_model(km)


def test_k_means_random_init_not_precomputed():
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
                precompute_distances=False).fit(X)
    _check_fitted_model(km)


def test_k_means_perfect_init():
    # Initializing at the true centers must converge to a perfect model.
    km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
                n_init=1)
    km.fit(X)
    _check_fitted_model(km)


def test_k_means_n_init():
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 2))

    # two regression tests on bad n_init argument
    # previous bug: n_init <= 0 threw non-informative TypeError (#3858)
    assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
    assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_mb_k_means_plus_plus_init_dense_array():
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42)
    mb_k_means.fit(X)
    _check_fitted_model(mb_k_means)


def test_mb_kmeans_verbose():
    # Smoke-test the verbose code path; stdout is swallowed and restored.
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42, verbose=1)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        mb_k_means.fit(X)
    finally:
        sys.stdout = old_stdout


def test_mb_k_means_plus_plus_init_sparse_matrix():
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42)
    mb_k_means.fit(X_csr)
    _check_fitted_model(mb_k_means)


def test_minibatch_init_with_large_k():
    mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number clusters is larger
    # than the init_size
    assert_warns(RuntimeWarning, mb_k_means.fit, X)


def test_minibatch_k_means_random_init_dense_array():
    # increase n_init to make random init stable enough
    mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                 random_state=42, n_init=10).fit(X)
    _check_fitted_model(mb_k_means)


def test_minibatch_k_means_random_init_sparse_csr():
    # increase n_init to make random init stable enough
    mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                 random_state=42, n_init=10).fit(X_csr)
    _check_fitted_model(mb_k_means)


def test_minibatch_k_means_perfect_init_dense_array():
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=1).fit(X)
    _check_fitted_model(mb_k_means)


def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
    # An explicit array init combined with n_init > 1 must warn.
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=10)
    assert_warns(RuntimeWarning, mb_k_means.fit, X)


def test_minibatch_k_means_perfect_init_sparse_csr():
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=1).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
    # check if identical initial clusters are reassigned
    # also a regression test for when there are more desired reassignments than
    # samples.
    zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
                                       cluster_std=1., random_state=42)
    # Zero out half the samples so many centers start out identical.
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
                                 init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)

    # do the same with batch-size > X.shape[0] (regression test)
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
                                 random_state=42, init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)


def test_minibatch_sensible_reassign_partial_fit():
    zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
                                       cluster_std=1., random_state=42)
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, verbose=10,
                                 init="random")
    # Repeated partial_fit calls should reassign duplicated zero centers.
    for i in range(100):
        mb_k_means.partial_fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
    # Give a perfect initialization, but a large reassignment_ratio,
    # as a result all the centers should be reassigned and the model
    # should not longer be good
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     random_state=42)
        mb_k_means.fit(this_X)
        score_before = mb_k_means.score(this_X)
        try:
            old_stdout = sys.stdout
            sys.stdout = StringIO()
            # Turn on verbosity to smoke test the display code
            _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                             mb_k_means.cluster_centers_,
                             mb_k_means.counts_,
                             np.zeros(X.shape[1], np.double),
                             False, distances=np.zeros(X.shape[0]),
                             random_reassign=True, random_state=42,
                             reassignment_ratio=1, verbose=True)
        finally:
            sys.stdout = old_stdout
        # Forced reassignment must degrade the previously fitted model.
        assert_greater(score_before, mb_k_means.score(this_X))

    # Give a perfect initialization, with a small reassignment_ratio,
    # no center should be reassigned
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     init=centers.copy(),
                                     random_state=42, n_init=1)
        mb_k_means.fit(this_X)
        clusters_before = mb_k_means.cluster_centers_
        # Turn on verbosity to smoke test the display code
        _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                         mb_k_means.cluster_centers_,
                         mb_k_means.counts_,
                         np.zeros(X.shape[1], np.double),
                         False, distances=np.zeros(X.shape[0]),
                         random_reassign=True, random_state=42,
                         reassignment_ratio=1e-15)
        assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
    # Test for the case that the number of clusters to reassign is bigger
    # than the batch_size
    n_samples = 550
    rnd = np.random.RandomState(42)
    X = rnd.uniform(size=(n_samples, 10))
    # Check that the fit works if n_clusters is bigger than the batch_size.
    # Run the test with 550 clusters and 550 samples, because it turned out
    # that this values ensure that the number of clusters to reassign
    # is always bigger than the batch_size
    n_clusters = 550
    MiniBatchKMeans(n_clusters=n_clusters,
                    batch_size=100,
                    init_size=n_samples,
                    random_state=42).fit(X)


def test_sparse_mb_k_means_callable_init():

    def test_init(X, k, random_state):
        # Callable init that ignores its arguments and always returns the
        # module-level 3-row centers array.
        return centers

    # Small test to check that giving the wrong number of centers
    # raises a meaningful error
    assert_raises(ValueError,
                  MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)

    # Now check that the fit actually works
    mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
                                 random_state=42).fit(X_csr)
    _check_fitted_model(mb_k_means)


def test_mini_batch_k_means_random_init_partial_fit():
    km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)

    # use the partial_fit API for online learning
    for X_minibatch in np.array_split(X, 10):
        km.partial_fit(X_minibatch)

    # compute the labeling on the complete dataset
    labels = km.predict(X)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 batch_size=10, random_state=42,
                                 n_init=1).fit(X)
    # The default effective init size is 3 * batch_size.
    assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
    _check_fitted_model(mb_k_means)


def test_minibatch_tol():
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
                                 random_state=42, tol=.01).fit(X)
    _check_fitted_model(mb_k_means)


def test_minibatch_set_init_size():
    # An init_size larger than n_samples is clipped to n_samples, while
    # the constructor parameter itself is kept as passed.
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 init_size=666, random_state=42,
                                 n_init=1).fit(X)
    assert_equal(mb_k_means.init_size, 666)
    assert_equal(mb_k_means.init_size_, n_samples)
    _check_fitted_model(mb_k_means)


def test_k_means_invalid_init():
    km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
    assert_raises(ValueError, km.fit, X)


def test_mini_match_k_means_invalid_init():
    km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
    assert_raises(ValueError, km.fit, X)


def test_k_means_copyx():
    """Check if copy_x=False returns nearly equal X after de-centering."""
    my_X = X.copy()
    km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
    km.fit(my_X)
    _check_fitted_model(km)

    # check if my_X is centered
    assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
"""Check k_means with a bad initialization does not yield a singleton
Starting with bad centers that are quickly ignored should not
result in a repositioning of the centers to the center of mass that
would lead to collapsed centers which in turns make the clustering
dependent of the numerical unstabilities.
"""
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not been collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
    """predict() agrees with labels_ on centroids and training samples."""
    estimator = KMeans(n_clusters=n_clusters, random_state=42)
    estimator.fit(X)
    # Centroids must be assigned to their own cluster index.
    assert_array_equal(estimator.predict(estimator.cluster_centers_),
                       np.arange(n_clusters))
    # Re-predicting the training set reproduces the stored labels.
    assert_array_equal(estimator.predict(X), estimator.labels_)
    # fit_predict must agree with fit followed by predict.
    assert_array_equal(estimator.fit_predict(X), estimator.labels_)
def test_score():
    """More iterations can only improve (increase) the k-means score."""
    score_one_iter = KMeans(n_clusters=n_clusters, max_iter=1,
                            random_state=42).fit(X).score(X)
    score_ten_iter = KMeans(n_clusters=n_clusters, max_iter=10,
                            random_state=42).fit(X).score(X)
    assert_greater(score_ten_iter, score_one_iter)
def test_predict_minibatch_dense_input():
    """MiniBatchKMeans.predict is self-consistent on dense input."""
    model = MiniBatchKMeans(n_clusters=n_clusters, random_state=40)
    model.fit(X)
    # Centroids map onto their own indices.
    assert_array_equal(model.predict(model.cluster_centers_),
                       np.arange(n_clusters))
    # Training samples re-predict to their stored labels.
    assert_array_equal(model.predict(X), model.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
    """A model fitted on sparse data predicts consistently on sparse/dense."""
    model = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
                            n_init=10).fit(X_csr)
    # Training samples re-predict to their stored labels.
    assert_array_equal(model.predict(X_csr), model.labels_)
    # Centroids map onto their own indices.
    assert_array_equal(model.predict(model.cluster_centers_),
                       np.arange(n_clusters))
    # A model trained on sparse input must also handle dense input.
    assert_array_equal(model.predict(X), model.labels_)
def test_predict_minibatch_random_init_sparse_input():
    """Same sparse/dense consistency check with random initialization."""
    model = MiniBatchKMeans(n_clusters=n_clusters, init='random',
                            n_init=10).fit(X_csr)
    # Training samples re-predict to their stored labels.
    assert_array_equal(model.predict(X_csr), model.labels_)
    # Centroids map onto their own indices.
    assert_array_equal(model.predict(model.cluster_centers_),
                       np.arange(n_clusters))
    # A model trained on sparse input must also handle dense input.
    assert_array_equal(model.predict(X), model.labels_)
def test_input_dtypes():
    """All supported input container/dtype combinations give perfect labels."""
    X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
    X_int = np.array(X_list, dtype=np.int32)
    X_int_csr = sp.csr_matrix(X_int)
    init_int = X_int[:2]

    km_inputs = (X_list, X_int)
    mbkm_inputs = (X_list, X_int, X_int_csr)
    # NOTE: the fit order below matches the historical order exactly, since
    # estimators without random_state consume the global numpy RNG.
    fitted_models = (
        [KMeans(n_clusters=2).fit(data) for data in km_inputs]
        + [KMeans(n_clusters=2, init=init_int, n_init=1).fit(data)
           for data in km_inputs]
        # mini batch kmeans is very unstable on such a small dataset hence
        # we use many inits
        + [MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(data)
           for data in mbkm_inputs]
        + [MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int,
                           n_init=1).fit(data) for data in mbkm_inputs]
    )

    expected_labels = [0, 1, 1, 0, 0, 1]
    scores = np.array([v_measure_score(expected_labels, model.labels_)
                       for model in fitted_models])
    # A v-measure of 1.0 means the labeling is perfect up to a permutation.
    assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
    """transform() gives zero distance to own centroid, positive otherwise."""
    estimator = KMeans(n_clusters=n_clusters)
    estimator.fit(X)
    distances = estimator.transform(estimator.cluster_centers_)
    for row in range(n_clusters):
        for col in range(n_clusters):
            if row == col:
                # A centroid is at distance zero from itself.
                assert_equal(distances[row, col], 0)
            else:
                assert_greater(distances[row, col], 0)
def test_fit_transform():
    """fit(X).transform(X) and fit_transform(X) must be identical."""
    via_fit = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
    via_fit_transform = KMeans(n_clusters=3, random_state=51).fit_transform(X)
    assert_array_equal(via_fit, via_fit_transform)
def test_n_init():
    """Check that increasing the number of inits increases the quality."""
    n_runs = 5
    n_init_range = [1, 5, 10]
    all_inertia = np.zeros((len(n_init_range), n_runs))
    for row, n_init in enumerate(n_init_range):
        for seed in range(n_runs):
            fitted = KMeans(n_clusters=n_clusters, init="random",
                            n_init=n_init, random_state=seed).fit(X)
            all_inertia[row, seed] = fitted.inertia_
    mean_inertia = all_inertia.mean(axis=1)
    failure_msg = ("Inertia %r should be decreasing"
                   " when n_init is increasing.") % list(mean_inertia)
    # Averaged inertia must be non-increasing along the n_init axis.
    for left, right in zip(mean_inertia, mean_inertia[1:]):
        assert_true(left >= right, failure_msg)
def test_k_means_function():
    """Exercise the functional k_means() interface directly."""
    # Silence the verbose output emitted during the fit; always restore
    # stdout even if k_means raises.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
                                                   verbose=True)
    finally:
        sys.stdout = old_stdout

    assert_equal(cluster_centers.shape, (n_clusters, n_features))
    assert_equal(np.unique(labels).shape[0], n_clusters)

    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(inertia, 0.0)

    # check warning when centers are passed
    assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
                 init=cluster_centers)

    # too many clusters desired
    assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import sys
import os
try:
from distutils.core import setup
from distutils.extension import Extension
except ImportError:
print 'The distutils package is required to build or install RMG Py.'
try:
from Cython.Distutils import build_ext
import Cython.Compiler.Options
except ImportError:
print 'Cython (http://www.cython.org/) is required to build or install RMG Py.'
try:
import numpy
except ImportError:
print 'NumPy (http://numpy.scipy.org/) is required to build or install RMG Py.'
# Create annotated HTML files for each of the Cython modules
Cython.Compiler.Options.annotate = True
# Turn on profiling capacity for all Cython modules
#Cython.Compiler.Options.directive_defaults['profile'] = True
################################################################################
def getMainExtensionModules():
    """Return the Extension list for the core ``rmgpy`` package.

    ``.pyx`` entries are Cython-only modules; ``.py`` entries are Python
    modules listed with the repository root ('.') on the include path.
    """
    return [
        # Kinetics
        Extension('rmgpy.kinetics.arrhenius', ['rmgpy/kinetics/arrhenius.pyx']),
        Extension('rmgpy.kinetics.chebyshev', ['rmgpy/kinetics/chebyshev.pyx']),
        Extension('rmgpy.kinetics.kineticsdata', ['rmgpy/kinetics/kineticsdata.pyx']),
        Extension('rmgpy.kinetics.falloff', ['rmgpy/kinetics/falloff.pyx']),
        Extension('rmgpy.kinetics.model', ['rmgpy/kinetics/model.pyx']),
        Extension('rmgpy.kinetics.tunneling', ['rmgpy/kinetics/tunneling.pyx']),
        # Molecules and molecular representations
        Extension('rmgpy.molecule.atomtype', ['rmgpy/molecule/atomtype.py'], include_dirs=['.']),
        Extension('rmgpy.molecule.element', ['rmgpy/molecule/element.py'], include_dirs=['.']),
        Extension('rmgpy.molecule.graph', ['rmgpy/molecule/graph.pyx'], include_dirs=['.']),
        Extension('rmgpy.molecule.group', ['rmgpy/molecule/group.py'], include_dirs=['.']),
        Extension('rmgpy.molecule.molecule', ['rmgpy/molecule/molecule.py'], include_dirs=['.']),
        Extension('rmgpy.molecule.symmetry', ['rmgpy/molecule/symmetry.py'], include_dirs=['.']),
        Extension('rmgpy.molecule.vf2', ['rmgpy/molecule/vf2.pyx'], include_dirs=['.']),
        # Pressure dependence
        Extension('rmgpy.pdep.collision', ['rmgpy/pdep/collision.pyx']),
        Extension('rmgpy.pdep.configuration', ['rmgpy/pdep/configuration.pyx']),
        Extension('rmgpy.pdep.me', ['rmgpy/pdep/me.pyx']),
        Extension('rmgpy.pdep.msc', ['rmgpy/pdep/msc.pyx']),
        Extension('rmgpy.pdep.reaction', ['rmgpy/pdep/reaction.pyx']),
        Extension('rmgpy.pdep.rs', ['rmgpy/pdep/rs.pyx']),
        Extension('rmgpy.pdep.cse', ['rmgpy/pdep/cse.pyx']),
        # Statistical mechanics
        Extension('rmgpy.statmech.conformer', ['rmgpy/statmech/conformer.pyx']),
        Extension('rmgpy.statmech.mode', ['rmgpy/statmech/mode.pyx']),
        Extension('rmgpy.statmech.rotation', ['rmgpy/statmech/rotation.pyx']),
        Extension('rmgpy.statmech.schrodinger', ['rmgpy/statmech/schrodinger.pyx']),
        Extension('rmgpy.statmech.torsion', ['rmgpy/statmech/torsion.pyx']),
        Extension('rmgpy.statmech.translation', ['rmgpy/statmech/translation.pyx']),
        Extension('rmgpy.statmech.vibration', ['rmgpy/statmech/vibration.pyx']),
        # Thermodynamics
        Extension('rmgpy.thermo.thermodata', ['rmgpy/thermo/thermodata.pyx']),
        Extension('rmgpy.thermo.model', ['rmgpy/thermo/model.pyx']),
        Extension('rmgpy.thermo.nasa', ['rmgpy/thermo/nasa.pyx']),
        Extension('rmgpy.thermo.wilhoit', ['rmgpy/thermo/wilhoit.pyx']),
        # Miscellaneous
        Extension('rmgpy.constants', ['rmgpy/constants.py'], include_dirs=['.']),
        Extension('rmgpy.quantity', ['rmgpy/quantity.py'], include_dirs=['.']),
        Extension('rmgpy.reaction', ['rmgpy/reaction.py'], include_dirs=['.']),
        Extension('rmgpy.species', ['rmgpy/species.py'], include_dirs=['.']),
    ]
def getMeasureExtensionModules():
    """Return the extension modules needed to build MEASURE."""
    # Cython-only modules, in build order.
    measure_pyx = ['_network', 'collision', 'reaction', 'msc', 'rs',
                   'cse', 'me']
    modules = [Extension('rmgpy.measure.%s' % name,
                         ['rmgpy/measure/%s.pyx' % name], include_dirs=['.'])
               for name in measure_pyx]
    # Shared pure-Python modules compiled alongside.
    for name in ('constants', 'quantity'):
        modules.append(Extension('rmgpy.%s' % name,
                                 ['rmgpy/%s.py' % name], include_dirs=['.']))
    return modules
def getSolverExtensionModules():
    """Return the Cython extension modules for the reactor solvers."""
    return [Extension('rmgpy.solver.' + name,
                      ['rmgpy/solver/%s.pyx' % name], include_dirs=['.'])
            for name in ('base', 'simple', 'liquid')]
def getCanthermExtensionModules():
    """Return the extension modules needed by the CanTherm application."""
    # Cython-only modules, grouped by subpackage, in build order.
    pyx_groups = (
        # Kinetics
        ('kinetics', ['arrhenius', 'chebyshev', 'kineticsdata', 'falloff',
                      'model', 'tunneling']),
        # Pressure dependence
        ('pdep', ['collision', 'configuration', 'me', 'msc', 'reaction',
                  'rs', 'cse']),
        # Statistical mechanics
        ('statmech', ['conformer', 'mode', 'rotation', 'schrodinger',
                      'torsion', 'translation', 'vibration']),
        # Thermodynamics
        ('thermo', ['thermodata', 'model', 'nasa', 'wilhoit']),
    )
    modules = []
    for package, names in pyx_groups:
        for name in names:
            modules.append(Extension('rmgpy.%s.%s' % (package, name),
                                     ['rmgpy/%s/%s.pyx' % (package, name)]))
    # Miscellaneous pure-Python modules, with the repo root on the include path.
    for name in ('constants', 'quantity'):
        modules.append(Extension('rmgpy.%s' % name,
                                 ['rmgpy/%s.py' % name], include_dirs=['.']))
    return modules
################################################################################

# Select which extension modules to build from an extra command-line word;
# the word is removed from sys.argv before distutils parses the rest.
ext_modules = []
if 'install' in sys.argv:
    # This is so users can still do simply `python setup.py install`
    ext_modules.extend(getMainExtensionModules())
    ext_modules.extend(getMeasureExtensionModules())
    ext_modules.extend(getSolverExtensionModules())
elif 'main' in sys.argv:
    # This is for `python setup.py build_ext main`
    sys.argv.remove('main')
    ext_modules.extend(getMainExtensionModules())
elif 'measure' in sys.argv:
    # This is for `python setup.py build_ext measure`
    sys.argv.remove('measure')
    ext_modules.extend(getMeasureExtensionModules())
elif 'solver' in sys.argv:
    # This is for `python setup.py build_ext solver`
    sys.argv.remove('solver')
    ext_modules.extend(getSolverExtensionModules())
elif 'cantherm' in sys.argv:
    # This is for `python setup.py build_ext cantherm`
    sys.argv.remove('cantherm')
    ext_modules.extend(getMainExtensionModules())
    ext_modules.extend(getCanthermExtensionModules())
elif 'minimal' in sys.argv:
    # This starts with the full install list, but removes anything that has a pure python mode
    # i.e. in only includes things whose source is .pyx
    sys.argv.remove('minimal')
    temporary_list = []
    temporary_list.extend(getMainExtensionModules())
    temporary_list.extend(getMeasureExtensionModules())
    temporary_list.extend(getSolverExtensionModules())
    for module in temporary_list:
        for source in module.sources:
            # Keep only modules whose source file is Cython-only (.pyx).
            if os.path.splitext(source)[1] == '.pyx':
                ext_modules.append(module)

# Command-line entry points installed alongside the package.
scripts=['cantherm.py', 'measure.py', 'rmg.py']

# Initiate the build and/or installation
setup(name='RMG Py',
    version='0.1.0',
    description='Reaction Mechanism Generator',
    author='William H. Green and the RMG Team',
    author_email='rmg_dev@mit.edu',
    url='http://rmg.mit.edu/',
    packages=['rmgpy'],
    scripts=scripts,
    cmdclass = {'build_ext': build_ext},
    ext_modules = ext_modules,
    include_dirs=['.', numpy.get_include()],
)
| |
#!/usr/bin/env python
# encoding: utf-8
import os
from efl.ecore import Timer, ECORE_CALLBACK_CANCEL, ECORE_CALLBACK_RENEW, \
AnimatorTimeline
from efl.evas import EVAS_HINT_EXPAND, EVAS_HINT_FILL, \
EVAS_ASPECT_CONTROL_VERTICAL, EVAS_CALLBACK_MOUSE_MOVE, \
EVAS_CALLBACK_MOUSE_UP, EVAS_CALLBACK_MOUSE_DOWN, \
EVAS_EVENT_FLAG_ON_HOLD
from efl import elementary
from efl.elementary.label import Label
from efl.elementary.frame import Frame
from efl.elementary.list import List
from efl.elementary.box import Box
from efl.elementary.window import StandardWindow
from efl.elementary.icon import Icon
from efl.elementary.genlist import Genlist, GenlistItemClass, \
ELM_SEL_FORMAT_TARGETS, ELM_GENLIST_ITEM_NONE, DragUserInfo
from efl.elementary.gengrid import Gengrid, GengridItemClass
from efl.elementary.configuration import Configuration
# Elementary configuration, used below for scale-aware grid item sizing.
conf = Configuration()

# Convenience size-hint tuples for expanding/filling widgets.
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
FILL_BOTH = EVAS_HINT_FILL, EVAS_HINT_FILL

# Item images are resolved relative to this script's directory.
script_path = os.path.dirname(os.path.abspath(__file__))
img_path = os.path.join(script_path, "images")

# File names cycled through when populating the lists/grids.
img = (
    "panel_01.jpg",
    "plant_01.jpg",
    "rock_01.jpg",
    "rock_02.jpg",
    "sky_01.jpg",
    "sky_02.jpg",
    "sky_03.jpg",
    "sky_04.jpg",
    "wood_01.jpg",
)
class AnimIconSt:
    # Per-icon animation state: where the icon started on the canvas and
    # the Icon widget being moved toward the mouse pointer.
    start_x = 0
    start_y = 0
    o = None  # the animated Icon widget
class DragAnimSt:
    """Mutable state for one pending/running drag-start animation."""

    def __init__(self):
        self.icwin = None
        self.e = None    # canvas (Evas) the drag happens on
        self.mdx = 0     # Mouse-down x
        self.mdy = 0     # Mouse-down y
        # List of icons to animate (anim_icon_st). Must be created per
        # instance: as a class attribute the list was shared by every
        # drag, so each new drag kept appending to the same list.
        self.icons = []
        self.tm = None   # pending start Timer
        self.ea = None   # running animator
        self.gl = None   # genlist the mouse callbacks were attached to
# Seconds the mouse must stay pressed before the drag animation starts.
DRAG_TIMEOUT = 0.3
# Duration (seconds) used for the drag-related animations below.
ANIM_TIME = 0.5
class DndGenlistItemClass(GenlistItemClass):
    """Genlist item class: the item data string is both label and image."""

    def text_get(self, obj, part, data, *args):
        return data

    def content_get(self, obj, part, data, *args):
        if part != "elm.swallow.icon":
            return None
        ic = Icon(obj, file=os.path.join(img_path, data),
                  size_hint_aspect=(EVAS_ASPECT_CONTROL_VERTICAL, 1, 1))
        ic.show()
        return ic

# Shared singleton item class used by every genlist in this demo.
itc1 = DndGenlistItemClass()
class DndGengridItemClass(GengridItemClass):
    """Gengrid item class: the item data string is both label and image."""

    def text_get(self, obj, part, data, *args):
        return data

    def content_get(self, obj, part, data, *args):
        if part != "elm.swallow.icon":
            return None
        ic = Icon(obj, file=os.path.join(img_path, data),
                  size_hint_aspect=(EVAS_ASPECT_CONTROL_VERTICAL, 1, 1))
        ic.show()
        return ic

# Shared singleton item class used by every gengrid in this demo.
gic = DndGengridItemClass()
def win_del(obj, widget):
    """Window delete callback: detach both DnD roles from *widget*."""
    print("will del <%s>" % widget)
    widget.drop_item_container_del()
    widget.drag_item_container_del()
    #elementary.exit()
def gl_item_getcb(gl, x, y):
    """Return (item, xposret, yposret) for the genlist item under (x, y)."""
    gli, yposret = gl.at_xy_item_get(x, y)
    if gli is None:
        print("over none, yposret=%i" % yposret)
    else:
        print("over <%s>, gli=%r yposret=%i" % (
            gli.part_text_get("elm.text"), gli, yposret))
    # A genlist has no horizontal sub-position, hence xposret is None.
    return gli, None, yposret
def grid_item_getcb(grid, x, y):
    """Return (item, xposret, yposret) for the gengrid item under (x, y)."""
    item, xposret, yposret = grid.at_xy_item_get(x, y)
    if item is not None:
        print("over <%s>, item=%r xposret=%i yposret=%i" % (
            item.part_text_get("elm.text"), item, xposret, yposret))
    else:
        # BUG FIX: the positions were passed as extra print() arguments
        # instead of being %-formatted into the message.
        print("over none, xposret=%i yposret=%i" % (xposret, yposret))
    return item, xposret, yposret
def gl_dropcb(obj, it, ev, xposret, yposret, data):
    """Drop handler for the genlist: insert each dragged label string.

    Returns True when the drop was handled, False otherwise.
    """
    if ev.data is None:
        return False
    # Payload looks like "file://#item1##item2#": strip the scheme prefix
    # and the trailing empty field, keeping the inner parts.
    for label in ev.data.split("#")[1:-1]:
        print("Item %s" % label)
        if yposret == -1:
            # Dropped above the item: insert before it.
            obj.item_insert_before(itc1, label, before_item=it,
                                   flags=ELM_GENLIST_ITEM_NONE)
        elif yposret in (0, 1):
            # Dropped on/below the item: insert after it (or append when
            # the list is empty).
            if not it:
                it = obj.last_item
            if it:
                obj.item_insert_after(itc1, label, after_item=it,
                                      flags=ELM_GENLIST_ITEM_NONE)
            else:
                obj.item_append(itc1, label, flags=ELM_GENLIST_ITEM_NONE)
        else:
            return False
    return True
def grid_dropcb(obj, it, ev, xposret, yposret, data):
    """Drop handler for the gengrid: insert/append each dragged label.

    Returns True when the drop was handled, False otherwise.
    """
    if ev.data is None:
        return False
    # Payload looks like "file://#item1##item2#": keep the inner parts.
    for label in ev.data.split("#")[1:-1]:
        print("Item %s" % label)
        if not it:
            it = obj.last_item
        if it:
            # Keep inserting after the previously added item.
            it = obj.item_insert_after(gic, label, after_item=it)
        else:
            it = obj.item_append(gic, label)
    return True
def anim_st_free(anim_st):
    """Stop a pending/running drag animation and release its resources."""
    if anim_st is None:
        return
    # Detach the mouse callbacks that were driving/cancelling the animation.
    anim_st.gl.event_callback_del(EVAS_CALLBACK_MOUSE_MOVE, gl_obj_mouse_move)
    anim_st.gl.event_callback_del(EVAS_CALLBACK_MOUSE_UP, gl_obj_mouse_up)
    if anim_st.tm is not None:
        anim_st.tm.delete()
        anim_st.tm = None
    if anim_st.ea is not None:
        anim_st.ea.delete()
        anim_st.ea = None
    # Drop every floating icon created for the animation.
    for st in anim_st.icons:
        st.o.delete()
def drag_anim_play(pos, anim_st):
    """Animator callback: slide the icons toward the pointer as *pos* -> 1."""
    if anim_st is None:
        return ECORE_CALLBACK_CANCEL
    if pos > 0.99:
        # Animation finished: hide the icons and tear everything down.
        anim_st.ea = None  # Avoid deleting on mouse up
        for st in anim_st.icons:
            st.o.hide()
        anim_st_free(anim_st)
        return ECORE_CALLBACK_CANCEL
    for st in anim_st.icons:
        w, h = st.o.size
        xm, ym = anim_st.e.pointer_canvas_xy
        # Interpolate from the icon's origin toward (pointer - icon/2).
        st.o.move(st.start_x + (pos * (xm - (st.start_x + (w / 2)))),
                  st.start_y + (pos * (ym - (st.start_y + (h / 2)))))
    return ECORE_CALLBACK_RENEW
def gl_anim_start(anim_st):
    """Timer callback: build floating icons and start the pre-drag anim."""
    items = list(anim_st.gl.selected_items)
    # Include the pressed item even when it is not part of the selection.
    gli, yposret = anim_st.gl.at_xy_item_get(anim_st.mdx, anim_st.mdy)
    if gli is not None and gli not in items:
        items.append(gli)
    for gli in items:
        # Clone each item's icon so it can be animated freely.
        o = gli.part_content_get("elm.swallow.icon")
        if o is None:
            continue
        st = AnimIconSt()
        ic = Icon(anim_st.gl, file=o.file, size_hint_align=FILL_BOTH,
                  size_hint_weight=EXPAND_BOTH, pos=o.pos, size=o.size)
        st.start_x, st.start_y = o.pos
        ic.show()
        st.o = ic
        anim_st.icons.append(st)
    # The one-shot timer has fired; forget it and start the animator.
    anim_st.tm = None
    anim_st.ea = AnimatorTimeline(drag_anim_play, DRAG_TIMEOUT, anim_st)
    return ECORE_CALLBACK_CANCEL
def gl_obj_mouse_up(obj, event_info, data):
    # Cancel any drag waiting to start on timeout.
    # *data* is the DragAnimSt created on mouse-down.
    anim_st_free(data)
def gl_obj_mouse_move(obj, event_info, data):
    # Cancel any drag waiting to start on timeout.
    # *data* is the DragAnimSt created on mouse-down; note the free runs
    # regardless of the on-hold flag, which only triggers the print.
    if event_info.event_flags & EVAS_EVENT_FLAG_ON_HOLD:
        print("event on hold")
    anim_st_free(data)
def gl_obj_mouse_down(obj, event_info, data):
    """Mouse-down on the genlist: arm a timer that starts the drag anim."""
    state = DragAnimSt()
    state.e = obj.evas
    state.mdx = event_info.position.canvas.x
    state.mdy = event_info.position.canvas.y
    state.gl = data
    state.tm = Timer(DRAG_TIMEOUT, gl_anim_start, state)
    # Mouse-up/move cancel the pending animation before it starts.
    data.event_callback_add(EVAS_CALLBACK_MOUSE_UP, gl_obj_mouse_up, state)
    data.event_callback_add(EVAS_CALLBACK_MOUSE_MOVE, gl_obj_mouse_move,
                            state)
# END - Handling drag start animation
def gl_dragdone(obj, doaccept, data):
    """Drag-done callback: delete the source items the target accepted."""
    if not doaccept:
        return
    # Remove items dragged out (accepted by target).
    for item in data:
        item.delete()
def gl_createicon(win, xoff, yoff, data):
    """Build the 30x30 icon shown under the pointer while dragging.

    Returns (icon, xoff, yoff), or None when the item has no icon part.
    """
    source = data.part_content_get("elm.swallow.icon")
    if source is None:
        return
    w = h = 30
    f, g = source.file
    xm, ym = source.evas.pointer_canvas_xy
    # Center the drag icon on the pointer when offsets were supplied.
    # NOTE(review): the `is not None` guards look inverted for an
    # "output default" pattern — confirm against the C original.
    if xoff is not None:
        xoff = xm - (w / 2)
    if yoff is not None:
        yoff = ym - (h / 2)
    drag_icon = Icon(win, file=(f, g), size_hint_weight=EXPAND_BOTH,
                     size_hint_align=FILL_BOTH)
    if (xoff is not None) and (yoff is not None):
        drag_icon.move(xoff, yoff)
    drag_icon.resize(w, h)
    return drag_icon, xoff, yoff
def gl_icons_get(gl):
    """Return floating Icon copies of the selected genlist items' icons."""
    icons = []
    xm, ym = gl.evas.pointer_canvas_xy
    items = list(gl.selected_items)
    # Also animate the pressed item when it is not part of the selection.
    gli, yposret = gl.at_xy_item_get(xm, ym)
    if gli is not None and gli not in items:
        items.append(gli)
    for item in items:
        o = item.part_content_get("elm.swallow.icon")
        if o is None:
            continue
        f, g = o.file
        x, y, w, h = o.geometry
        ic = Icon(gl, file=(f, g), size_hint_align=FILL_BOTH,
                  size_hint_weight=EXPAND_BOTH, pos=(x, y), size=(w, h))
        ic.show()
        icons.append(ic)
    return icons
def gl_get_drag_data(gl, it):
    """Compose the DnD payload string from all selected genlist items.

    Returns (drag_data, items): the "file://#label#...#label#" payload and
    the list of items it was built from.
    """
    items = list(gl.selected_items)
    # The pressed item takes part even when it is not selected.
    if it is not None and it not in items:
        items.append(it)
    drag_data = None
    if items is not None:
        parts = ["file://"]
        for item in items:
            label = item.part_text_get("elm.text")
            if label is not None:
                parts.append("#%s#" % label)
        drag_data = "".join(parts)
        print("Sending <%s>" % drag_data)
    return drag_data, items
def grid_get_drag_data(gg, it):
    """Compose the DnD payload string from the gengrid's selection.

    Returns (drag_data, items): the "file://#label#...#label#" payload and
    the list of items it was built from.
    """
    items = list(gg.selected_items)
    if it is not None and it not in items:
        # Include the pressed item if it is not already selected.
        items.append(it)
    drag_data = None
    if items is not None:
        labels = (i.part_text_get("elm.text") for i in items)
        drag_data = "file://" + "".join(
            "#%s#" % label for label in labels if label is not None)
        print("Sending <%s>" % drag_data)
    return drag_data, items
def gl_dnd_default_anim_data_getcb(gl, it, info):
    """Fill *info* before a default-anim drag starts; mouse-down was on *it*.

    Returns the filled info object, or None when there is nothing to drag.
    """
    info.format = ELM_SEL_FORMAT_TARGETS
    info.createicon = gl_createicon
    info.createdata = it
    info.icons = gl_icons_get(gl)
    info.dragdone = gl_dragdone
    # The payload covers ALL selected items; keep the item list around so
    # they can be deleted once the drop is accepted.
    info.data, info.donecbdata = gl_get_drag_data(gl, it)
    info.acceptdata = info.donecbdata
    return info if info.data is not None else None
def gl_data_getcb(gl, it, info):
    """Fill *info* for the user-anim drag; True when a payload exists."""
    info.format = ELM_SEL_FORMAT_TARGETS
    info.createicon = gl_createicon
    info.createdata = it
    info.dragdone = gl_dragdone
    # The payload covers ALL selected items; keep the item list around so
    # they can be deleted once the drop is accepted.
    info.data, info.donecbdata = gl_get_drag_data(gl, it)
    info.acceptdata = info.donecbdata
    return info.data is not None
def grid_icons_get(grid):
    """Return floating Icon copies for the gengrid's selected items."""
    icons = []
    xm, ym = grid.evas.pointer_canvas_xy
    items = list(grid.selected_items)
    print(items)
    gli, xposret, yposret = grid.at_xy_item_get(xm, ym)
    # Include the pressed item if it is not part of the selection.
    if gli is not None and gli not in items:
        items.append(gli)
    print(items)
    for gli in items:
        o = gli.part_content_get("elm.swallow.icon")
        if o is None:
            continue
        ic = Icon(grid, file=o.file, pos=o.pos, size=o.size,
                  size_hint_align=FILL_BOTH, size_hint_weight=EXPAND_BOTH)
        ic.show()
        icons.append(ic)
    return icons
def grid_data_getcb(grid, it, info):
    """Fill *info* before a gengrid drag starts; True when data exists."""
    info.format = ELM_SEL_FORMAT_TARGETS
    info.createicon = gl_createicon
    info.createdata = it
    info.icons = grid_icons_get(grid)
    info.dragdone = gl_dragdone
    # The payload covers ALL selected items; keep the item list around so
    # they can be deleted once the drop is accepted.
    info.data, info.donecbdata = grid_get_drag_data(grid, it)
    info.acceptdata = info.donecbdata
    return bool(info.data)
def dnd_genlist_default_anim_clicked(obj, item=None):
    """Open a window with two genlists wired for default-anim drag & drop."""
    win = StandardWindow("dnd-genlist-default-anim",
        "DnD-Genlist-Default-Anim", autodel=True, size=(680, 800))
    bxx = Box(win, horizontal=True, size_hint_weight=EXPAND_BOTH)
    win.resize_object_add(bxx)
    bxx.show()
    # Two side-by-side lists so items can be dragged between them.
    for j in range(2):
        gl = Genlist(win, multi_select=True, size_hint_weight=EXPAND_BOTH,
                     size_hint_align=FILL_BOTH)
        # START Drag and Drop handling
        win.callback_delete_request_add(win_del, gl)
        gl.drop_item_container_add(ELM_SEL_FORMAT_TARGETS, gl_item_getcb,
                                   dropcb=gl_dropcb)
        gl.drag_item_container_add(ANIM_TIME, DRAG_TIMEOUT, gl_item_getcb,
                                   gl_dnd_default_anim_data_getcb)
        # FIXME: This causes genlist to resize the horiz axis very slowly :(
        # Reenable this and resize the window horizontally, then try
        # to resize it back.
        #elm_genlist_mode_set(gl, ELM_LIST_LIMIT)
        bxx.pack_end(gl)
        gl.show()
        # Populate by cycling through the bundled images.
        for i in range (20):
            gl.item_append(itc1, img[i % 9], flags=ELM_GENLIST_ITEM_NONE)
    win.show()
def dnd_genlist_user_anim_clicked(obj, item=None):
    """Open a window with two genlists using the user-driven drag anim."""
    win = StandardWindow("dnd-genlist-user-anim", "DnD-Genlist-User-Anim",
                         autodel=True, size=(680,800))
    bxx = Box(win, horizontal=True, size_hint_weight=EXPAND_BOTH)
    win.resize_object_add(bxx)
    bxx.show()
    # Two side-by-side lists so items can be dragged between them.
    for j in range(2):
        gl = Genlist(win, multi_select=True, size_hint_weight=EXPAND_BOTH,
                     size_hint_align=FILL_BOTH)
        # START Drag and Drop handling
        win.callback_delete_request_add(win_del, gl)
        gl.drop_item_container_add(ELM_SEL_FORMAT_TARGETS, gl_item_getcb,
                                   dropcb=gl_dropcb)
        gl.drag_item_container_add(ANIM_TIME, DRAG_TIMEOUT, gl_item_getcb,
                                   gl_data_getcb)
        # We add mouse-down, up callbacks to start/stop drag animation
        gl.event_callback_add(EVAS_CALLBACK_MOUSE_DOWN, gl_obj_mouse_down, gl)
        # END Drag and Drop handling
        # FIXME: This causes genlist to resize the horiz axis very slowly :(
        # Reenable this and resize the window horizontally, then try to resize it back
        #elm_genlist_mode_set(gl, ELM_LIST_LIMIT)
        bxx.pack_end(gl)
        gl.show()
        # Populate by cycling through the bundled images.
        for i in range(20):
            gl.item_append(itc1, img[i % 9], flags=ELM_GENLIST_ITEM_NONE)
    win.show()
def dnd_genlist_gengrid_clicked(obj, item=None):
    """Open a window with a genlist and a gengrid that exchange items."""
    win = StandardWindow("dnd-genlist-gengrid", "DnD-Genlist-Gengrid",
                         autodel=True, size=(680,800))
    bxx = Box(win, horizontal=True, size_hint_weight=EXPAND_BOTH)
    win.resize_object_add(bxx)
    bxx.show()
    # Left side: a multi-select genlist acting as drag source and target.
    gl = Genlist(win, multi_select=True,
                 size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
    win.callback_delete_request_add(win_del, gl)
    # START Drag and Drop handling
    gl.drop_item_container_add(ELM_SEL_FORMAT_TARGETS, gl_item_getcb,
                               dropcb=gl_dropcb)
    gl.drag_item_container_add(ANIM_TIME, DRAG_TIMEOUT, gl_item_getcb,
                               gl_dnd_default_anim_data_getcb)
    # END Drag and Drop handling
    # FIXME: This causes genlist to resize the horiz axis very slowly :(
    # Reenable this and resize the window horizontally, then try to resize it back
    #elm_genlist_mode_set(gl, ELM_LIST_LIMIT)
    bxx.pack_end(gl)
    gl.show()
    for i in range(20):
        gl.item_append(itc1, img[i % 9], flags=ELM_GENLIST_ITEM_NONE)
    # Right side: a gengrid wired with the grid-specific callbacks; item
    # size follows the user's configured UI scale.
    grid = Gengrid(win, item_size=(conf.scale * 150, conf.scale * 150),
                   horizontal=False, reorder_mode=False, multi_select=True,
                   size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
    win.callback_delete_request_add(win_del, grid)
    grid.drop_item_container_add(ELM_SEL_FORMAT_TARGETS, grid_item_getcb,
                                 dropcb=grid_dropcb)
    grid.drag_item_container_add(ANIM_TIME, DRAG_TIMEOUT, grid_item_getcb,
                                 grid_data_getcb)
    for i in range(20):
        grid.item_append(gic, img[i % 9])
    bxx.pack_end(grid)
    grid.show()
    win.show()
if __name__ == "__main__":
    # Build the launcher window: an info label plus a list of the demos.
    elementary.init()
    win = StandardWindow("test", "python-elementary test application",
        size=(320,520))
    win.callback_delete_request_add(lambda o: elementary.exit())
    box0 = Box(win, size_hint_weight=EXPAND_BOTH)
    win.resize_object_add(box0)
    box0.show()
    lb = Label(win)
    lb.text_set("Please select a test from the list below<br>"
                "by clicking the test button to show the<br>"
                "test window.")
    lb.show()
    fr = Frame(win, text="Information", content=lb)
    box0.pack_end(fr)
    fr.show()
    # (label, callback) pairs for the test launcher list.
    items = [
        ("DnD Genlist Default Anim", dnd_genlist_default_anim_clicked),
        ("DnD Genlist User Anim", dnd_genlist_user_anim_clicked),
        ("DnD Genlist+Gengrid", dnd_genlist_gengrid_clicked),
    ]
    li = List(win, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
    box0.pack_end(li)
    li.show()
    for item in items:
        li.item_append(item[0], callback=item[1])
    li.go()
    win.show()
    # Enter the main loop; returns when the last window closes.
    elementary.run()
    elementary.shutdown()
| |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
from typing import Iterable
import pytest
from pants.backend.python.pip_requirement import PipRequirement
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.lockfile_metadata import (
InvalidPythonLockfileReason,
PythonLockfileMetadata,
PythonLockfileMetadataV1,
PythonLockfileMetadataV2,
)
from pants.core.util_rules.lockfile_metadata import calculate_invalidation_digest
INTERPRETER_UNIVERSE = ["2.7", "3.5", "3.6", "3.7", "3.8", "3.9", "3.10"]
def reqset(*a) -> set[PipRequirement]:
    """Parse each requirement string into a set of PipRequirements."""
    return set(map(PipRequirement.parse, a))
def test_metadata_header_round_trip() -> None:
    """Metadata survives serialization into and parsing out of a lockfile."""
    original = PythonLockfileMetadata.new(
        InterpreterConstraints(["CPython==2.7.*", "PyPy", "CPython>=3.6,<4,!=3.7.*"]),
        reqset("ansicolors==0.1.0"),
    )
    lockfile_bytes = original.add_header_to_lockfile(
        b"req1==1.0", regenerate_command="./pants lock"
    )
    recovered = PythonLockfileMetadata.from_lockfile("a", lockfile_bytes)
    assert original == recovered
def test_add_header_to_lockfile() -> None:
    """The generated header matches the expected comment block verbatim."""
    input_lockfile = b"""dave==3.1.4 \\
    --hash=sha256:cab0c0c0c0c0dadacafec0c0c0c0cafedadabeefc0c0c0c0feedbeeffeedbeef \\
    """
    expected = b"""
# This lockfile was autogenerated by Pants. To regenerate, run:
#
#    ./pants lock
#
# --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
# {
#   "version": 2,
#   "valid_for_interpreter_constraints": [
#     "CPython>=3.7"
#   ],
#   "generated_with_requirements": [
#     "ansicolors==0.1.0"
#   ]
# }
# --- END PANTS LOCKFILE METADATA ---
dave==3.1.4 \\
    --hash=sha256:cab0c0c0c0c0dadacafec0c0c0c0cafedadabeefc0c0c0c0feedbeeffeedbeef \\
    """

    # Compare line content only, ignoring indentation and blank lines.
    def line_by_line(b: bytes) -> list[bytes]:
        return [i for i in (j.strip() for j in b.splitlines()) if i]

    metadata = PythonLockfileMetadata.new(
        InterpreterConstraints([">=3.7"]), reqset("ansicolors==0.1.0")
    )
    result = metadata.add_header_to_lockfile(input_lockfile, regenerate_command="./pants lock")
    assert line_by_line(result) == line_by_line(expected)
def test_invalidation_digest() -> None:
    """The digest ignores requirement order and duplicates but not content."""
    req_a = "flake8-pantsbuild>=2.0,<3"
    req_b = "flake8-2020>=1.6.0,<1.7.0"
    req_c = "flake8"

    def digests_match(left: Iterable[str], right: Iterable[str]) -> bool:
        return calculate_invalidation_digest(left) == calculate_invalidation_digest(right)

    # Every ordering of the same three requirements hashes identically, but a
    # strict subset hashes differently.
    for ordering in itertools.permutations([req_a, req_b, req_c]):
        assert digests_match(ordering, [req_a, req_b, req_c])
        assert not digests_match(ordering, [req_a, req_b])
    assert digests_match([], [])
    assert not digests_match([], [req_a])
    # Duplicate entries do not change the digest.
    assert digests_match([req_a, req_a, req_a, req_a], [req_a])
@pytest.mark.parametrize(
    "user_digest, expected_digest, user_ic, expected_ic, matches",
    [
        (
            "yes",
            "yes",
            [">=3.5.5"],
            [">=3.5, <=3.6"],
            False,
        ),  # User ICs contain versions in the 3.7 range
        ("yes", "yes", [">=3.5.5, <=3.5.10"], [">=3.5, <=3.6"], True),
        ("yes", "no", [">=3.5.5, <=3.5.10"], [">=3.5, <=3.6"], False),  # Digests do not match
        (
            "yes",
            "yes",
            [">=3.5.5, <=3.5.10"],
            [">=3.5", "<=3.6"],
            True,
        ),  # User ICs match each of the actual ICs individually
        (
            "yes",
            "yes",
            [">=3.5.5, <=3.5.10"],
            [">=3.5", "<=3.5.4"],
            True,
        ),  # User ICs do not match one of the individual ICs
        ("yes", "yes", ["==3.5.*, !=3.5.10"], [">=3.5, <=3.6"], True),
        (
            "yes",
            "yes",
            ["==3.5.*"],
            [">=3.5, <=3.6, !=3.5.10"],
            False,
        ),  # Excluded IC from expected range is valid for user ICs
        ("yes", "yes", [">=3.5, <=3.6", ">= 3.8"], [">=3.5"], True),
        (
            "yes",
            "yes",
            [">=3.5, <=3.6", ">= 3.8"],
            [">=3.5, !=3.7.10"],
            True,
        ),  # Excluded version from expected ICs is not in a range specified
    ],
)
def test_is_valid_for_v1(user_digest, expected_digest, user_ic, expected_ic, matches) -> None:
    """V1 validity requires matching digests and user ICs within the lockfile's ICs."""
    # Annotated with the base type so the `is_valid_for` call is checked
    # against the shared interface, not the V1 subclass.
    m: PythonLockfileMetadata
    m = PythonLockfileMetadataV1(InterpreterConstraints(expected_ic), expected_digest)
    assert (
        bool(
            m.is_valid_for(
                is_tool=True,
                expected_invalidation_digest=user_digest,
                user_interpreter_constraints=InterpreterConstraints(user_ic),
                interpreter_universe=INTERPRETER_UNIVERSE,
                user_requirements=set(),
            )
        )
        == matches
    )
# Fixtures for the V2 validity tests below.
_VALID_ICS = [">=3.5"]
_VALID_REQS = ["ansicolors==0.1.0", "requests==1.0.0"]
# Different scenarios that are the same for both tool lockfiles and user lockfiles.
# Each entry is [lock_ics, user_ics, lock_reqs, user_reqs, expected_failures].
_LockfileConditions = (
    # Identical requirements: valid.
    [_VALID_ICS, _VALID_ICS, _VALID_REQS, _VALID_REQS, []],
    # Requirement order is irrelevant: still valid.
    [_VALID_ICS, _VALID_ICS, _VALID_REQS, list(reversed(_VALID_REQS)), []],
    # Same project, different pinned version: mismatch.
    [
        _VALID_ICS,
        _VALID_ICS,
        _VALID_REQS,
        [_VALID_REQS[0], "requests==2.0.0"],
        [InvalidPythonLockfileReason.REQUIREMENTS_MISMATCH],
    ],
    # Entirely different second requirement: mismatch.
    [
        _VALID_ICS,
        _VALID_ICS,
        _VALID_REQS,
        [_VALID_REQS[0], "different"],
        [InvalidPythonLockfileReason.REQUIREMENTS_MISMATCH],
    ],
    # Extra requirement not in the lockfile: mismatch.
    [
        _VALID_ICS,
        _VALID_ICS,
        _VALID_REQS,
        [*_VALID_REQS, "a-third-req"],
        [InvalidPythonLockfileReason.REQUIREMENTS_MISMATCH],
    ],
    # User interpreter constraints outside the lockfile's: IC mismatch.
    [
        _VALID_ICS,
        ["==2.7.*"],
        _VALID_REQS,
        _VALID_REQS,
        [InvalidPythonLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH],
    ],
)
@pytest.mark.parametrize(
    "is_tool, lock_ics, user_ics, lock_reqs, user_reqs, expected",
    [
        *([True, *conditions] for conditions in _LockfileConditions),
        *([False, *conditions] for conditions in _LockfileConditions),
        # Tools require exact matches, whereas user lockfiles only need to subset.
        [False, _VALID_ICS, _VALID_ICS, _VALID_REQS, [_VALID_REQS[0]], []],
        [
            True,
            _VALID_ICS,
            _VALID_ICS,
            _VALID_REQS,
            [_VALID_REQS[0]],
            [InvalidPythonLockfileReason.REQUIREMENTS_MISMATCH],
        ],
    ],
)
def test_is_valid_for_v2(
    is_tool: bool,
    user_ics: list[str],
    lock_ics: list[str],
    user_reqs: list[str],
    lock_reqs: list[str],
    expected: list[InvalidPythonLockfileReason],
) -> None:
    """V2 validity compares requirement sets (exact for tools, subset for user locks).

    NOTE: pytest binds parametrized values to parameters by *name*, so the
    ordering difference between the argnames string above and this signature
    is intentional and safe.
    """
    m = PythonLockfileMetadataV2(InterpreterConstraints(lock_ics), reqset(*lock_reqs))
    result = m.is_valid_for(
        is_tool=is_tool,
        # V2 metadata does not use the requirements-file digest; pass empty.
        expected_invalidation_digest="",
        user_interpreter_constraints=InterpreterConstraints(user_ics),
        interpreter_universe=INTERPRETER_UNIVERSE,
        user_requirements=reqset(*user_reqs),
    )
    assert result.failure_reasons == set(expected)
| |
# ===========================================================================
# Spoken-digit recognition on the Free Spoken Digit Dataset (FSDD) using a
# GMM / T-matrix / i-vector pipeline with several scoring back-ends.
# ===========================================================================
# Saved WAV file name format:
#  0) [digit] (the spoken digit, used as the class label)
#  1) [speakerID] (speaker "jackson" is held out for testing)
#  2) [production index]
# => "3_theo_12"
# NOTE(review): an earlier version of this header described the TIDIGITS
# gender task ("train_g_08_17_as_a_4291815" style names); the code below
# actually loads F.FSDD and predicts digits -- header updated to match.
# ===========================================================================
from __future__ import print_function, division, absolute_import
import matplotlib
matplotlib.use('Agg')
import os
os.environ['ODIN'] = 'gpu,float32,seed=1234'
import shutil
import pickle
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix, accuracy_score
from odin import backend as K, nnet as N, fuel as F, visual as V
from odin.stats import train_valid_test_split, freqcount, describe
from odin import ml
from odin import training
from odin import preprocessing as pp
from odin.visual import print_dist, print_confusion, print_hist
from odin.utils import (get_logpath, get_modelpath, get_datasetpath, get_exppath,
Progbar, unique_labels, chain, get_formatted_datetime,
as_tuple_of_shape, stdio, ctext, ArgController)
# ===========================================================================
# Input arguments
# ===========================================================================
# Command-line arguments (odin ArgController): (flag, help-text, default).
args = ArgController(
).add('-nmix', "Number of GMM mixture", 128
).add('-tdim', "Dimension of t-matrix", 64
).add('-feat', "Acoustic feature: spec, mspec, mfcc", 'mfcc'
).add('--gmm', "Force re-run training GMM", False
).add('--stat', "Force re-extraction of centered statistics", False
).add('--tmat', "Force re-run training Tmatrix", False
).add('--ivec', "Force re-run extraction of i-vector", False
).add('--all', "Run all the system again, just a shortcut", False
).add('--acous', "Force re-run acoustic feature extraction", False
).parse()
# Cascade the force-rerun flags: re-running an earlier stage forces every
# downstream stage as well (gmm -> stat -> tmat -> ivec).
args.gmm |= args.all
args.stat |= args.all | args.gmm
args.tmat |= args.all | args.stat
args.ivec |= args.all | args.tmat
FEAT = args.feat
# ===========================================================================
# Const
# ===========================================================================
EXP_DIR = get_exppath('FSDD')
PATH_ACOUSTIC_FEATURES = os.path.join(EXP_DIR, 'features')
# ====== GMM training ====== #
NMIX = args.nmix            # number of Gaussian components
GMM_NITER = 12              # EM iterations
GMM_DOWNSAMPLE = 1          # 1 = use every frame
GMM_STOCHASTIC = True
GMM_DTYPE = 'float64'
# ====== IVEC training ====== #
TV_DIM = args.tdim          # total-variability subspace dimension
TV_NITER = 16
TV_DTYPE = 'float64'
# ===========================================================================
# Extract acoustic features
# ===========================================================================
# path to preprocessed dataset
all_files, meta = F.FSDD.load()
# Re-extract if the feature directory is missing/incomplete or forced.
# NOTE(review): 14 is presumably the file count of a complete feature
# directory produced by this pipeline -- confirm if the pipeline changes.
if not os.path.exists(PATH_ACOUSTIC_FEATURES) or \
    len(os.listdir(PATH_ACOUSTIC_FEATURES)) != 14 or \
    bool(args.acous):
  extractors = pp.make_pipeline(steps=[
      pp.speech.AudioReader(sr_new=8000, best_resample=True, remove_dc=True),
      pp.speech.PreEmphasis(coeff=0.97),
      # utterance name = WAV file name without extension
      pp.base.Converter(converter=lambda x: os.path.basename(x).split('.')[0],
                        input_name='path', output_name='name'),
      # ====== STFT ====== #
      pp.speech.STFTExtractor(frame_length=0.025, step_length=0.005,
                              n_fft=512, window='hamm', energy=False),
      # ====== spectrogram ====== #
      pp.speech.PowerSpecExtractor(power=2.0, output_name='spec'),
      pp.speech.MelsSpecExtractor(n_mels=24, fmin=64, fmax=4000,
                                  input_name=('spec', 'sr'), output_name='mspec'),
      pp.speech.MFCCsExtractor(n_ceps=20,
                               remove_first_coef=True, first_coef_energy=True,
                               input_name='mspec', output_name='mfcc'),
      pp.base.DeltaExtractor(input_name='mfcc', order=(0, 1, 2)),
      # ====== SAD (speech activity detection from frame energy) ====== #
      pp.base.RenameFeatures(input_name='mfcc_energy', output_name='energy'),
      pp.speech.SADthreshold(energy_threshold=0.55, smooth_window=5,
                             input_name='energy', output_name='sad'),
      # ====== normalization ====== #
      pp.base.DeleteFeatures(input_name=('stft', 'spec', 'sad_threshold')),
      pp.speech.AcousticNorm(mean_var_norm=True, windowed_mean_var_norm=True,
                             input_name=('mspec', 'mfcc')),
      # ====== post processing: halve on-disk size ====== #
      pp.base.AsType(dtype='float16'),
  ], debug=False)
  # np.warnings is a deprecated alias of the `warnings` module.
  with np.warnings.catch_warnings():
    np.warnings.filterwarnings('ignore')
    processor = pp.FeatureProcessor(
        jobs=all_files,
        path=PATH_ACOUSTIC_FEATURES,
        extractor=extractors,
        n_cache=120,
        ncpu=None,
        override=True,
        identifier='name',
        log_path=os.path.join(EXP_DIR, 'processor.log'),
        stop_on_failure=True)
    processor.run()
    # pp.validate_features(processor,
    #                      nb_samples=12,
    #                      path=os.path.join(EXP_DIR, 'feature_validation'),
    #                      override=True)
ds = F.Dataset(PATH_ACOUSTIC_FEATURES, read_only=True)
print(ds)
# (utterance_name, (start_frame, end_frame)) pairs for the chosen feature
indices = list(ds['indices_%s' % args.feat].items())
print("Utterances length:")
print(" ", describe([end - start for name, (start, end) in indices], shorten=True))
# ===========================================================================
# Basic path for GMM, T-matrix and I-vector
# ===========================================================================
# One experiment directory per (feature, nmix, tdim) configuration.
EXP_DIR = os.path.join(EXP_DIR, '%s_%d_%d' % (FEAT, NMIX, TV_DIM))
LOG_PATH = get_logpath(name='log.txt', override=False, root=EXP_DIR, odin_base=False)
# stdio presumably tees stdout into LOG_PATH -- confirm in odin.utils.
stdio(LOG_PATH)
print("Exp-dir:", ctext(EXP_DIR, 'cyan'))
print("Log path:", ctext(LOG_PATH, 'cyan'))
# ====== ivec path ====== #
GMM_PATH = os.path.join(EXP_DIR, 'gmm')
TMAT_PATH = os.path.join(EXP_DIR, 'tmat')
# zero order statistics
Z_PATH = (
    os.path.join(EXP_DIR, 'Z_train'),
    os.path.join(EXP_DIR, 'Z_test'))
# first order statistics
F_PATH = (
    os.path.join(EXP_DIR, 'F_train'),
    os.path.join(EXP_DIR, 'F_test'))
# i-vector path
I_PATH = (
    os.path.join(EXP_DIR, 'I_train'),
    os.path.join(EXP_DIR, 'I_test'))
# labels
L_PATH = ( # labels
    os.path.join(EXP_DIR, 'L_train'),
    os.path.join(EXP_DIR, 'L_test'))
# ===========================================================================
# Helper
# ===========================================================================
# jackson speaker for testing, all other speaker for training
def is_train(x):
  """Return True when utterance `x` ("digit_speaker_index") is a training file.

  The held-out test speaker is "jackson"; every other speaker trains.
  """
  speaker = x.split('_')[1]
  return speaker != 'jackson'
def extract_digit(x):
  """Return the digit label: the text before the first underscore in `x`."""
  label, _, _ = x.partition('_')
  return label
# The label of an utterance is its digit.
fn_extract = extract_digit
# fn_label maps an utterance name to its integer label index; `labels` is the
# list of distinct label strings (per odin.utils.unique_labels).
fn_label, labels = unique_labels([i[0] for i in indices],
                                 key_func=fn_extract,
                                 return_labels=True)
print("Labels:", ctext(labels, 'cyan'))
# ===========================================================================
# Preparing data
# ===========================================================================
# Split utterances by speaker: "jackson" -> test, everyone else -> train.
train_files = [] # (name, (start, end)) ...
test_files = []
for name, (start, end) in indices:
  if is_train(name):
    train_files.append((name, (start, end)))
  else:
    test_files.append((name, (start, end)))
# name for each dataset, useful for later
data_name = ['train', 'test']
print("#Train:", len(train_files))
print("#Test:", len(test_files))
# ===========================================================================
# GMM
# ===========================================================================
# Train the UBM-GMM from scratch unless a cached model exists and no rerun
# was forced; otherwise unpickle the cached model from GMM_PATH.
if not os.path.exists(GMM_PATH) or args.gmm:
  gmm = ml.GMM(nmix=NMIX, nmix_start=1,
               niter=GMM_NITER, dtype=GMM_DTYPE,
               allow_rollback=True, exit_on_error=True,
               batch_size_cpu=2048, batch_size_gpu=2048,
               downsample=GMM_DOWNSAMPLE,
               stochastic_downsample=GMM_STOCHASTIC,
               device='gpu',
               seed=1234, path=GMM_PATH)
  # Fit on the training utterances of the selected acoustic feature.
  gmm.fit((ds[FEAT], train_files))
else:
  with open(GMM_PATH, 'rb') as f:
    gmm = pickle.load(f)
print(gmm)
# ===========================================================================
# Extract Zero and first order statistics
# ===========================================================================
stats = {}   # name -> (zero-order stats, first-order stats) as MmapData
y_true = {}  # name -> list of integer labels, aligned with the stats rows
for name, files, z_path, f_path, l_path in zip(
    data_name,
    (train_files, test_files),
    Z_PATH, F_PATH, L_PATH):
  # extracting zeroth and first order statistics
  if not all(os.path.exists(i) for i in (z_path, f_path, l_path)) or\
  args.stat:
    print('========= Extracting statistics for: "%s" =========' % name)
    gmm.transform_to_disk(X=ds[FEAT], indices=files,
                          pathZ=z_path, pathF=f_path, name_path=l_path,
                          dtype='float32', device='cpu', ncpu=None,
                          override=True)
  # load the statistics in MmapData
  # l_path holds one utterance name per row; map each to its label index.
  y_true[name] = [fn_label(i) for i in np.genfromtxt(fname=l_path, dtype=str)]
  stats[name] = (F.MmapData(path=z_path, read_only=True),
                 F.MmapData(path=f_path, read_only=True))
for name, x in stats.items():
  print(ctext(name + ':', 'cyan'), x)
# ===========================================================================
# Training T-matrix
# ===========================================================================
# Train the total-variability matrix on the *training* statistics only,
# unless a cached model exists and no rerun was forced.
if not os.path.exists(TMAT_PATH) or args.tmat:
  tmat = ml.Tmatrix(tv_dim=TV_DIM, gmm=gmm,
                    niter=TV_NITER, dtype=TV_DTYPE,
                    batch_size_cpu='auto', batch_size_gpu='auto',
                    device='gpu', ncpu=1, gpu_factor=3,
                    path=TMAT_PATH)
  tmat.fit(X=(stats['train'][0], # Z_train
              stats['train'][1])) # F_train
else:
  with open(TMAT_PATH, 'rb') as f:
    tmat = pickle.load(f)
print(tmat)
# ===========================================================================
# Extracting I-vectors
# ===========================================================================
ivecs = {}  # name -> memory-mapped i-vector matrix
for i_path, name in zip(I_PATH, data_name):
  if not os.path.exists(i_path) or args.ivec:
    print('========= Extracting ivecs for: "%s" =========' % name)
    z, f = stats[name]
    tmat.transform_to_disk(path=i_path, Z=z, F=f,
                           dtype='float32', device='gpu', ncpu=1,
                           override=True)
  # load extracted ivec
  ivecs[name] = F.MmapData(i_path, read_only=True)
# ====== print the i-vectors ====== #
for name in data_name:
  print('========= %s =========' % name)
  print(ctext('i-vectors:', 'cyan'))
  print(ctext(' *', 'yellow'), ivecs[name])
  print(ctext('z-stats:', 'cyan'))
  print(ctext(' *', 'yellow'), stats[name][0])
  print(ctext('f-stats:', 'cyan'))
  print(ctext(' *', 'yellow'), stats[name][1])
  print(ctext('labels:', 'cyan'))
  print(ctext(' *', 'yellow'), len(y_true[name]))
# ==================== turn off all annoying warning ==================== #
# NOTE(review): catch_warnings() restores the filter state when the `with`
# block exits, so this has no effect on the evaluation code that follows --
# likely intended to wrap the whole evaluation section; confirm intent.
# (np.warnings is also a deprecated alias of the `warnings` module.)
with np.warnings.catch_warnings():
  np.warnings.filterwarnings('ignore')
# ===========================================================================
# I-vector
# ===========================================================================
# Evaluate several back-ends on the extracted i-vectors; each scorer is fit
# on the training split and evaluated on the held-out (jackson) split.
X_train = ivecs['train']
X_test = ivecs['test']
# ====== cosine scoring (with centering + WCCN + LDA) ====== #
print(ctext("==== '%s'" % "Ivec cosine-scoring", 'cyan'))
scorer = ml.Scorer(centering=True, wccn=True, lda=True, method='cosine')
scorer.fit(X=X_train, y=y_true['train'])
scorer.evaluate(X_test, y_true['test'], labels=labels)
# ====== GMM scoring, one-vs-all ====== #
print(ctext("==== '%s'" % "Ivec GMM-scoring-ova", 'cyan'))
scorer = ml.GMMclassifier(strategy="ova",
                          n_components=3, covariance_type='full',
                          centering=True, wccn=True, unit_length=True,
                          lda=False, concat=False)
scorer.fit(X=X_train, y=y_true['train'])
scorer.evaluate(X_test, y_true['test'], labels=labels)
# ====== GMM scoring, one GMM per class ("all") ====== #
print(ctext("==== '%s'" % "Ivec GMM-scoring-all", 'cyan'))
scorer = ml.GMMclassifier(strategy="all", covariance_type='full',
                          centering=True, wccn=True, unit_length=True,
                          lda=False, concat=False)
scorer.fit(X=X_train, y=y_true['train'])
scorer.evaluate(X_test, y_true['test'], labels=labels)
# ====== PLDA scoring ====== #
print(ctext("==== '%s'" % "Ivec PLDA-scoring", 'cyan'))
scorer = ml.PLDA(n_phi=TV_DIM // 2, n_iter=12,
                 centering=True, wccn=True, unit_length=True,
                 random_state=1234)
scorer.fit(X=X_train, y=y_true['train'])
scorer.evaluate(X_test, y_true['test'], labels=labels)
# ====== SVM scoring ====== #
print(ctext("==== '%s'" % "Ivec SVM-scoring", 'cyan'))
scorer = ml.Scorer(wccn=True, lda=True, method='svm')
scorer.fit(X=X_train, y=y_true['train'])
scorer.evaluate(X_test, y_true['test'], labels=labels)
# ===========================================================================
# Super-vector
# ===========================================================================
# Baseline: score the raw first-order statistics ("super-vectors") after
# reducing them to the same dimensionality as the i-vectors via PPCA.
X_train = stats['train'][1]
X_test = stats['test'][1]
X_train, X_test = ml.fast_pca(X_train, X_test, n_components=args.tdim,
                              algo='ppca', random_state=1234)
# ====== GMM scoring, one-vs-all ====== #
print(ctext("==== '%s'" % "Super-Vector GMM-scoring-ova", 'cyan'))
scorer = ml.GMMclassifier(strategy="ova",
                          n_components=3, covariance_type='full',
                          centering=True, wccn=True, unit_length=True,
                          lda=False, concat=False)
scorer.fit(X=X_train, y=y_true['train'])
scorer.evaluate(X_test, y_true['test'], labels=labels)
# ====== PLDA scoring ====== #
print(ctext("==== '%s'" % "Super-Vector PLDA-scoring", 'cyan'))
scorer = ml.PLDA(n_phi=TV_DIM // 2, n_iter=12,
                 centering=True, wccn=True, unit_length=True,
                 random_state=1234)
scorer.fit(X=X_train, y=y_true['train'])
scorer.evaluate(X_test, y_true['test'], labels=labels)
# ====== SVM scoring ====== #
print(ctext("==== '%s'" % "Super-Vector SVM-scoring", 'cyan'))
scorer = ml.Scorer(wccn=True, lda=True, method='svm')
scorer.fit(X=X_train, y=y_true['train'])
scorer.evaluate(X_test, y_true['test'], labels=labels)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TFGAN.
These methods come from https://arxiv.org/abs/1606.03498 and
https://arxiv.org/abs/1706.08500.
NOTE: This implementation uses the same weights as in
https://github.com/openai/improved-gan/blob/master/inception_score/model.py,
but is more numerically stable and is an unbiased estimator of the true
Inception score even when splitting the inputs into batches.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import tarfile
from six.moves import urllib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
# Public API of this module.
__all__ = [
    'get_graph_def_from_disk',
    'get_graph_def_from_resource',
    'get_graph_def_from_url_tarball',
    'preprocess_image',
    'run_image_classifier',
    'run_inception',
    'inception_score',
    'classifier_score',
    'classifier_score_from_logits',
    'frechet_inception_distance',
    'frechet_classifier_distance',
    'frechet_classifier_distance_from_activations',
    'INCEPTION_DEFAULT_IMAGE_SIZE',
]
# Frozen InceptionV1 graph used by default for Inception score / FID.
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz'
INCEPTION_FROZEN_GRAPH = 'inceptionv1_for_inception_score.pb'
INCEPTION_INPUT = 'Mul:0'            # graph input tensor name
INCEPTION_OUTPUT = 'logits:0'        # classification logits
INCEPTION_FINAL_POOL = 'pool_3:0'    # penultimate pooling layer (for FID)
INCEPTION_DEFAULT_IMAGE_SIZE = 299
def _validate_images(images, image_size):
  """Coerce `images` to a Tensor of shape [batch, image_size, image_size, C]."""
  tensor = ops.convert_to_tensor(images)
  tensor.shape.with_rank(4)
  expected_shape = [None, image_size, image_size, None]
  tensor.shape.assert_is_compatible_with(expected_shape)
  return tensor
def _symmetric_matrix_square_root(mat, eps=1e-10):
  """Compute square root of a symmetric matrix.
  Note that this is different from an elementwise square root. We want to
  compute M' where M' = sqrt(mat) such that M' * M' = mat.
  Also note that this method **only** works for symmetric matrices.
  Args:
    mat: Matrix to take the square root of.
    eps: Small epsilon such that any element less than eps will not be square
      rooted to guard against numerical instability.
  Returns:
    Matrix square root of mat.
  """
  # Unlike numpy, tensorflow's return order is (s, u, v)
  s, u, v = linalg_ops.svd(mat)
  # sqrt is unstable around 0, just use 0 in such case
  # NOTE(review): the code keeps the raw singular value `s` (not literally 0)
  # when s < eps; such values are below 1e-10 so the result is ~0 anyway, but
  # this differs from what the comment above states -- confirm intent.
  si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
  # Note that the v returned by Tensorflow is v = V
  # (when referencing the equation A = U S V^T)
  # This is unlike Numpy which returns v = V^T
  return math_ops.matmul(
      math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)
def preprocess_image(
    images, height=INCEPTION_DEFAULT_IMAGE_SIZE,
    width=INCEPTION_DEFAULT_IMAGE_SIZE, scope=None):
  """Prepare a batch of images for evaluation.
  This is the preprocessing portion of the graph from
  http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz.
  Note that it expects Tensors in [0, 255]. This function maps pixel values to
  [-1, 1] and resizes to match the InceptionV1 network.
  Args:
    images: 3-D or 4-D Tensor of images. Values are in [0, 255].
    height: Integer. Height of resized output image.
    width: Integer. Width of resized output image.
    scope: Optional scope for name_scope.
  Returns:
    3-D or 4-D float Tensor of prepared image(s). Values are in [-1, 1].
  """
  is_single = images.shape.ndims == 3
  with ops.name_scope(scope, 'preprocess', [images, height, width]):
    if not images.dtype.is_floating:
      images = math_ops.to_float(images)
    # resize_bilinear needs a batch dimension; add one temporarily for a
    # single image and squeeze it back out below.
    if is_single:
      images = array_ops.expand_dims(images, axis=0)
    resized = image_ops.resize_bilinear(images, [height, width])
    # Map [0, 255] -> [-1, 1].
    resized = (resized - 128.0) / 128.0
    if is_single:
      resized = array_ops.squeeze(resized, axis=0)
  return resized
def _kl_divergence(p, p_logits, q):
  """Computes the Kullback-Liebler divergence between p and q.
  This function uses p's logits in some places to improve numerical stability.
  Specifically:
  KL(p || q) = sum[ p * log(p / q) ]
             = sum[ p * ( log(p)                - log(q) ) ]
             = sum[ p * ( log_softmax(p_logits) - log(q) ) ]
  Args:
    p: A 2-D floating-point Tensor p_ij, where `i` corresponds to the minibatch
      example and `j` corresponds to the probability of being in class `j`.
    p_logits: A 2-D floating-point Tensor corresponding to logits for `p`.
    q: A 1-D floating-point Tensor, where q_j corresponds to the probability
      of class `j`.
  Returns:
    KL divergence between two distributions. Output dimension is 1D, one entry
    per distribution in `p`.
  Raises:
    ValueError: If any of the inputs aren't floating-point.
    ValueError: If p or p_logits aren't 2D.
    ValueError: If q isn't 1D.
  """
  for tensor in [p, p_logits, q]:
    if not tensor.dtype.is_floating:
      # BUG FIX: ValueError does not %-format its arguments the way logging
      # does; the original `ValueError('msg %s', name)` raised with an
      # unformatted (msg, name) tuple as the message. Format explicitly.
      raise ValueError('Input %s must be floating type.' % tensor.name)
  p.shape.assert_has_rank(2)
  p_logits.shape.assert_has_rank(2)
  q.shape.assert_has_rank(1)
  # One KL value per row (minibatch example) of `p`.
  return math_ops.reduce_sum(
      p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)
def get_graph_def_from_disk(filename):
  """Get a GraphDef proto from a disk location."""
  with gfile.FastGFile(filename, 'rb') as graph_file:
    serialized_graph = graph_file.read()
  return graph_pb2.GraphDef.FromString(serialized_graph)
def get_graph_def_from_resource(filename):
  """Get a GraphDef proto from within a .par file."""
  serialized_graph = resource_loader.load_resource(filename)
  return graph_pb2.GraphDef.FromString(serialized_graph)
def get_graph_def_from_url_tarball(url, filename):
  """Get a GraphDef proto from a tarball on the web.

  Args:
    url: URL of a gzip-compressed tarball.
    filename: Name of the serialized GraphDef member inside the tarball.
  Returns:
    A `graph_pb2.GraphDef` parsed from that member.
  """
  def _progress(count, block_size, total_size):
    # Download progress on one line; note no trailing newline is written
    # after completion, so subsequent output continues on the same line.
    sys.stdout.write('\r>> Downloading %s %.1f%%' % (
        url, float(count * block_size) / float(total_size) * 100.0))
    sys.stdout.flush()
  # NOTE(review): with no filename argument, urlretrieve downloads into a
  # temporary file that is never removed here -- consider cleaning it up.
  tar_filename, _ = urllib.request.urlretrieve(url, reporthook=_progress)
  with tarfile.open(tar_filename, 'r:gz') as tar:
    proto_str = tar.extractfile(filename).read()
  return graph_pb2.GraphDef.FromString(proto_str)
def _default_graph_def_fn():
  # Download the default frozen InceptionV1 graph (requires network access).
  return get_graph_def_from_url_tarball(INCEPTION_URL, INCEPTION_FROZEN_GRAPH)
def run_inception(images,
                  graph_def=None,
                  default_graph_def_fn=_default_graph_def_fn,
                  image_size=INCEPTION_DEFAULT_IMAGE_SIZE,
                  input_tensor=INCEPTION_INPUT,
                  output_tensor=INCEPTION_OUTPUT):
  """Run images through a pretrained Inception classifier.
  Args:
    images: Input tensors. Must be [batch, height, width, channels]. Input shape
      and values must be in [-1, 1], which can be achieved using
      `preprocess_image`.
    graph_def: A GraphDef proto of a pretrained Inception graph. If `None`,
      call `default_graph_def_fn` to get GraphDef.
    default_graph_def_fn: A function that returns a GraphDef. Used if
      `graph_def` is `None. By default, returns a pretrained InceptionV1 graph.
    image_size: Required image width and height. See unit tests for the default
      values.
    input_tensor: Name of input Tensor.
    output_tensor: Name or list of output Tensors. This function will compute
      activations at the specified layer. Examples include INCEPTION_OUTPUT
      and INCEPTION_FINAL_POOL which would result in this function computing
      the final logits or the penultimate pooling layer.
  Returns:
    Tensor or Tensors corresponding to computed `output_tensor`.
  Raises:
    ValueError: If images are not the correct size.
    ValueError: If neither `graph_def` nor `default_graph_def_fn` are provided.
  """
  images = _validate_images(images, image_size)
  if graph_def is None:
    if default_graph_def_fn is None:
      raise ValueError('If `graph_def` is `None`, must provide '
                       '`default_graph_def_fn`.')
    graph_def = default_graph_def_fn()
  activations = run_image_classifier(images, graph_def, input_tensor,
                                     output_tensor)

  def _flatten_non_2d(tensor):
    # BUG FIX: the original tested `array_ops.rank(tensor) != 2`, comparing a
    # symbolic Tensor against a Python int -- under TF1's default `__ne__`
    # that is always True, so it was never a real rank test. Use the
    # statically known rank instead; an unknown rank (ndims is None) is
    # flattened too, matching the original's effective always-flatten path.
    if tensor.shape.ndims != 2:
      return layers.flatten(tensor)
    return tensor

  if isinstance(activations, list):
    activations = [_flatten_non_2d(a) for a in activations]
  else:
    activations = _flatten_non_2d(activations)
  return activations
def run_image_classifier(tensor, graph_def, input_tensor,
                         output_tensor, scope='RunClassifier'):
  """Runs a network from a frozen graph.
  Args:
    tensor: An Input tensor.
    graph_def: A GraphDef proto.
    input_tensor: Name of input tensor in graph def.
    output_tensor: A tensor name or list of tensor names in graph def.
    scope: Name scope for classifier.
  Returns:
    Classifier output if `output_tensor` is a string, or a list of outputs if
    `output_tensor` is a list.
  Raises:
    ValueError: If `input_tensor` or `output_tensor` aren't in the graph_def.
  """
  # `import_graph_def` wants a list of output names; remember whether the
  # caller asked for a single tensor so we can unwrap the result.
  wants_single_output = isinstance(output_tensor, str)
  output_names = [output_tensor] if wants_single_output else output_tensor
  outputs = importer.import_graph_def(
      graph_def, {input_tensor: tensor}, output_names, name=scope)
  return outputs[0] if wants_single_output else outputs
def classifier_score(images, classifier_fn, num_batches=1):
  """Classifier score for evaluating a conditional generative model.
  This is based on the Inception Score, but for an arbitrary classifier.
  This technique is described in detail in https://arxiv.org/abs/1606.03498. In
  summary, this function calculates
  exp( E[ KL(p(y|x) || p(y)) ] )
  which captures how different the network's classification prediction is from
  the prior distribution over classes.
  Args:
    images: Images to calculate the classifier score for.
    classifier_fn: A function that takes images and produces logits based on a
      classifier.
    num_batches: Number of batches to split `generated_images` in to in order to
      efficiently run them through the classifier network.
      NOTE: `array_ops.split` requires the batch dimension to be evenly
      divisible by `num_batches`.
  Returns:
    The classifier score. A floating-point scalar of the same type as the output
    of `classifier_fn`.
  """
  generated_images_list = array_ops.split(
      images, num_or_size_splits=num_batches)
  # Compute the classifier splits using the memory-efficient `map_fn`.
  logits = functional_ops.map_fn(
      fn=classifier_fn,
      elems=array_ops.stack(generated_images_list),
      parallel_iterations=1,
      back_prop=False,
      swap_memory=True,
      name='RunClassifier')
  # Re-join the per-batch logits into one [total_images, num_classes] Tensor.
  logits = array_ops.concat(array_ops.unstack(logits), 0)
  return classifier_score_from_logits(logits)
def classifier_score_from_logits(logits):
  """Classifier score for evaluating a conditional generative model.
  This is based on the Inception Score, but for an arbitrary classifier.
  This technique is described in detail in https://arxiv.org/abs/1606.03498. In
  summary, this function calculates
  exp( E[ KL(p(y|x) || p(y)) ] )
  which captures how different the network's classification prediction is from
  the prior distribution over classes.
  Args:
    logits: A 2D Tensor of logits.
  Returns:
    The classifier score. A floating-point scalar of the same type as the output
    of `logits`.
  """
  logits.shape.assert_has_rank(2)
  # Use maximum precision for best results.
  logits_dtype = logits.dtype
  if logits_dtype != dtypes.float64:
    logits = math_ops.to_double(logits)
  p = nn_ops.softmax(logits)
  # Marginal class distribution p(y), estimated as the mean over examples.
  q = math_ops.reduce_mean(p, axis=0)
  kl = _kl_divergence(p, logits, q)
  kl.shape.assert_has_rank(1)
  # score = exp(mean per-example KL); taking exp after the mean keeps the
  # estimator unbiased across batch splits (see module docstring).
  log_score = math_ops.reduce_mean(kl)
  final_score = math_ops.exp(log_score)
  # Cast back to the caller's dtype after the float64 computation.
  if logits_dtype != dtypes.float64:
    final_score = math_ops.cast(final_score, logits_dtype)
  return final_score
# Convenience alias: the standard Inception Score is the classifier score
# computed from the pretrained Inception network's logits.
inception_score = functools.partial(
    classifier_score,
    classifier_fn=functools.partial(
        run_inception, output_tensor=INCEPTION_OUTPUT))
def trace_sqrt_product(sigma, sigma_v):
  """Find the trace of the positive sqrt of product of covariance matrices.
  '_symmetric_matrix_square_root' only works for symmetric matrices, so we
  cannot just take _symmetric_matrix_square_root(sigma * sigma_v).
  ('sigma' and 'sigma_v' are symmetric, but their product is not necessarily).
  Let sigma = A A so A = sqrt(sigma), and sigma_v = B B.
  We want to find trace(sqrt(sigma sigma_v)) = trace(sqrt(A A B B))
  Note the following properties:
  (i) forall M1, M2: eigenvalues(M1 M2) = eigenvalues(M2 M1)
     => eigenvalues(A A B B) = eigenvalues (A B B A)
  (ii) if M1 = sqrt(M2), then eigenvalues(M1) = sqrt(eigenvalues(M2))
     => eigenvalues(sqrt(sigma sigma_v)) = sqrt(eigenvalues(A B B A))
  (iii) forall M: trace(M) = sum(eigenvalues(M))
     => trace(sqrt(sigma sigma_v)) = sum(eigenvalues(sqrt(sigma sigma_v)))
                                   = sum(sqrt(eigenvalues(A B B A)))
                                   = sum(eigenvalues(sqrt(A B B A)))
                                   = trace(sqrt(A B B A))
                                   = trace(sqrt(A sigma_v A))
  A = sqrt(sigma). Both sigma and A sigma_v A are symmetric, so we **can**
  use the _symmetric_matrix_square_root function to find the roots of these
  matrices.
  Args:
    sigma: a square, symmetric, real, positive semi-definite covariance matrix
    sigma_v: same as sigma
  Returns:
    The trace of the positive square root of sigma*sigma_v
  """
  # Note sqrt_sigma is called "A" in the proof above
  sqrt_sigma = _symmetric_matrix_square_root(sigma)
  # This is sqrt(A sigma_v A) above
  sqrt_a_sigmav_a = math_ops.matmul(
      sqrt_sigma, math_ops.matmul(sigma_v, sqrt_sigma))
  # By step (iii) of the proof, the trace of this root is the answer.
  return math_ops.trace(_symmetric_matrix_square_root(sqrt_a_sigmav_a))
def frechet_classifier_distance(real_images,
                                generated_images,
                                classifier_fn,
                                num_batches=1):
    """Classifier distance for evaluating a generative model.

    A generalization of the Frechet Inception distance (FID,
    https://arxiv.org/abs/1706.08500) to an arbitrary classifier.  Given two
    Gaussians with means m, m_w and covariances C, C_w, it computes

        |m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))

    which measures how different the feature distributions of real and
    generated images are.  Unlike the Inception score, this is a true
    distance and uses information about real-world images.

    Note that the sample-based estimate is biased, and more so for small
    sample sizes; compare models only at equal sample sizes.

    Args:
        real_images: Real images to use to compute Frechet Inception distance.
        generated_images: Generated images to use to compute Frechet
            Inception distance.
        classifier_fn: A function mapping images to classifier activations.
        num_batches: Number of batches to split the images into so they can
            be run through the classifier memory-efficiently.

    Returns:
        The Frechet Inception distance, a floating-point scalar of the same
        type as the output of `classifier_fn`.
    """
    # Batch both image sets and stack them so one map_fn pass covers both.
    all_batches = (
        array_ops.split(real_images, num_or_size_splits=num_batches)
        + array_ops.split(generated_images, num_or_size_splits=num_batches))
    stacked_batches = array_ops.stack(all_batches)
    # Memory-efficient, sequential evaluation of the classifier.
    batch_activations = functional_ops.map_fn(
        fn=classifier_fn,
        elems=stacked_batches,
        parallel_iterations=1,
        back_prop=False,
        swap_memory=True,
        name='RunClassifier')
    # Separate the real-image and generated-image activations again.
    real_part, generated_part = array_ops.split(
        batch_activations, [num_batches, num_batches], 0)
    # Flatten the batch dimension back out.
    real_flat = array_ops.concat(array_ops.unstack(real_part), 0)
    generated_flat = array_ops.concat(array_ops.unstack(generated_part), 0)
    return frechet_classifier_distance_from_activations(
        real_flat, generated_flat)
def frechet_classifier_distance_from_activations(
    real_activations, generated_activations):
    """Classifier distance computed directly from activation matrices.

    Same metric as `frechet_classifier_distance`
    (https://arxiv.org/abs/1706.08500): for Gaussians with means m, m_w and
    covariances C, C_w it evaluates

        |m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))

    The sample-based estimate is biased (more so at small sample sizes), so
    only compare models computed with equal sample sizes.

    Args:
        real_activations: 2-D activations of real images.
        generated_activations: 2-D activations of generated images.

    Returns:
        The Frechet Inception distance, a floating-point scalar of the same
        type as the input activations.
    """
    real_activations.shape.assert_has_rank(2)
    generated_activations.shape.assert_has_rank(2)

    # Do all statistics in float64 for numerical stability, casting back at
    # the end.
    input_dtype = real_activations.dtype
    if input_dtype != dtypes.float64:
        real_activations = math_ops.to_double(real_activations)
        generated_activations = math_ops.to_double(generated_activations)

    # Means of each activation set.
    mean_real = math_ops.reduce_mean(real_activations, 0)
    mean_gen = math_ops.reduce_mean(generated_activations, 0)

    sample_count = math_ops.to_double(array_ops.shape(real_activations)[0])

    # Unbiased sample covariance: (X - mu)^T (X - mu) / (n - 1).
    centered_real = real_activations - mean_real
    cov_real = math_ops.matmul(
        centered_real, centered_real, transpose_a=True) / (sample_count - 1)
    centered_gen = generated_activations - mean_gen
    cov_gen = math_ops.matmul(
        centered_gen, centered_gen, transpose_a=True) / (sample_count - 1)

    # Tr(sqrt(C C_w)) term of FID.
    sqrt_trace_component = trace_sqrt_product(cov_real, cov_gen)

    # Covariance component; trace(A + B) = trace(A) + trace(B).
    trace_part = math_ops.trace(cov_real + cov_gen) - 2.0 * sqrt_trace_component
    # Squared L2 distance between the means.
    mean_part = math_ops.square(linalg_ops.norm(mean_real - mean_gen))

    fid = trace_part + mean_part
    if input_dtype != dtypes.float64:
        fid = math_ops.cast(fid, input_dtype)
    return fid
# The Frechet Inception Distance (FID): `frechet_classifier_distance`
# specialized to the Inception network's final pooling layer.
frechet_inception_distance = functools.partial(
    frechet_classifier_distance,
    classifier_fn=functools.partial(
        run_inception, output_tensor=INCEPTION_FINAL_POOL))
| |
import demistomock as demisto
from CommonServerPython import *
import urllib3
import traceback
from typing import Any, Dict, Optional, Union
import ntpath
from dateparser import parse
# Disable insecure warnings
urllib3.disable_warnings()
""" CONSTANTS """
VERSION = 24
MAX_RESULTS = 100
""" CLIENT CLASS """
class Client(BaseClient):
    """Client class to interact with the Rundeck service API.

    This Client implements API calls and does not contain any Demisto logic:
    it should only perform requests and return data.  It inherits from
    BaseClient defined in CommonServerPython, so most calls use
    _http_request(), which handles proxy, SSL verification, etc.
    """

    def __init__(
        self,
        base_url,
        project_name,
        params,
        verify=True,
        proxy=False,
        ok_codes=tuple(),
        headers=None,
        auth=None,
    ):
        """
        :param base_url: base URL of the Rundeck API
        :param project_name: default project, used when a call does not pass one
        :param params: query parameters sent with every request (e.g. the auth token)
        The remaining parameters are forwarded to BaseClient unchanged.
        """
        self.project_name = project_name
        self.params = params
        super().__init__(base_url, verify, proxy, ok_codes, headers, auth)

    def get_project_list(self):
        """
        This function returns a list of all existing projects.
        :return: api response
        """
        return self._http_request(
            method="GET", url_suffix="/projects", params=self.params
        )

    def get_webhooks_list(self, project_name: str):
        """
        This function returns all webhooks defined for a project.
        :param project_name: a project name; the client's default project is used when empty
        :return: api response
        """
        if project_name:
            project_name_to_pass = project_name
        else:
            project_name_to_pass = self.project_name
        return self._http_request(
            method="GET",
            url_suffix=f"/project/{project_name_to_pass}/webhooks",
            params=self.params,
        )

    def get_jobs_list(
        self,
        id_list: list,
        group_path: str,
        job_filter: str,
        job_exec_filter: str,
        group_path_exact: str,
        scheduled_filter: str,
        server_node_uuid_filter: str,
        project_name: str,
    ):
        """
        This function returns a list of all existing jobs.
        :param id_list: list of Job IDs to include
        :param group_path: include all jobs within that group path. if not specified, default is: "*".
        :param job_filter: specify a filter for a job Name, apply to any job name that contains this value
        :param job_exec_filter: specify an exact job name to match
        :param group_path_exact: specify an exact group path to match. if not specified, default is: "*".
        :param scheduled_filter: return only scheduled or only not scheduled jobs. can either be "true" or "false"
        :param server_node_uuid_filter: return all jobs related to a selected server UUID
        :param project_name: A project name to list its jobs
        :return: api response.
        """
        request_params: Dict[str, Any] = {}
        if id_list:
            request_params["idlist"] = ",".join(id_list)
        if group_path:
            request_params["groupPath"] = group_path
        if job_filter:
            request_params["jobFilter"] = job_filter
        if job_exec_filter:
            request_params["jobExactFilter"] = job_exec_filter
        if group_path_exact:
            request_params["groupPathExact"] = group_path_exact
        if scheduled_filter:
            request_params["scheduledFilter"] = scheduled_filter
        if server_node_uuid_filter:
            request_params["serverNodeUUIDFilter"] = server_node_uuid_filter
        project_name_to_pass = project_name if project_name else self.project_name
        request_params.update(self.params)
        return self._http_request(
            method="GET",
            url_suffix=f"/project/{project_name_to_pass}/jobs",
            params=request_params,
        )

    def execute_job(
        self,
        job_id: str,
        arg_string: str,
        log_level: str,
        as_user: str,
        node_filter: str,
        run_at_time: str,
        options: dict,
        run_at_time_raw: str,
    ):
        """
        This function runs an existing job
        :param arg_string: execution arguments for the selected job: -opt1 value1 -opt2 value2
        :param job_id: id of the job you want to execute
        :param log_level: specifying the loglevel to use: 'DEBUG','VERBOSE','INFO','WARN','ERROR'
        :param as_user: identifying the user who ran the job
        :param node_filter: can be a node filter string
        :param run_at_time: select a time to run the job. can be either in: 1 hour, 1 week, 1 day.
        :param options: add options for running a job
        :param run_at_time_raw: select a time to run the job in iso 8601 time as string
        :return: api response
        """
        request_body: Dict[str, Any] = {}
        if arg_string:
            request_body["argString"] = arg_string
        if log_level:
            request_body["loglevel"] = log_level
        if as_user:
            request_body["asUser"] = as_user
        if node_filter:
            request_body["filter"] = node_filter
        if options:
            request_body["options"] = options
        # A computed relative time takes precedence over the raw ISO time.
        if run_at_time:
            request_body["runAtTime"] = run_at_time
        elif run_at_time_raw:
            request_body["runAtTime"] = run_at_time_raw
        return self._http_request(
            method="POST",
            url_suffix=f"/job/{job_id}/executions",
            params=self.params,
            # NOTE(review): this sends the Python repr of the dict (single
            # quotes), not JSON — confirm the server accepts it before
            # switching to json.dumps.
            data=str(request_body),
        )

    def retry_job(
        self,
        job_id: str,
        arg_string: str,
        log_level: str,
        as_user: str,
        failed_nodes: str,
        execution_id: str,
        options: dict,
    ):
        """
        This function retry running a failed execution.
        :param arg_string: execution arguments for the selected job: -opt1 value1 -opt2 value2
        :param job_id: id of the job you want to execute
        :param log_level: specifying the log level to use: 'DEBUG','VERBOSE','INFO','WARN','ERROR'
        :param as_user: identifying the user who ran the job
        :param failed_nodes: can either be true or false. true for run all nodes and false for running only failed nodes
        :param execution_id: specifies which execution to rerun
        :param options: add options for running a job
        :return: api response
        """
        request_body: Dict[str, Any] = {}
        if arg_string:
            request_body["argString"] = arg_string
        if log_level:
            request_body["loglevel"] = log_level
        if as_user:
            request_body["asUser"] = as_user
        if failed_nodes:
            request_body["failedNodes"] = failed_nodes
        if options:
            request_body["options"] = options
        return self._http_request(
            method="POST",
            url_suffix=f"/job/{job_id}/retry/{execution_id}",
            params=self.params,
            data=str(request_body),
        )

    def job_execution_query(
        self,
        status_filter: str,
        aborted_by_filter: str,
        user_filter: str,
        recent_filter: str,
        older_filter: str,
        begin: str,
        end: str,
        adhoc: str,
        job_id_list_filter: list,
        exclude_job_id_list_filter: list,
        job_list_filter: list,
        exclude_job_list_filter: list,
        group_path: str,
        group_path_exact: str,
        exclude_group_path: str,
        exclude_group_path_exact: str,
        job_filter: str,
        exclude_job_filter: str,
        job_exact_filter: str,
        exclude_job_exact_filter: str,
        execution_type_filter: str,
        max_results: Optional[int],
        offset: Optional[int],
        project_name: str,
    ):
        """
        This function returns previous and active executions
        :param status_filter: execution status, can be either: "running", succeeded", "failed" or "aborted"
        :param aborted_by_filter: Username who aborted an execution
        :param user_filter: Username who started the execution
        :param recent_filter: for specify when the execution has occur. the format is 'XY' when 'X' is a number and 'Y'
        can be: h - hour, d - day, w - week, m - month, y - year
        :param older_filter: return executions that completed before the specified relative period of time. works with
        the same format as 'recent_filter'
        :param begin: Specify exact date for earliest execution completion time
        :param end: Specify exact date for latest execution completion time
        :param adhoc: can be true or false. true for include Adhoc executions
        :param job_id_list_filter: specify a Job IDs to filter by
        :param exclude_job_id_list_filter: specify a Job IDs to exclude
        :param job_list_filter: specify a full job group/name to include.
        :param exclude_job_list_filter: specify a full Job group/name to exclude
        :param group_path: specify a group or partial group to include all jobs within that group path.
        :param group_path_exact: like 'group_path' but you need to specify an exact group path to match
        :param exclude_group_path: specify a group or partial group path to exclude all jobs within that group path
        :param exclude_group_path_exact: specify a group or partial group path to exclude jobs within that group path
        :param job_filter: provide here a job name to query
        :param exclude_job_filter: provide here a job name to exclude
        :param job_exact_filter: provide here an exact job name to match
        :param exclude_job_exact_filter: specify an exact job name to exclude
        :param execution_type_filter: specify the execution type, can be: 'scheduled', 'user' or 'user-scheduled'
        :param max_results: maximum number of results to get from the api
        :param offset: offset for first result to include
        :param project_name: the project name that you want to get its execution
        :return: api response
        """
        request_params: Dict[str, Any] = {}
        if status_filter:
            request_params["statusFilter"] = status_filter
        if aborted_by_filter:
            request_params["abortedbyFilter"] = aborted_by_filter
        if user_filter:
            request_params["userFilter"] = user_filter
        if recent_filter:
            request_params["recentFilter"] = recent_filter
        if older_filter:
            request_params["olderFilter"] = older_filter
        if begin:
            request_params["begin"] = begin
        if end:
            request_params["end"] = end
        if adhoc:
            request_params["adhoc"] = adhoc
        if job_id_list_filter:
            request_params["jobIdListFilter"] = job_id_list_filter
        if exclude_job_id_list_filter:
            request_params["excludeJobIdListFilter"] = exclude_job_id_list_filter
        if job_list_filter:
            request_params["jobListFilter"] = job_list_filter
        if exclude_job_list_filter:
            request_params["excludeJobListFilter"] = exclude_job_list_filter
        if group_path:
            request_params["groupPath"] = group_path
        if group_path_exact:
            request_params["groupPathExact"] = group_path_exact
        if exclude_group_path:
            request_params["excludeGroupPath"] = exclude_group_path
        if exclude_group_path_exact:
            request_params["excludeGroupPathExact"] = exclude_group_path_exact
        if job_filter:
            request_params["jobFilter"] = job_filter
        if exclude_job_filter:
            request_params["excludeJobFilter"] = exclude_job_filter
        if job_exact_filter:
            request_params["jobExactFilter"] = job_exact_filter
        if exclude_job_exact_filter:
            request_params["excludeJobExactFilter"] = exclude_job_exact_filter
        if execution_type_filter:
            request_params["executionTypeFilter"] = execution_type_filter
        if offset:
            request_params["offset"] = offset
        project_name_to_pass = project_name if project_name else self.project_name
        # "max" is always sent, defaulting to MAX_RESULTS.  (The original
        # also set it conditionally above, which this line overwrote.)
        request_params["max"] = max_results if max_results else MAX_RESULTS
        request_params.update(self.params)
        return self._http_request(
            # NOTE(review): the executions *query* endpoint is requested with
            # POST here — confirm against the server's API version.
            method="POST",
            url_suffix=f"/project/{project_name_to_pass}/executions",
            params=request_params,
        )

    def job_execution_output(self, execution_id: int):
        """
        This function gets metadata regarding workflow state
        :param execution_id: id to execute.
        :return: api response
        """
        return self._http_request(
            method="GET",
            url_suffix=f"/execution/{execution_id}/output/state",
            params=self.params,
        )

    def job_execution_abort(self, execution_id: int):
        """
        This function aborts live executions
        :param execution_id: id to abort execution
        :return: api response
        """
        return self._http_request(
            method="GET",
            url_suffix=f"/execution/{execution_id}/abort",
            params=self.params,
        )

    def adhoc_run(
        self,
        project_name: str,
        exec_command: str,
        node_thread_count: str,
        node_keepgoing: str,
        as_user: str,
        node_filter: str,
    ):
        """
        This function executes shell commands in nodes.
        :param project_name: project to run the command on
        :param exec_command: the shell command that you want to run
        :param node_thread_count: threadcount to use
        :param node_keepgoing: 'true' for continue executing on other nodes after a failure. 'false' otherwise
        :param as_user: specifies a username identifying the user who ran the command
        :param node_filter: node filter to add
        :return: api response
        """
        request_params: Dict[str, Any] = {}
        if exec_command:
            request_params["exec"] = exec_command
        if node_thread_count:
            request_params["nodeThreadcount"] = node_thread_count
        if node_keepgoing:
            request_params["nodeKeepgoing"] = node_keepgoing
        if as_user:
            request_params["asUser"] = as_user
        if node_filter:
            request_params["filter"] = node_filter
        if project_name:
            project_name_to_pass = project_name
        else:
            project_name_to_pass = self.project_name
        request_params.update(self.params)
        return self._http_request(
            method="GET",
            url_suffix=f"/project/{project_name_to_pass}/run/command",
            params=request_params,
        )

    def adhoc_script_run_from_url(
        self,
        project_name: str,
        script_url: str,
        node_thread_count: str,
        node_keepgoing: str,
        as_user: str,
        node_filter: str,
        script_interpreter: str,
        interpreter_args_quoted: str,
        file_extension: str,
        arg_string: str,
    ):
        """
        This function runs a script downloaded from a URL
        :param project_name: project to run the command on
        :param script_url: a URL pointing to a script file
        :param node_thread_count: threadcount to use
        :param node_keepgoing: 'true' for continue executing on other nodes after a failure. false otherwise
        :param as_user: specifies a username identifying the user who ran the command
        :param node_filter: node filter string
        :param script_interpreter: a command to use to run the script
        :param interpreter_args_quoted: if true, the script file and arguments will be quoted as the last argument to
        the script_interpreter. false otherwise.
        :param file_extension: extension of the script file
        :param arg_string: arguments to pass to the script when executed.
        :return: api response
        """
        request_params: Dict[str, Any] = {}
        if node_thread_count:
            request_params["nodeThreadcount"] = node_thread_count
        if node_keepgoing:
            request_params["nodeKeepgoing"] = node_keepgoing
        if as_user:
            request_params["asUser"] = as_user
        if node_filter:
            request_params["filter"] = node_filter
        if script_interpreter:
            request_params["scriptInterpreter"] = script_interpreter
        if interpreter_args_quoted:
            request_params["interpreterArgsQuoted"] = interpreter_args_quoted
        if file_extension:
            request_params["fileExtension"] = file_extension
        if arg_string:
            request_params["argString"] = arg_string
        if project_name:
            project_name_to_pass = project_name
        else:
            project_name_to_pass = self.project_name
        request_params.update(self.params)
        # The script URL is posted as a form field.
        self._headers["Content-Type"] = "application/x-www-form-urlencoded"
        return self._http_request(
            method="POST",
            data={"scriptURL": script_url},
            url_suffix=f"/project/{project_name_to_pass}/run/url",
            params=request_params,
        )

    def webhook_event_send(self, auth_token: str, options: str, free_json: str):
        """
        This function posts data to the webhook endpoint
        :param options: data that you want to post as dict
        :param free_json: data you want to post as json
        :param auth_token: auth token of the webhook
        :return: api response
        """
        # Prefer `options`, then `free_json`.  Previously, when neither was
        # supplied, `request_params` was left unbound and the call raised
        # NameError; now an empty payload is sent instead.
        request_params: str = options if options else free_json
        return self._http_request(
            method="POST",
            url_suffix=f"/webhook/{auth_token}",
            params=self.params,
            data=request_params,
        )

    def adhoc_script_run(
        self,
        project_name: str,
        arg_string: str,
        node_thread_count: str,
        node_keepgoing: str,
        as_user: str,
        node_filter: str,
        script_interpreter: str,
        interpreter_args_quoted: str,
        file_extension: str,
        entry_id: str,
    ):
        """
        This function runs a script from file
        :param project_name: project to run the script file
        :param arg_string: arguments for the script when executed
        :param node_thread_count: threadcount to use
        :param node_keepgoing: 'true' for continue executing on other nodes after a failure. false otherwise
        :param as_user: identifying the user who ran the job
        :param node_filter: node filter string
        :param script_interpreter: a command to use to run the script
        :param interpreter_args_quoted: if true, the script file and arguments will be quoted as the last argument to
        :param file_extension: extension of the script file
        :param entry_id: Demisto id for the uploaded script file you want to run
        :return: api response
        """
        request_params: Dict[str, str] = {}
        if arg_string:
            request_params["argString"] = arg_string
        if node_thread_count:
            request_params["nodeThreadcount"] = node_thread_count
        if node_keepgoing:
            request_params["nodeKeepgoing"] = node_keepgoing
        if as_user:
            request_params["asUser"] = as_user
        if script_interpreter:
            request_params["scriptInterpreter"] = script_interpreter
        if interpreter_args_quoted:
            request_params["interpreterArgsQuoted"] = interpreter_args_quoted
        if file_extension:
            request_params["fileExtension"] = file_extension
        if node_filter:
            request_params["filter"] = node_filter
        if project_name:
            project_name_to_pass = project_name
        else:
            project_name_to_pass = self.project_name
        file_path = demisto.getFilePath(entry_id).get("path", None)
        if not file_path:
            raise DemistoException(
                f"Could not find file path to the next entry id: {entry_id}. \n"
                f"Please provide another one."
            )
        else:
            file_name = ntpath.basename(file_path)
        request_params.update(self.params)
        # Drop any preset Content-Type so the multipart upload can set its
        # own boundary; pop() avoids the KeyError that `del` raised when the
        # header was absent.
        self._headers.pop("Content-Type", None)
        with open(file_path, "rb") as file:
            self._headers.update(
                {
                    "Content-Disposition": f'form-data; name="file"; filename="{file_name}"'
                }
            )
            return self._http_request(
                method="POST",
                files={"scriptFile": file},
                url_suffix=f"/project/{project_name_to_pass}/run/script",
                params=request_params,
            )
""" HELPER FUNCTIONS """
def filter_results(
    results: Union[list, dict], fields_to_remove: list, remove_signs: list
) -> Union[list, dict]:
    """Drop unwanted fields and strip unwanted characters from result keys.

    :param results: an API response — a single record (dict) or a list of
        records; nested dict values are filtered recursively.
    :param fields_to_remove: keys to omit entirely from the output.
    :param remove_signs: characters to delete from the remaining keys.
    :return: the filtered dict, or the list of filtered dicts.

    NOTE(review): each kept key is checked against every sign separately, so
    a key containing only some of the signs is written once per matching sign
    (with that sign removed) and once per non-matching sign (unchanged).
    With the single-sign lists used in this file that is harmless — confirm
    before calling with more than one sign.
    """
    new_results = []
    if isinstance(results, dict):
        demisto.info("got results as dictionary")
        new_record = {}
        demisto.info("start looping over results")
        for key, value in results.items():
            if key not in fields_to_remove:
                demisto.debug(f'add this key: "{key}" to filtered results')
                # Recurse into nested records before cleaning this key.
                if isinstance(value, dict):
                    demisto.debug(
                        f"found {value} is a dict, calling this function again"
                    )
                    value = filter_results(value, fields_to_remove, remove_signs)
                demisto.info("searching not allowed signs to remove")
                for sign in remove_signs:
                    if sign in key:
                        demisto.debug(
                            f'found "{sign}" in the next key: "{key}". remove it.'
                        )
                        new_record[key.replace(sign, "")] = value
                        demisto.debug("finish remove it")
                    else:
                        demisto.debug(
                            f"not allowed signs were not found. add the next key to filter results: {key}"
                        )
                        new_record[key] = value
                demisto.info("finish remove not allowed signs in results keys")
        demisto.info("finish looping over results")
        return new_record
    else:
        demisto.info("got results as list")
        # Same cleaning as above, applied record-by-record (without logging).
        for record in results:
            new_record = {}
            for key, value in record.items():
                if key not in fields_to_remove:
                    if isinstance(value, dict):
                        value = filter_results(value, fields_to_remove, remove_signs)
                    for sign in remove_signs:
                        if sign in key:
                            new_record[key.replace(sign, "")] = value
                        else:
                            new_record[key] = value
            new_results.append(new_record)
        return new_results
def attribute_pairs_to_dict(attrs_str: Optional[str], delim_char: str = ","):
    """Convert a "key=val<delim>key=val..." string into a dict.

    :param attrs_str: attribute pairs of the form key=val separated by
        `delim_char`; a falsy input is returned unchanged.
    :param delim_char: delimiter character between attribute pairs.
    :return: dict of parsed attributes (or the falsy input itself).
    :raises ValueError: if a chunk does not match the key=val pattern.
    """
    if not attrs_str:
        return attrs_str
    demisto.info("start convert string of multiple inputs to a dictionary")
    attrs = {}
    regex = re.compile(r"(.*)=(.*)")
    demisto.info("start looping over the found keys and values")
    demisto.debug(f"start looping over the next found keys and values: {regex}")
    for chunk in attrs_str.split(delim_char):
        matched = regex.match(chunk)
        if matched is None:
            raise ValueError(f"Could not parse field: {chunk}")
        demisto.debug(
            f"add this key: {matched.group(1)} and this value: {matched.group(2)} to attrs"
        )
        attrs[matched.group(1)] = matched.group(2)
        demisto.debug(
            f"finish adding this key: {matched.group(1)} and this value: {matched.group(2)} to attrs"
        )
    return attrs
def convert_str_to_int(val_to_convert: Optional[str], param_name: str) -> Optional[int]:
    """
    This function gets a parameter from Demisto as string and tries converting it to an integer.
    :param val_to_convert: the value to convert
    :param param_name: name of the parameter being converted (used in the error message)
    :return: the converted value, or None when no value was given
    :raises DemistoException: when the value is not a valid number
    """
    demisto.info(f"start converting {val_to_convert} to integer")
    if val_to_convert:
        try:
            return int(val_to_convert)
        except ValueError:
            # Typo fix in the user-facing message: "most" -> "must".
            raise DemistoException(f"'{param_name}' must be a number.")
        except Exception:
            demisto.error(f"failed to convert {val_to_convert} to integer")
            raise
    demisto.info(f"finish converting {val_to_convert} to integer")
    # Implicitly returns None for empty/missing input, as before.
def calc_run_at_time(selected_time: str) -> str:
    """Convert a relative time ("1 hour", "1 day", "1 week") to ISO-8601.

    :param selected_time: the delta from now, e.g. '1 hour' for one hour from
        now; an empty value yields an empty string.
    :return: the selected future time in ISO-8601 format (UTC), or "".
    """
    if not selected_time:
        return ""
    # dateparser understands natural-language phrases like "in 1 hour UTC".
    return parse(f"in {selected_time} UTC").isoformat()
def collect_headers(entries_list: list) -> list:
    """Collect every key appearing in a list of dicts, formatted for display.

    Underscores in keys are replaced by spaces.  If any entry contains a
    "log" key, "log" is additionally pinned to the front of the result.

    :param entries_list: list of dictionaries
    :return: list of formatted keys
    """
    collected = [""]
    for record in entries_list:
        for field in record:
            if field == "log":
                collected[0] = "log"
            collected.append(field.replace("_", " "))
    # Drop the placeholder slot when no "log" key was ever seen.
    return collected if collected[0] else collected[1:]
def collect_log_from_output(entries: list) -> list:
    """Return only the log entries from an execution-output entry list.

    :param entries: output entries, each a dict carrying a "type" key
    :return: the entries whose type is "log", in their original order
    """
    # Comprehension replaces the manual append loop (same behavior).
    return [entry for entry in entries if entry["type"] == "log"]
""" COMMAND FUNCTIONS """
def job_retry_command(client: Client, args: dict):
    """Retry a failed job execution and return the formatted result.

    :param client: Rundeck API client
    :param args: command arguments from Demisto
    :return: CommandResults with the new execution under Rundeck.ExecutedJobs
    """
    parsed_options: dict = attribute_pairs_to_dict(args.get("options", ""))
    response = client.retry_job(
        args.get("job_id", ""),
        args.get("arg_string", ""),
        args.get("log_level", ""),
        args.get("as_user", ""),
        args.get("failed_nodes", ""),
        args.get("execution_id", ""),
        parsed_options,
    )
    if not isinstance(response, dict):
        raise DemistoException(f"Got unexpected output from api: {response}")
    cleaned: dict = filter_results(response, ["href", "permalink"], ["-"])  # type: ignore
    table_headers = [key.replace("-", " ") for key in cleaned.keys()]
    # NOTE(review): the table title says "Execute Job:" even for a retry —
    # kept as-is to preserve behavior; confirm whether it should differ.
    readable = tableToMarkdown(
        "Execute Job:", cleaned, headers=table_headers, headerTransform=pascalToSpace
    )
    return CommandResults(
        readable_output=readable,
        outputs_prefix="Rundeck.ExecutedJobs",
        outputs=cleaned,
        outputs_key_field="id",
    )
def execute_job_command(client: Client, args: dict):
    """Run an existing job and return the formatted execution details.

    :param client: Rundeck API client
    :param args: command arguments from Demisto
    :return: CommandResults with the execution under Rundeck.ExecutedJobs
    """
    scheduled_time: str = calc_run_at_time(args.get("run_at_time", ""))
    parsed_options: dict = attribute_pairs_to_dict(args.get("options", ""))
    demisto.info("sending execute job request")
    response = client.execute_job(
        args.get("job_id", ""),
        args.get("arg_string", ""),
        args.get("log_level", ""),
        args.get("as_user", ""),
        args.get("filter", ""),
        scheduled_time,
        parsed_options,
        args.get("run_at_time_raw", ""),
    )
    demisto.info("finish sending execute job request")
    if not isinstance(response, dict):
        raise DemistoException(f"Got unexpected output from api: {response}")
    cleaned: dict = filter_results(response, ["href", "permalink"], ["-"])  # type: ignore
    table_headers = [key.replace("-", " ") for key in cleaned.keys()]
    readable = tableToMarkdown(
        "Execute Job:", cleaned, headers=table_headers, headerTransform=pascalToSpace
    )
    return CommandResults(
        readable_output=readable,
        outputs_prefix="Rundeck.ExecutedJobs",
        outputs=cleaned,
        outputs_key_field="id",
    )
def project_list_command(client: Client):
    """
    This function returns a list of all existing projects.
    :param client: Demisto client
    :return: CommandResults object
    """
    demisto.info("sending get project list request")
    result = client.get_project_list()
    demisto.info("finish get project list request")
    if not isinstance(result, list):
        raise DemistoException(f"Got unexpected output from api: {result}")
    if result:
        filtered_results = filter_results(result, ["url"], ["-"])
        # Table headers are derived from the first record's keys.
        headers = [key.replace("_", " ") for key in [*filtered_results[0].keys()]]
        readable_output = tableToMarkdown(
            "Projects List:",
            filtered_results,
            headers=headers,
            headerTransform=pascalToSpace,
        )
    else:
        # Guard for an empty project list — previously filtered_results[0]
        # raised IndexError here.  Matches jobs_list_command's behavior.
        filtered_results = result
        readable_output = "No results were found"
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix="Rundeck.Projects",
        outputs=filtered_results,
        outputs_key_field="name",
    )
def jobs_list_command(client: Client, args: dict):
    """
    This function returns a list of all existing jobs.
    :param client: Demisto client
    :param args: command's arguments
    :return: CommandResults object
    """
    max_results: Optional[int] = convert_str_to_int(
        args.get("max_results", ""), "max_results"
    )
    demisto.info("sending get jobs list request")
    result = client.get_jobs_list(
        argToList(args.get("id_list", [])),
        args.get("group_path", ""),
        args.get("job_filter", ""),
        args.get("job_exec_filter", ""),
        args.get("group_path_exact", ""),
        args.get("scheduled_filter", ""),
        args.get("server_node_uuid_filter", ""),
        args.get("project_name", ""),
    )
    demisto.info("finish sending get jobs list request")
    if not isinstance(result, list):
        raise DemistoException(f"Got unexpected output from api: {result}")
    if not result:
        # Nothing came back — report that instead of building a table.
        return CommandResults(
            readable_output="No results were found",
            outputs_prefix="Rundeck.Jobs",
            outputs=result,
            outputs_key_field="id",
        )
    # Cap the records, falling back to the module default.
    capped = result[: max_results or MAX_RESULTS]
    filtered_results = filter_results(capped, ["href", "permalink"], ["-"])
    headers = [key.replace("_", " ") for key in filtered_results[0].keys()]
    readable_output = tableToMarkdown(
        "Jobs List:",
        filtered_results,
        headers=headers,
        headerTransform=pascalToSpace,
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix="Rundeck.Jobs",
        outputs=filtered_results,
        outputs_key_field="id",
    )
def webhooks_list_command(client: Client, args: dict):
    """
    This function returns a list of all existing webhooks.
    :param client: Demisto client
    :param args: command's arguments
    :return: CommandResults object
    """
    project_name: str = args.get("project_name", "")
    max_results: Optional[int] = convert_str_to_int(args.get('max_results', ''), 'max_results')
    demisto.info("sending get webhooks list request")
    result = client.get_webhooks_list(project_name)
    demisto.info("finish sending get webhooks list request")
    if not isinstance(result, list):
        raise DemistoException(f"Got unexpected output from api: {result}")
    returned_results = result[:max_results] if max_results else result[:MAX_RESULTS]
    if returned_results:
        headers = [key.replace("_", " ") for key in [*returned_results[0].keys()]]
        # Render the capped results — previously the table showed the full,
        # uncapped response while outputs were capped.
        readable_output = tableToMarkdown(
            "Webhooks List:", returned_results, headers=headers, headerTransform=pascalToSpace
        )
    else:
        # Guard for an empty webhook list — previously result[0] raised
        # IndexError here.  Matches jobs_list_command's behavior.
        readable_output = "No results were found"
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix="Rundeck.Webhooks",
        outputs=returned_results,
        outputs_key_field="id",
    )
def job_execution_query_command(client: Client, args: dict):
    """
    This function returns a list of all existing executions.
    :param client: Demisto client
    :param args: command's arguments
    :return: CommandResults object
    :raises DemistoException: if the API response has an unexpected shape
    """
    status_filter: str = args.get("status_filter", "")
    aborted_by_filter: str = args.get("aborted_by_filter", "")
    user_filter: str = args.get("user_filter", "")
    recent_filter: str = args.get("recent_filter", "")
    older_filter: str = args.get("older_filter", "")
    begin: str = args.get("begin", "")
    end: str = args.get("end", "")
    adhoc: str = args.get("adhoc", "")
    job_id_list_filter: list = argToList(args.get("job_id_list_filter", []))
    exclude_job_id_list_filter: list = argToList(
        args.get("exclude_job_id_list_filter", [])
    )
    job_list_filter: list = argToList(args.get("job_list_filter", []))
    exclude_job_list_filter: list = argToList(args.get("exclude_job_list_filter", []))
    group_path: str = args.get("group_path", "")
    group_path_exact: str = args.get("group_path_exact", "")
    exclude_group_path_exact: str = args.get("exclude_group_path_exact", "")
    job_filter: str = args.get("job_filter", "")
    exclude_job_filter: str = args.get("exclude_job_filter", "")
    job_exact_filter: str = args.get("job_exact_filter", "")
    exclude_job_exact_filter: str = args.get("exclude_job_exact_filter", "")
    execution_type_filter: str = args.get("execution_type_filter", "")
    max_results: Optional[int] = convert_str_to_int(args.get("max_results"), "max")
    offset: Optional[int] = convert_str_to_int(args.get("offset"), "offset")
    project_name: str = args.get("project_name", "")
    exclude_group_path: str = args.get("exclude_group_path", "")
    demisto.info("sending job execution query request")
    result = client.job_execution_query(
        status_filter,
        aborted_by_filter,
        user_filter,
        recent_filter,
        older_filter,
        begin,
        end,
        adhoc,
        job_id_list_filter,
        exclude_job_id_list_filter,
        job_list_filter,
        exclude_job_list_filter,
        group_path,
        group_path_exact,
        exclude_group_path,
        exclude_group_path_exact,
        job_filter,
        exclude_job_filter,
        job_exact_filter,
        exclude_job_exact_filter,
        execution_type_filter,
        max_results,
        offset,
        project_name,
    )
    demisto.info("finish sending job execution query request")
    if not isinstance(result, dict):
        raise DemistoException(f"got unexpected results from api: {result}")
    executions: list = result.get("executions", [])
    demisto.info("start filter results from the api")
    filtered_executions = filter_results(executions, ["href", "permalink"], ["-"])
    demisto.info("finish filter results from the api")
    if not isinstance(filtered_executions, list):
        raise DemistoException(f"Got unexpected results from the api: {result}")
    if filtered_executions:
        headers = [key.replace("_", " ") for key in [*filtered_executions[0].keys()]]
        readable_output = tableToMarkdown(
            f'Job Execution Query - got total results: {result.get("paging",{}).get("total")}',
            filtered_executions,
            headers=headers,
            headerTransform=pascalToSpace,
        )
    else:
        # Guard: a query with zero matches previously crashed on [0] indexing.
        readable_output = "No results were found"
    result["executions"] = filtered_executions
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix="Rundeck.ExecutionsQuery",
        outputs=result,
        outputs_key_field="id",
    )
def job_execution_output_command(client: Client, args: dict):
    """
    This function gets the output of a given execution.
    :param client: demisto client object
    :param args: command's arguments
    :return: CommandResults object, or a file result when return_full_output is set
    :raises DemistoException: if the API response is not a dict
    """
    execution_id: Optional[int] = convert_str_to_int(
        args.get("execution_id"), "execution_id"
    )
    return_full_output: bool = argToBoolean(args.get("return_full_output", False))
    max_results: Optional[int] = convert_str_to_int(
        args.get("max_results", ""), "max_results"
    )
    aggregate_log: bool = argToBoolean(args.get("aggregate_log", False))
    demisto.info("sending job execution output request")
    result: dict = client.job_execution_output(execution_id)  # type: ignore
    demisto.info("finish sending job execution output request")
    if not isinstance(result, dict):
        raise DemistoException(f"Got unexpected response: {result}")
    headers_general = [key.replace("_", " ") for key in [*result.keys()]]
    readable_output_general = tableToMarkdown(
        "Job Execution Output:",
        result,
        headers=headers_general,
        headerTransform=pascalToSpace,
    )
    # .get avoids a KeyError when the API response carries no "entries" key.
    if result.get("entries"):
        result["entries"] = result["entries"][:max_results] if max_results else result["entries"][:MAX_RESULTS]
        readable_output_entries = tableToMarkdown(
            "Job Execution Entries View:",
            result["entries"],
            headers=collect_headers(result["entries"]),
            headerTransform=pascalToSpace,
        )
        if aggregate_log:
            # Collapse per-entry logs into one aggregated list for convenience.
            result["logEntries"] = collect_log_from_output(result["entries"])
        human_readable = readable_output_general + readable_output_entries
    else:
        human_readable = readable_output_general
    if return_full_output:
        # Large outputs are returned as a war-room file instead of context.
        return fileResult(args.get("execution_id"), json.dumps(result))
    else:
        return CommandResults(
            readable_output=human_readable,
            outputs_prefix="Rundeck.ExecutionsOutput",
            outputs=result,
            outputs_key_field="id",
        )
def job_execution_abort_command(client: Client, args: dict):
    """
    Abort an active execution.
    :param client: demisto client object
    :param args: command's arguments
    :return: CommandResults object
    :raises DemistoException: if the API response is not a dict
    """
    execution_id: Optional[int] = convert_str_to_int(
        args.get("execution_id"), "execution_id"
    )
    demisto.info("sending job execution abort request")
    response = client.job_execution_abort(execution_id)  # type: ignore
    demisto.info("finish sending job execution abort request")
    if not isinstance(response, dict):
        raise DemistoException(f"Got unexpected response: {response}")
    demisto.info("start filter results from the api")
    cleaned: dict = filter_results(response, ["href", "permalink"], ["-"])  # type: ignore
    demisto.info("finish filter results from the api")
    table_headers = [header.replace("_", " ") for header in cleaned.keys()]
    readable_output = tableToMarkdown(
        "Job Execution Abort:",
        cleaned,
        headers=table_headers,
        headerTransform=pascalToSpace,
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix="Rundeck.Aborted",
        outputs=cleaned,
        outputs_key_field="id",
    )
def adhoc_run_command(client: Client, args: dict):
    """
    Execute a shell command on Rundeck nodes.
    :param client: demisto client object
    :param args: command's arguments
    :return: CommandResults object
    :raises DemistoException: if the API response is not a dict
    """
    demisto.info("sending adhoc run request")
    # Argument order matches the client signature exactly.
    response = client.adhoc_run(
        args.get("project_name", ""),
        args.get("exec_command", ""),
        args.get("node_thread_count", ""),
        args.get("node_keepgoing", ""),
        args.get("as_user", ""),
        args.get("node_filter", ""),
    )
    demisto.info("finish sending adhoc run request")
    if not isinstance(response, dict):
        raise DemistoException(f"Got unexpected response: {response}")
    demisto.info("start filter results from the api")
    cleaned: dict = filter_results(response, ["href", "permalink"], ["-"])  # type: ignore
    demisto.info("finish filter results from the api")
    table_headers = [header.replace("_", " ") for header in cleaned.keys()]
    readable_output = tableToMarkdown(
        "Adhoc Run:", cleaned, headers=table_headers, headerTransform=pascalToSpace
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix="Rundeck.ExecuteCommand",
        outputs=cleaned,
        outputs_key_field="id",
    )
def adhoc_script_run_command(client: Client, args: dict):
    """
    Run a script (uploaded as a war-room file entry) on Rundeck nodes.
    :param client: demisto client object
    :param args: command's arguments
    :return: CommandResults object
    :raises DemistoException: if the API response is not a dict
    """
    demisto.info("sending adhoc script run request")
    # Argument order matches the client signature exactly.
    response = client.adhoc_script_run(
        args.get("project_name", ""),
        args.get("arg_string", ""),
        args.get("node_thread_count", ""),
        args.get("node_keepgoing", ""),
        args.get("as_user", ""),
        args.get("node_filter", ""),
        args.get("script_interpreter", ""),
        args.get("interpreter_args_quoted", ""),
        args.get("file_extension", ""),
        args.get("entry_id", ""),
    )
    demisto.info("finish sending adhoc script run request")
    if not isinstance(response, dict):
        raise DemistoException(f"Got unexpected response: {response}")
    demisto.info("start filter results from the api")
    cleaned: dict = filter_results(response, ["href", "permalink"], ["-"])  # type: ignore
    demisto.info("finish filter results from the api")
    table_headers = [header.replace("_", " ") for header in cleaned.keys()]
    readable_output = tableToMarkdown(
        "Adhoc Run Script:",
        cleaned,
        headers=table_headers,
        headerTransform=pascalToSpace,
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix="Rundeck.ExecuteScriptFile",
        outputs=cleaned,
        outputs_key_field="id",
    )
def adhoc_script_run_from_url_command(client: Client, args: dict):
    """
    Run a script downloaded from a URL on Rundeck nodes.
    :param client: demisto client object
    :param args: command's arguments
    :return: CommandResults object
    :raises DemistoException: if the API response is not a dict
    """
    # Argument order matches the client signature exactly.
    response = client.adhoc_script_run_from_url(
        args.get("project_name", ""),
        args.get("script_url", ""),
        args.get("node_thread_count", ""),
        args.get("node_keepgoing", ""),
        args.get("as_user", ""),
        args.get("node_filter", ""),
        args.get("script_interpreter", ""),
        args.get("interpreter_args_quoted", ""),
        args.get("file_extension", ""),
        args.get("arg_string", ""),
    )
    if not isinstance(response, dict):
        raise DemistoException(f"Got unexpected response: {response}")
    cleaned: dict = filter_results(response, ["href", "permalink"], ["-"])  # type: ignore
    table_headers = [header.replace("_", " ") for header in cleaned.keys()]
    readable_output = tableToMarkdown(
        "Adhoc Run Script From Url:",
        cleaned,
        headers=table_headers,
        headerTransform=pascalToSpace,
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix="Rundeck.ScriptExecutionFromUrl",
        outputs=cleaned,
        outputs_key_field="id",
    )
def webhook_event_send_command(client: Client, args: dict):
    """
    Send an event payload to a Rundeck webhook.
    :param client: demisto client object
    :param args: command's arguments (auth_token, options, json)
    :return: CommandResults object
    :raises DemistoException: if the payload cannot be serialized to JSON
    """
    auth_token = args.get("auth_token", "")
    options: str = args.get("options", "")
    free_json: str = args.get("json", "")
    options_as_dict: dict = attribute_pairs_to_dict(options)
    try:
        demisto.info('start convert "options" argument to str')
        # "options" (key=value pairs) wins over the free-form "json" argument.
        options_as_str: str = json.dumps(options_as_dict) if options_as_dict else free_json
        demisto.info('finish convert "options" argument to str')
    except Exception as e:
        raise DemistoException(
            f'There was a problem converting "json" to json. The reason is: {e}'
        )
    result = client.webhook_event_send(auth_token, options_as_str, free_json)
    table_headers = [key.replace("_", " ") for key in result.keys()]
    readable_output = tableToMarkdown(
        "Webhook event send:", result, headers=table_headers, headerTransform=pascalToSpace
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix="Rundeck.WebhookEvent",
        outputs=result,
        outputs_key_field="id",
    )
def test_module(client: Client, project_name: Optional[str]) -> str:
    """
    Validate connectivity and, when configured, that the project exists.
    :param client: demisto client object
    :param project_name: optional project name from the integration params
    :return: "ok" on success, otherwise a human-readable error string
    """
    try:
        projects_list = client.get_project_list()
    except DemistoException as e:
        if "unauthorized" in str(e):
            return "Authorization Error: make sure your token is correctly set"
        raise
    if not project_name:
        return "ok"
    # A project name was configured: it must exist on the server.
    if any(project.get("name") == project_name for project in projects_list):
        return "ok"
    return (
        f'Could not find the next project: "{project_name}"'
        f". please enter another one or delete it completely."
    )
""" MAIN FUNCTION """
def main() -> None:
    """Parse integration parameters, build the client and dispatch the command.

    Unknown commands fall through silently; all errors are reported via
    return_error.
    """
    params: dict = demisto.params()
    token: str = params.get("token", "")
    project_name: str = params.get("project_name", "")
    # Service API url is the configured base plus the versioned API path.
    base_url: str = urljoin(params["url"], f"/api/{VERSION}")
    verify_certificate = not params.get("insecure", False)
    proxy = params.get("proxy", False)
    args: Dict = demisto.args()
    command = demisto.command()
    demisto.debug(f"Command being called is {command}")
    # All commands with the uniform (client, args) signature.
    command_handlers = {
        "rundeck-jobs-list": jobs_list_command,
        "rundeck-webhooks-list": webhooks_list_command,
        "rundeck-job-execute": execute_job_command,
        "rundeck-job-retry": job_retry_command,
        "rundeck-job-executions-query": job_execution_query_command,
        "rundeck-job-execution-output": job_execution_output_command,
        "rundeck-job-execution-abort": job_execution_abort_command,
        "rundeck-adhoc-command-run": adhoc_run_command,
        "rundeck-adhoc-script-run": adhoc_script_run_command,
        "rundeck-adhoc-script-run-from-url": adhoc_script_run_from_url_command,
        "rundeck-webhook-event-send": webhook_event_send_command,
    }
    try:
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            headers={"Accept": "application/json", "Content-Type": "application/json"},
            proxy=proxy,
            params={"authtoken": f"{token}"},
            project_name=project_name,
        )
        if command == "test-module":
            # Call made when pressing the integration Test button.
            return_results(test_module(client, project_name))
        elif command == "rundeck-projects-list":
            # Takes only the client, so it cannot share the uniform dispatch.
            return_results(project_list_command(client))
        elif command in command_handlers:
            return_results(command_handlers[command](client, args))
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        error_msg = str(e).replace("\\n", "\n")
        return_error(
            f"Failed to execute {demisto.command()} command.\n Error:\n {error_msg}"
        )
""" ENTRY POINT """
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
| |
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
"""Simple wrapper around SQLalchemy
This module hides the complexity of SQLAlchemy to provide a simple interface to
store and manipulate Python objects each with a set of properties. Unlike the
default behaviour of sqlalchemy's declaritive_base, inheritance of objects will
not require "join", rather it creates a separate table. This makes it easy to
use objects around from parts of not-so-related applications.
For example, a ``SourcePackage`` table is created by Grail. Then, PyPM will
extend it as ``BinaryPackage`` which gets extended to ``RepoPackage``. The table
for RepoPackage will be concretely inherited, meaning - there will be just be
one table without having to 'join' to another SourcePackage table.
At the moment, PyPM and Grail use this module. It may not be of use to others,
and we may change the api/behaviour. Hence, it makes sense to keep it as an
internal module.
"""
import sys
import os
from os.path import exists, dirname
from contextlib import contextmanager
import json
from sqlalchemy import Table, Column, MetaData
from sqlalchemy import create_engine
from sqlalchemy.types import String, Text, Boolean, PickleType
from sqlalchemy.orm import sessionmaker, scoped_session, mapper
# A PickleType that will work on both Python 2.x and 3.x
# i.e., if you *write* to a DB entry using Python 3.x, we are letting
# Python 3.x apps to read from it as well.
# WARNING: Ideally, if you are starting a new project, please
# use something else like JSON. See
# http://twitter.com/zzzeek/status/9765871731867648
Pickle2Type = PickleType(protocol=2)
def setup(db_class, simple_object_cls, primary_keys):
    """Wire ``simple_object_cls`` to its own concrete table on ``db_class``.

    One Column is created per name in ``simple_object_cls.FIELDS`` (types
    chosen by _get_best_column_type); names listed in ``primary_keys``
    become primary-key columns. The metadata, mapper class and table are
    stored on ``db_class`` and the class is mapped via sqlalchemy's mapper.
    """
    columns = []
    for cname in simple_object_cls.FIELDS:
        columns.append(
            Column(cname,
                   _get_best_column_type(cname),
                   primary_key=cname in primary_keys))
    metadata = MetaData()
    table = Table(simple_object_cls.__name__, metadata, *columns)
    db_class.metadata = metadata
    db_class.mapper_class = simple_object_cls
    db_class.table = table
    mapper(simple_object_cls, table)
def sqlalchemy_escape(val, escape_char, special_chars):
    """Escape a string for safe use inside a SQL LIKE pattern.

    Every character found in ``special_chars`` -- and the escape character
    itself -- is prefixed with ``escape_char``.

    >>> sqlalchemy_escape("text_table", "\\", "%_")
    'text\_table'
    """
    if sys.version_info[:2] >= (3, 0):
        assert isinstance(val, str)
    else:
        assert isinstance(val, basestring)
    needs_escape = special_chars + escape_char
    return ''.join(
        (escape_char + c) if c in needs_escape else c
        for c in val)
class SimpleDatabase(object):
    """SQLite-backed database handle with session management.

    Derived classes get ``metadata`` assigned by :func:`setup`; this class
    only manages the engine, sessions and schema lifecycle.
    """
    metadata = None  # assigned by setup() in derived classes

    class DoesNotExist(IOError):
        # Raised when the database file is absent and touch=False.
        def __init__(self, path):
            # NOTE(review): super(IOError, self) skips IOError.__init__ and
            # goes straight to Exception; likely meant
            # super(SimpleDatabase.DoesNotExist, self). Behavior is the same
            # for a single message argument, so it is left untouched.
            super(IOError, self).__init__(
                'database file %s does not exist' % path)

    def __init__(self, path, touch=False):
        """Open (or create) the SQLite database at ``path``.

        touch - create database, if it does not exist; otherwise a missing
        file raises ``self.DoesNotExist``.
        """
        self.path = path
        sqlite_uri = 'sqlite:///%s' % self.path
        self.engine = create_engine(sqlite_uri, echo=False)
        self.create_session = sessionmaker(
            bind=self.engine,
            autocommit=False,
            # See the comment by Michael Bayer
            # http://groups.google.com/group/sqlalchemy/browse_thread/thread/7c1eb642435adde7
            # expire_on_commit=False
        )
        self.create_scoped_session = scoped_session(self.create_session)

        if not exists(self.path):
            if touch:
                # Parent directory must already exist; only the file is created.
                assert exists(dirname(self.path)), 'missing: ' + dirname(self.path)
                self.metadata.create_all(self.engine)
            else:
                raise self.DoesNotExist(path)

    def reset(self):
        """Reset the database

        Drop all tables and recreate them
        """
        self.metadata.drop_all(self.engine)
        self.metadata.create_all(self.engine)

    def close(self):
        # Dispose of the engine's connection pool.
        self.engine.dispose()

    @contextmanager
    def transaction(self, session=None):
        """Start a new transaction based on the passed session object. If session
        is not passed, then create one and make sure of closing it finally.
        """
        local_session = None
        if session is None:
            local_session = session = self.create_scoped_session()
        try:
            yield session
        finally:
            # Since ``local_session`` was created locally, close it here itself
            if local_session is not None:
                # but wait!
                # http://groups.google.com/group/sqlalchemy/browse_thread/thread/7c1eb642435adde7
                # To workaround this issue with sqlalchemy, we can either:
                # 1) pass the session object explicitly
                # 2) do not close the session at all (bad idea - could lead to memory leaks)
                #
                # Till pypm implements atomic transations in client.installer,
                # we retain this hack (i.e., we choose (2) for now)
                pass  # local_session.close()

    def __str__(self):
        return '{0.__class__.__name__}<{0.path}>'.format(self)
class SimpleObject(object):
    """Object with a fixed collection of public fields.

    Supports:
    1) automatic initialization of FIELDS in __init__ (all fields required);
    2) inheritance with additional fields;
    3) conversion from other SimpleObject types (extra/missing fields);
    4) interoperation with sqlalchemy.orm (plain ``self.foo = value`` works).
    """

    # Public fields in this object; derived classes override.
    FIELDS = []

    def __init__(self, **kwargs):
        """Initialize with exactly the FIELDS given in ``kwargs``."""
        self.__require_exact_fields(kwargs)
        for name in self.FIELDS:
            setattr(self, name, kwargs[name])

    @classmethod
    def create_from(cls, another, **kwargs):
        """Create from another object of a different SimpleObject type.

        Fields shared with ``cls.FIELDS`` are copied over; ``kwargs``
        supplies (and overrides) the rest.
        """
        values = dict(
            (name, value)
            for name, value in another.get_fields()
            if name in cls.FIELDS)
        values.update(kwargs)
        return cls(**values)

    def get_fields(self):
        """Yield the fields as (name, value) pairs."""
        for name in self.FIELDS:
            yield name, getattr(self, name)

    def to_dict(self):
        """Return the fields as a plain dict."""
        return dict(self.get_fields())

    def to_json(self):
        """Serialize the fields to a JSON string."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_string):
        """Deserialize an instance from a JSON string."""
        return cls(**_remove_unicode_keys(json.loads(json_string)))

    def __require_exact_fields(self, mapping):
        """Assert mapping.keys() == FIELDS -- no extra or missing fields."""
        given = set(mapping.keys())
        expected = set(self.FIELDS)
        if given != expected:
            raise ValueError('\n'.join([
                "{0} got different fields from expected".format(
                    self.__class__),
                " got : {0}".format(list(sorted(given))),
                " expected: {0}".format(list(sorted(expected)))]))
class _get_best_column_type():
    """Callable resolving a column name to its SQLAlchemy column type.

    Replaced by a singleton instance just below, so the module-level name
    behaves like a plain function.
    """
    mapping = {
        'name': String,
        'version': String,
        'keywords': String,
        'home_page': String,
        'license': String,
        'author': String,
        'author_email': String,
        'maintainer': String,
        'maintainer_email': String,
        'osarch': String,
        'pyver': String,
        'pkg_version': String,
        'relpath': String,
        'tags': String,
        'original_source': String,
        'patched_source': String,
        'summary': Text,
        'description': Text,
        'python3': Boolean,
        'metadata_hash': String,
        'install_requires': Pickle2Type,
        'files_list': Pickle2Type,
    }

    def __call__(self, name):
        if name not in self.mapping:
            raise KeyError(
                'missing key. add type for "{0}" in self.mapping'.format(
                    name))
        return self.mapping[name]
# Rebind the name to a singleton instance: callers use it as a function.
_get_best_column_type = _get_best_column_type()
def _remove_unicode_keys(dictobj):
"""Convert keys from 'unicode' to 'str' type.
workaround for <http://bugs.python.org/issue2646>
"""
if sys.version_info[:2] >= (3, 0): return dictobj
assert isinstance(dictobj, dict)
newdict = {}
for key, value in dictobj.items():
if type(key) is unicode:
key = key.encode('utf-8')
newdict[key] = value
return newdict
| |
from werkzeug.serving import make_server
from flask import Flask, render_template, request, current_app
from Utilities import LogThread
import threading
import time
import socket
import sqlite3
import os
import plistlib
import console
import shutil
import ui
from zipfile import ZipFile
from Managers import DBManager, TypeManager
# Flask app served by ServerThread; template/upload dirs are configured there.
app = Flask('myapp')
# Debug mode is enabled for development; disable for production use.
app.debug = True
@app.route('/')
def index():
 """Serve the file-upload landing page (template dir set by ServerThread)."""
 return render_template('upload.html')
@app.route('/uploader', methods = ['GET', 'POST'])
def upload_f():
    """Save a browser-uploaded file into the configured upload directory.

    Returns a plain-text status string on POST; GET returns nothing
    (the form itself lives at '/').
    """
    if request.method == 'POST':
        f = request.files['file']
        loc = current_app.config['fileuploaddir']
        # basename() strips any directory components from the client-supplied
        # filename, preventing path traversal (e.g. "../../evil.zip").
        f.save(os.path.join(loc, os.path.basename(f.filename)))
        return 'file uploaded successfully'
class ServerThread(threading.Thread):
 """Background thread running a werkzeug WSGI server for the Flask app."""
 def __init__(self, app, template_directory, file_upload_directory, port, callback):
  # NOTE(review): ``callback`` is accepted but never used -- confirm whether
  # it was meant to be invoked once the server is up.
  threading.Thread.__init__(self)
  # Point the shared app at the caller's template and upload directories.
  app.config['fileuploaddir'] = file_upload_directory
  app.template_folder = template_directory
  self.srv = make_server('', port, app)
  self.ctx = app.app_context()
  self.ctx.push()
 def run(self):
  # Thread entry point: blocks serving requests until shutdown() is called.
  self.srv.serve_forever()
 def shutdown(self):
  # Stop the WSGI server, unblocking run().
  self.srv.shutdown()
class Transfer(object):
    """Value object describing one docset in the transfer workflow.

    Pure data holder with read/write attributes:
      name     -- display/file name ('' until set)
      status   -- lifecycle text, e.g. 'Not Installed' / 'Installing' / 'Installed'
      path     -- installed docset directory (None until installed)
      zipPath  -- uploaded .zip location (None if not applicable)
      stats    -- progress text shown while installing
      id       -- database row id ('' until known)
      image    -- ui.Image icon (None until resolved)
    """

    def __init__(self):
        self.name = ''
        self.status = ''
        self.path = None
        self.zipPath = None
        self.stats = ''
        self.id = ''
        self.image = None
class TransferManager (object):
 """Manages upload, installation and search of Dash-style docsets.

 Docsets arrive as .zip files via the embedded Flask upload server and are
 extracted into ``Docsets/Transfer``; installed state is recorded through
 DBManager and each docset carries its own sqlite search index.
 """
 def __init__(self, iconPath, typeIconPath):
  # iconPath/typeIconPath: directories containing docset and type icons.
  self.server = None
  self.running = False
  self.typeManager = TypeManager.TypeManager(typeIconPath)
  self.iconPath = iconPath
  self.docsetFolder = 'Docsets/Transfer'
  self.plistPath = 'Contents/Info.plist'
  self.indexPath = 'Contents/Resources/docSet.dsidx'
  self.installThreads = []
  self.uiUpdateThreads = []
  self.__installingDocsets = []
  self.__createDocsetFolder()
 def __createDocsetFolder(self):
  # Ensure the transfer folder exists (parent dirs are assumed present).
  if not os.path.exists(self.docsetFolder):
   os.mkdir(self.docsetFolder)
 def startTransferService(self, template_directory, file_upload_directory, port, callback):
  """Start the upload web server; returns {'hostname', 'port', 'ip'}."""
  self.server = ServerThread(app, template_directory, file_upload_directory, port, callback)
  self.server.start()
  self.running = True
  ip = 'Unknown'
  try:
   ip = socket.gethostbyname(socket.gethostname())
  except socket.gaierror:
   # Hostname not resolvable: discover the outbound address via a UDP
   # socket towards a public IP (no packet is actually sent).
   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
   s.connect(("8.8.8.8", 80))
   ip = s.getsockname()[0]
   s.close()
  return {'hostname':socket.gethostname(), 'port':port, 'ip':ip}
 def stopTransferService(self, action):
  """Shut the upload server down, then invoke the ``action`` callback."""
  self.server.shutdown()
  self.server = None
  self.running = False
  action()
 def __installDocset(self, docset, refresh):
  """Extract ``docset``'s zip, register it in the DB, and clean up.

  Runs on a worker thread (see installDocset); ``refresh`` is a UI
  callback invoked whenever progress text changes.
  """
  extract_location = self.docsetFolder
  docset.stats = 'Preparing to install: This might take a while.'
  refresh()
  zip = ZipFile(docset.zipPath, mode='r')
  # Locate the .docset directory inside the archive.
  ll = [name for name in zip.namelist() if '.docset' in name]
  if len(ll) > 0:
   n = ll[0]
   m = os.path.join(self.docsetFolder, n)
   docset.stats = 'Preparing to extract'
   refresh()
   l = zip.infolist()
   zip.extractall(path=extract_location, members = self.track_progress(l, docset, len(l), refresh))
   zip.close()
   os.remove(docset.zipPath)
   plistpath = os.path.join(m, self.plistPath)
   name = docset.name
   image = ''
   # Prefer the bundle's own name/identifier from Info.plist when present.
   with open(plistpath, 'rb') as f:
    pp = plistlib.load(f)
    if 'CFBundleName' in pp.keys():
     name = pp['CFBundleName']
    if 'CFBundleIdentifier' in pp.keys():
     image = pp['CFBundleIdentifier']
   dbManager = DBManager.DBManager()
   dbManager.DocsetInstalled(name, m, 'transfer', image, 0.0)
   if docset in self.__installingDocsets:
    self.__installingDocsets.remove(docset)
   docset.status = 'Cleaning up...'
   refresh()
   # macOS zips often carry a metadata folder; remove it if extracted.
   cleanup_path = os.path.join(self.docsetFolder,'__MACOSX')
   if os.path.exists(cleanup_path):
    shutil.rmtree(cleanup_path)
   docset.status = 'Installed'
   refresh()
  else:
   raise Exception('Unknown docset structure')
 def track_progress(self, members, docset, totalFiles, refresh):
  """Generator wrapping zip members to update progress text during extraction."""
  i = 0
  for member in members:
   i = i + 1
   done = 100 * i / totalFiles
   docset.stats = 'installing: ' + str(round(done,2)) + '% ' + str(i) + ' / '+ str(totalFiles)
   # Throttle UI refreshes to every 100th file.
   if i % 100 == 0:
    refresh()
   yield member
  refresh()
 def installDocset(self, docset, action, refresh_main_view):
  """Kick off installation of ``docset`` on a background thread."""
  self.__installingDocsets.append(docset)
  docset.status = 'Installing'
  action()
  installThread = LogThread.LogThread(target=self.__installDocset, args=(docset,refresh_main_view,))
  self.installThreads.append(installThread)
  installThread.start()
  # updateThread = LogThread.LogThread(target=self.updateUi, args=(action,installThread,))
  # self.uiUpdateThreads.append(updateThread)
  # updateThread.start()
 def deleteDocset(self, docset, post_action, confirm = True):
  """Remove an installed docset, optionally after user confirmation."""
  but = 1
  if confirm:
   # console.alert returns 1 when the 'Ok' button is tapped.
   but = console.alert('Are you sure?', 'Would you like to delete the docset, ' + docset.name, 'Ok')
  if but == 1:
   dbmanager = DBManager.DBManager()
   dbmanager.DocsetRemoved(docset.id)
   shutil.rmtree(docset.path)
   docset.status = 'Not Installed'
   if not post_action == None:
    post_action()
   docset.path = None
 def __getIconWithName(self, name):
  # Resolve an icon image by name, falling back to 'Other.png'.
  imgPath = os.path.join(os.path.abspath('.'), self.iconPath, name+'.png')
  if not os.path.exists(imgPath):
   imgPath = os.path.join(os.path.abspath('.'), self.iconPath, 'Other.png')
  return ui.Image.named(imgPath)
 def __getAvailableDocsets(self):
  # Uploaded (not yet installed) zip files sitting in the transfer folder.
  d = []
  for file in os.listdir(self.docsetFolder):
   if file.endswith('.zip') or file.endswith('.ZIP'):
    doc = Transfer()
    doc.name = file
    doc.status = 'Not Installed'
    doc.zipPath = os.path.join(os.path.abspath('.'), self.docsetFolder, file)
    d.append(doc)
  return d
 def __getInstallingDocsets(self):
  return self.__installingDocsets
 def __getInstalledDocsets(self):
  # Build Transfer objects for docsets the DB records as installed.
  # DB row layout (by index): 0=id, 1=name, 2=relative path, 4=icon name.
  ds = []
  dbManager = DBManager.DBManager()
  t = dbManager.InstalledDocsetsByType('transfer')
  ds = []
  for d in t:
   aa = Transfer()
   aa.name = d[1]
   aa.id = d[0]
   aa.path = os.path.join(os.path.abspath('.'),d[2])
   aa.image = self.__getIconWithName(d[4])
   aa.status = 'Installed'
   # aa.version = d[5]
   ds.append(aa)
  return ds
 def getInstalledDocsets(self):
  return self.__getInstalledDocsets()
 def getAvailableDocsets(self):
  """Return {'Available': [...], 'Installed': [...]}, merging install progress."""
  dic = {}
  ava = self.__getAvailableDocsets()
  ins = self.__getInstallingDocsets()
  inst = self.getInstalledDocsets()
  # Copy live progress from installing docsets onto the matching entries.
  if len(ins) > 0:
   for i in ins:
    for a in ava:
     if i.name == a.name:
      a.status = 'Installing'
      a.stats = i.stats
  if len(ava) > 0:
   dic['Available'] = ava
  if len(inst) > 0:
   dic['Installed'] = inst
  return dic
 def getIndexesbyTypeForDocset(self, docset, type):
  """Query the docset's sqlite index for all entries of a given type."""
  # NOTE(review): the parameter ``type`` shadows the builtin and is reused
  # as a loop variable below -- works, but worth renaming.
  indexes = []
  path = docset.path
  indexPath = os.path.join(path, self.indexPath)
  conn = sqlite3.connect(indexPath)
  sql = 'SELECT type, name, path FROM searchIndex WHERE type = (?) ORDER BY name COLLATE NOCASE'
  c = conn.execute(sql, (type.name,))
  data = c.fetchall()
  conn.close()
  # Cache type lookups so each distinct type name is resolved once.
  dTypes ={}
  type = None
  for t in data:
   if t[0] in dTypes.keys():
    type= dTypes[t[0]]
   else:
    type = self.typeManager.getTypeForName(t[0])
    dTypes[t[0]] = type
   indexes.append({'type':type, 'name':t[1],'path':t[2]})
  return indexes
 def getTypesForDocset(self, docset):
  """Return the distinct entry types present in a docset's search index."""
  types = []
  path = docset.path
  indexPath = os.path.join(path, self.indexPath)
  conn = sqlite3.connect(indexPath)
  sql = 'SELECT type FROM searchIndex GROUP BY type ORDER BY type COLLATE NOCASE'
  c = conn.execute(sql)
  data = c.fetchall()
  conn.close()
  for t in data:
   types.append(self.typeManager.getTypeForName(t[0]))
  return types
 def updateUi(self, action, t):
  # Poll ``action`` every 0.5s while worker thread ``t`` is alive,
  # then refresh one final time.
  while t.is_alive():
   action()
   time.sleep(0.5)
  action()
 def getIndexesbyNameForAllDocsets(self, name):
  """Search every installed docset for ``name``; merge the ranked buckets."""
  if name == None or name == '':
   return {}
  else:
   docsets = self.getInstalledDocsets()
   indexes = {}
   for d in docsets:
    ind = self.getIndexesbyNameForDocsetSearch(d, name)
    for k in ind:
     if not k in indexes.keys():
      indexes[k] = []
     indexes[k].extend(ind[k])
   return indexes
 def getIndexesbyNameForDocsetSearch(self, docset, name):
  """Ranked LIKE-search over one docset's index.

  Returns a dict with buckets 'first'..'fourth', from exact-ish matches
  down to loose substring matches; each bucket's queries exclude the
  patterns already covered by the earlier ones.
  """
  if name == None or name == '':
   return []
  else:
   ind = {}
   path = docset.path
   indexPath = os.path.join(path, self.indexPath)
   conn = sqlite3.connect(indexPath)
   sql = 'SELECT type, name, path FROM searchIndex WHERE name LIKE (?) ORDER BY name COLLATE NOCASE'
   c = conn.execute(sql, (name, ))
   data = {'first' : c.fetchall()}
   sql = 'SELECT type, name, path FROM searchIndex WHERE name LIKE (?) AND name NOT LIKE (?) ORDER BY name COLLATE NOCASE'
   c = conn.execute(sql, (name.replace(' ','%'), name, ))
   data['second'] = c.fetchall()
   sql = 'SELECT type, name, path FROM searchIndex WHERE name LIKE (?) AND name NOT LIKE (?) AND name NOT LIKE (?) ORDER BY name COLLATE NOCASE'
   c = conn.execute(sql, (name.replace(' ','%')+'%', name.replace(' ','%'), name, ))
   data['third'] = c.fetchall()
   sql = 'SELECT type, name, path FROM searchIndex WHERE name LIKE (?) AND name NOT LIKE (?) AND name NOT LIKE (?) AND name NOT LIKE (?) ORDER BY name COLLATE NOCASE'
   c = conn.execute(sql, ('%'+name.replace(' ','%')+'%',name.replace(' ','%')+'%',name.replace(' ','%'), name, ))
   data['fourth'] = c.fetchall()
   conn.close()
   dTypes = {}
   for k in data:
    ind[k] = []
    for t in data[k]:
     url = 'file://' + os.path.join(path, 'Contents/Resources/Documents', t[2])
     # NOTE(review): '<' and '>' encodings look swapped here ('<' is %3C,
     # '>' is %3E) -- confirm before changing, downstream may rely on it.
     url = url.replace(' ', '%20').replace('<', '%3E').replace('>', '%3C')
     type = None
     if t[0] in dTypes.keys():
      type= dTypes[t[0]]
     else:
      type = self.typeManager.getTypeForName(t[0])
      dTypes[t[0]] = type
     ind[k].append({'name':t[1], 'path':url, 'icon':None,'docsetname':docset.name,'type':type, 'callbackOverride':'', 'docset': docset})
   return ind
if __name__ == '__main__':
    # Smoke test for the transfer service.
    # NOTE(review): the original called TransferManager() with no arguments,
    # startTransferService with three and stopTransferService with none --
    # all immediate TypeErrors against the signatures above. Required
    # arguments are supplied here; icon paths are placeholders to confirm.
    tm = TransferManager('Images/icons', 'Images/types')
    # callback is unused by ServerThread, so None is safe here.
    print(tm.startTransferService('../Resources', '.', 8080, None))
    time.sleep(15)
    tm.stopTransferService(lambda: None)
    print('stopped')
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import strutils
import six.moves.urllib.parse as urlparse
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
import nova.context
from nova import exception
from nova.i18n import _
from nova import objects
from nova import quota
from nova import utils
# Module-level quota engine singleton shared by all controller instances.
QUOTAS = quota.QUOTAS
# Keys allowed in a quota_set request body that are not quota resources.
NON_QUOTA_KEYS = ['tenant_id', 'id', 'force']
# Quotas that are only enabled by specific extensions
EXTENDED_QUOTAS = {'server_groups': 'os-server-group-quotas',
                   'server_group_members': 'os-server-group-quotas'}
# Policy authorizers, one per controller action.
authorize_update = extensions.extension_authorizer('compute', 'quotas:update')
authorize_show = extensions.extension_authorizer('compute', 'quotas:show')
authorize_delete = extensions.extension_authorizer('compute', 'quotas:delete')
class QuotaSetsController(wsgi.Controller):
    """API controller for showing, updating and deleting quota sets."""
    # Quota resource names this controller accepts; trimmed in __init__
    # according to which optional extensions are loaded.
    supported_quotas = []
    def __init__(self, ext_mgr):
        # ext_mgr: the loaded-extensions manager, used to toggle optional
        # behavior (per-user quotas, force updates, extension quotas).
        self.ext_mgr = ext_mgr
        self.supported_quotas = QUOTAS.resources
        for resource, extension in EXTENDED_QUOTAS.items():
            # Drop extension-specific quotas whose extension is not loaded.
            if not self.ext_mgr.is_loaded(extension):
                self.supported_quotas.remove(resource)
    def _format_quota_set(self, project_id, quota_set):
        """Convert the quota object to a result dict."""
        if project_id:
            result = dict(id=str(project_id))
        else:
            result = {}
        # Only expose quotas that this deployment actually supports.
        for resource in self.supported_quotas:
            if resource in quota_set:
                result[resource] = quota_set[resource]
        return dict(quota_set=result)
    def _validate_quota_limit(self, resource, limit, minimum, maximum):
        """Raise HTTPBadRequest unless minimum <= limit <= maximum,
        where -1 means unlimited on any side of the comparison."""
        # NOTE: -1 is a flag value for unlimited
        if limit < -1:
            msg = (_("Quota limit %(limit)s for %(resource)s "
                     "must be -1 or greater.") %
                   {'limit': limit, 'resource': resource})
            raise webob.exc.HTTPBadRequest(explanation=msg)
        def conv_inf(value):
            # Map the -1 "unlimited" sentinel to +infinity for comparisons.
            return float("inf") if value == -1 else value
        if conv_inf(limit) < conv_inf(minimum):
            msg = (_("Quota limit %(limit)s for %(resource)s must "
                     "be greater than or equal to already used and "
                     "reserved %(minimum)s.") %
                   {'limit': limit, 'resource': resource, 'minimum': minimum})
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if conv_inf(limit) > conv_inf(maximum):
            msg = (_("Quota limit %(limit)s for %(resource)s must be "
                     "less than or equal to %(maximum)s.") %
                   {'limit': limit, 'resource': resource, 'maximum': maximum})
            raise webob.exc.HTTPBadRequest(explanation=msg)
    def _get_quotas(self, context, id, user_id=None, usages=False):
        """Fetch quotas for a project (or one user within it).
        With usages=False the result is flattened to {resource: limit}."""
        if user_id:
            values = QUOTAS.get_user_quotas(context, id, user_id,
                                            usages=usages)
        else:
            values = QUOTAS.get_project_quotas(context, id, usages=usages)
        if usages:
            return values
        else:
            return dict((k, v['limit']) for k, v in values.items())
    def show(self, req, id):
        """GET /os-quota-sets/{id}: show a project's (or user's) quotas."""
        context = req.environ['nova.context']
        authorize_show(context)
        params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
        user_id = None
        if self.ext_mgr.is_loaded('os-user-quotas'):
            # Per-user quotas are only addressable when the extension is on.
            user_id = params.get('user_id', [None])[0]
        try:
            nova.context.authorize_project_context(context, id)
            return self._format_quota_set(id,
                self._get_quotas(context, id, user_id=user_id))
        except exception.Forbidden:
            raise webob.exc.HTTPForbidden()
    def update(self, req, id, body):
        """PUT /os-quota-sets/{id}: update quota limits for a project."""
        context = req.environ['nova.context']
        authorize_update(context)
        project_id = id
        bad_keys = []
        # By default, we can force update the quota if the extended
        # is not loaded
        force_update = True
        extended_loaded = False
        if self.ext_mgr.is_loaded('os-extended-quotas'):
            # force optional has been enabled, the default value of
            # force_update need to be changed to False
            extended_loaded = True
            force_update = False
        user_id = None
        if self.ext_mgr.is_loaded('os-user-quotas'):
            # Update user quotas only if the extended is loaded
            params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
            user_id = params.get('user_id', [None])[0]
        try:
            settable_quotas = QUOTAS.get_settable_quotas(context, project_id,
                                                         user_id=user_id)
        except exception.Forbidden:
            raise webob.exc.HTTPForbidden()
        if not self.is_valid_body(body, 'quota_set'):
            msg = _("quota_set not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        quota_set = body['quota_set']
        # First pass: validate keys and value types without writing anything.
        for key, value in quota_set.items():
            if (key not in self.supported_quotas
                and key not in NON_QUOTA_KEYS):
                bad_keys.append(key)
                continue
            if key == 'force' and extended_loaded:
                # only check the force optional when the extended has
                # been loaded
                force_update = strutils.bool_from_string(value)
            elif key not in NON_QUOTA_KEYS and value:
                try:
                    value = utils.validate_integer(value, key)
                except exception.InvalidInput as e:
                    raise webob.exc.HTTPBadRequest(
                        explanation=e.format_message())
        if bad_keys:
            msg = _("Bad key(s) %s in quota_set") % ",".join(bad_keys)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        # Second pass: range-check (unless forced) and persist each limit.
        for key, value in quota_set.items():
            if key in NON_QUOTA_KEYS or (not value and value != 0):
                continue
            # validate whether already used and reserved exceeds the new
            # quota, this check will be ignored if admin want to force
            # update
            value = int(value)
            if not force_update:
                minimum = settable_quotas[key]['minimum']
                maximum = settable_quotas[key]['maximum']
                self._validate_quota_limit(key, value, minimum, maximum)
            try:
                objects.Quotas.create_limit(context, project_id,
                                            key, value, user_id=user_id)
            except exception.QuotaExists:
                objects.Quotas.update_limit(context, project_id,
                                            key, value, user_id=user_id)
            except exception.AdminRequired:
                raise webob.exc.HTTPForbidden()
        values = self._get_quotas(context, id, user_id=user_id)
        return self._format_quota_set(None, values)
    def defaults(self, req, id):
        """GET member action: show the default quota values."""
        context = req.environ['nova.context']
        authorize_show(context)
        values = QUOTAS.get_defaults(context)
        return self._format_quota_set(id, values)
    def delete(self, req, id):
        """DELETE /os-quota-sets/{id}: reset quotas back to defaults.
        Only available when os-extended-quotas is loaded; otherwise 404."""
        if self.ext_mgr.is_loaded('os-extended-quotas'):
            context = req.environ['nova.context']
            authorize_delete(context)
            params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
            user_id = params.get('user_id', [None])[0]
            if user_id and not self.ext_mgr.is_loaded('os-user-quotas'):
                # user_id supplied but per-user quotas are not enabled.
                raise webob.exc.HTTPNotFound()
            try:
                nova.context.authorize_project_context(context, id)
                if user_id:
                    QUOTAS.destroy_all_by_project_and_user(context,
                                                           id, user_id)
                else:
                    QUOTAS.destroy_all_by_project(context, id)
                return webob.Response(status_int=202)
            except exception.Forbidden:
                raise webob.exc.HTTPForbidden()
        raise webob.exc.HTTPNotFound()
class Quotas(extensions.ExtensionDescriptor):
    """Quotas management support."""
    name = "Quotas"
    alias = "os-quota-sets"
    namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1"
    updated = "2011-08-08T00:00:00Z"
    def get_resources(self):
        """Expose the os-quota-sets resource with a GET 'defaults' action."""
        controller = QuotaSetsController(self.ext_mgr)
        resource = extensions.ResourceExtension(
            'os-quota-sets', controller, member_actions={'defaults': 'GET'})
        return [resource]
| |
from __future__ import print_function, division
from sympy.functions import sqrt, sign, root
from sympy.core import S, sympify, Mul, Add, Expr
from sympy.core.function import expand_mul
from sympy.core.symbol import Dummy
from sympy.polys import Poly, PolynomialError
from sympy.core.function import count_ops, _mexpand
from sympy.utilities import default_sort_key
def is_sqrt(expr):
    """Return True if expr is a sqrt, otherwise False."""
    if not expr.is_Pow:
        return False
    e = expr.exp
    # A sqrt is a power with exponent +1/2 or -1/2 (S.Half is a singleton,
    # so identity comparison is safe here).
    return e.is_Rational and abs(e) is S.Half
def sqrt_depth(p):
    """Return the maximum depth of any square root argument of p.
    >>> from sympy.functions.elementary.miscellaneous import sqrt
    >>> from sympy.simplify.sqrtdenest import sqrt_depth
    Neither of these square roots contains any other square roots
    so the depth is 1:
    >>> sqrt_depth(1 + sqrt(2)*(1 + sqrt(3)))
    1
    The sqrt(3) is contained within a square root so the depth is
    2:
    >>> sqrt_depth(1 + sqrt(2)*sqrt(1 + sqrt(3)))
    2
    """
    if p.is_Atom:
        return 0
    elif p.is_Add or p.is_Mul:
        # Depths are plain ints; compare them numerically.  The previous
        # ``key=default_sort_key`` sympified every int only to produce the
        # same ordering — pure overhead with no behavioral effect.
        return max(sqrt_depth(x) for x in p.args)
    elif is_sqrt(p):
        # One level for this sqrt plus whatever is nested in its base.
        return sqrt_depth(p.base) + 1
    else:
        # Non-sqrt powers, functions, etc. contribute no sqrt depth.
        return 0
def is_algebraic(p):
    """Return True if p is comprised of only Rationals or square roots
    of Rationals and algebraic operations.
    Examples
    ========
    >>> from sympy.functions.elementary.miscellaneous import sqrt
    >>> from sympy.simplify.sqrtdenest import is_algebraic
    >>> from sympy import cos
    >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*sqrt(2))))
    True
    >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*cos(2))))
    False
    """
    # Guard-clause form: rationals qualify, any other atom does not.
    if p.is_Rational:
        return True
    if p.is_Atom:
        return False
    # sqrts and integer powers are algebraic iff their base is.
    if is_sqrt(p) or (p.is_Pow and p.exp.is_Integer):
        return is_algebraic(p.base)
    # Sums and products are algebraic iff every operand is.
    if p.is_Add or p.is_Mul:
        return all(is_algebraic(arg) for arg in p.args)
    return False
def _subsets(n):
"""
Returns all possible subsets of the set (0, 1, ..., n-1) except the
empty set, listed in reversed lexicographical order according to binary
representation, so that the case of the fourth root is treated last.
Examples
========
>>> from sympy.simplify.sqrtdenest import _subsets
>>> _subsets(2)
[[1, 0], [0, 1], [1, 1]]
"""
if n == 1:
a = [[1]]
elif n == 2:
a = [[1, 0], [0, 1], [1, 1]]
elif n == 3:
a = [[1, 0, 0], [0, 1, 0], [1, 1, 0],
[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
else:
b = _subsets(n - 1)
a0 = [x + [0] for x in b]
a1 = [x + [1] for x in b]
a = a0 + [[0]*(n - 1) + [1]] + a1
return a
def sqrtdenest(expr, max_iter=3):
    """Denests sqrts in an expression that contain other square roots
    if possible, otherwise returns the expr unchanged. This is based on the
    algorithms of [1].
    Examples
    ========
    >>> from sympy.simplify.sqrtdenest import sqrtdenest
    >>> from sympy import sqrt
    >>> sqrtdenest(sqrt(5 + 2 * sqrt(6)))
    sqrt(2) + sqrt(3)
    See Also
    ========
    sympy.solvers.solvers.unrad
    References
    ==========
    [1] http://researcher.watson.ibm.com/researcher/files/us-fagin/symb85.pdf
    [2] D. J. Jeffrey and A. D. Rich, 'Simplifying Square Roots of Square Roots
    by Denesting' (available at http://www.cybertester.com/data/denest.pdf)
    """
    expr = expand_mul(sympify(expr))
    # Iterate to a fixed point, bounded by max_iter passes.
    for _ in range(max_iter):
        denested = _sqrtdenest0(expr)
        if denested == expr:
            break
        expr = denested
    return expr
def _sqrt_match(p):
    """Return [a, b, r] for p.match(a + b*sqrt(r)) where, in addition to
    matching, sqrt(r) also has the maximal sqrt_depth among addends of p.
    Returns [] when no such match exists.
    Examples
    ========
    >>> from sympy.functions.elementary.miscellaneous import sqrt
    >>> from sympy.simplify.sqrtdenest import _sqrt_match
    >>> _sqrt_match(1 + sqrt(2) + sqrt(2)*sqrt(3) + 2*sqrt(1+sqrt(5)))
    [1 + sqrt(2) + sqrt(6), 2, 1 + sqrt(5)]
    """
    from sympy.simplify.simplify import split_surds
    p = _mexpand(p)
    if p.is_Number:
        # A bare number is all "a": zero surd part.
        res = (p, S.Zero, S.Zero)
    elif p.is_Add:
        pargs = sorted(p.args, key=default_sort_key)
        if all((x**2).is_Rational for x in pargs):
            # Depth-1 sum of simple surds: delegate to split_surds.
            r, b, a = split_surds(p)
            res = a, b, r
            return list(res)
        # to make the process canonical, the argument is included in the tuple
        # so when the max is selected, it will be the largest arg having a
        # given depth
        v = [(sqrt_depth(x), x, i) for i, x in enumerate(pargs)]
        nmax = max(v, key=default_sort_key)
        if nmax[0] == 0:
            # No addend contains a square root: nothing to match.
            res = []
        else:
            # select r
            depth, _, i = nmax
            r = pargs.pop(i)
            v.pop(i)
            b = S.One
            if r.is_Mul:
                # Split r into its deepest factors (kept in r) and the
                # shallower cofactor (moved into the coefficient b).
                bv = []
                rv = []
                for x in r.args:
                    if sqrt_depth(x) < depth:
                        bv.append(x)
                    else:
                        rv.append(x)
                b = Mul._from_args(bv)
                r = Mul._from_args(rv)
            # collect terms containing r
            a1 = []
            b1 = [b]
            for x in v:
                if x[0] < depth:
                    a1.append(x[1])
                else:
                    x1 = x[1]
                    if x1 == r:
                        b1.append(1)
                    else:
                        if x1.is_Mul:
                            x1args = list(x1.args)
                            if r in x1args:
                                x1args.remove(r)
                                b1.append(Mul(*x1args))
                            else:
                                a1.append(x[1])
                        else:
                            a1.append(x[1])
            a = Add(*a1)
            b = Add(*b1)
            res = (a, b, r**2)
    else:
        # Single (non-Add) term: try to view it as b*sqrt(r).
        b, r = p.as_coeff_Mul()
        if is_sqrt(r):
            res = (S.Zero, b, r**2)
        else:
            res = []
    return list(res)
class SqrtdenestStopIteration(StopIteration):
    """Internal signal raised when _sqrtdenest_rec cannot denest further."""
    pass
def _sqrtdenest0(expr):
    """Return ``expr`` with each of its arguments denested."""
    if is_sqrt(expr):
        num, den = expr.as_numer_denom()
        if den is not S.One:
            # Denest numerator and denominator independently.
            return _sqrtdenest0(num)/_sqrtdenest0(den)
        # num is itself a square root.
        if num.base.is_Add:
            addends = sorted(num.base.args, key=default_sort_key)
            if len(addends) > 2 and all((t**2).is_Integer for t in addends):
                # Three or more integer surds: try the recursive helper.
                try:
                    return _sqrtdenest_rec(num)
                except SqrtdenestStopIteration:
                    pass
            expr = sqrt(_mexpand(Add(*[_sqrtdenest0(t) for t in addends])))
        return _sqrtdenest1(expr)
    if isinstance(expr, Expr):
        if expr.args:
            # Rebuild the node from its denested arguments.
            return expr.func(*[_sqrtdenest0(t) for t in expr.args])
    return expr
def _sqrtdenest_rec(expr):
    """Helper that denests the square root of three or more surds.
    It returns the denested expression; if it cannot be denested it
    throws SqrtdenestStopIteration
    Algorithm: expr.base is in the extension Q_m = Q(sqrt(r_1),..,sqrt(r_k));
    split expr.base = a + b*sqrt(r_k), where `a` and `b` are on
    Q_(m-1) = Q(sqrt(r_1),..,sqrt(r_(k-1))); then a**2 - b**2*r_k is
    on Q_(m-1); denest sqrt(a**2 - b**2*r_k) and so on.
    See [1], section 6.
    Examples
    ========
    >>> from sympy import sqrt
    >>> from sympy.simplify.sqrtdenest import _sqrtdenest_rec
    >>> _sqrtdenest_rec(sqrt(-72*sqrt(2) + 158*sqrt(5) + 498))
    -sqrt(10) + sqrt(2) + 9 + 9*sqrt(5)
    >>> w=-6*sqrt(55)-6*sqrt(35)-2*sqrt(22)-2*sqrt(14)+2*sqrt(77)+6*sqrt(10)+65
    >>> _sqrtdenest_rec(sqrt(w))
    -sqrt(11) - sqrt(7) + sqrt(2) + 3*sqrt(5)
    """
    from sympy.simplify.simplify import radsimp, split_surds, rad_rationalize
    if not expr.is_Pow:
        return sqrtdenest(expr)
    if expr.base < 0:
        # Factor out I = sqrt(-1) and denest the positive radicand.
        return sqrt(-1)*_sqrtdenest_rec(sqrt(-expr.base))
    # Split the radicand as a + b with a carrying the common surd factor g.
    g, a, b = split_surds(expr.base)
    a = a*sqrt(g)
    if a < b:
        a, b = b, a
    c2 = _mexpand(a**2 - b**2)
    if len(c2.args) > 2:
        # c2 still has three or more surds: recurse one level deeper.
        g, a1, b1 = split_surds(c2)
        a1 = a1*sqrt(g)
        if a1 < b1:
            a1, b1 = b1, a1
        c2_1 = _mexpand(a1**2 - b1**2)
        c_1 = _sqrtdenest_rec(sqrt(c2_1))
        d_1 = _sqrtdenest_rec(sqrt(a1 + c_1))
        num, den = rad_rationalize(b1, d_1)
        c = _mexpand(d_1/sqrt(2) + num/(den*sqrt(2)))
    else:
        c = _sqrtdenest1(sqrt(c2))
    if sqrt_depth(c) > 1:
        raise SqrtdenestStopIteration
    ac = a + c
    # Bail out when the candidate is no simpler than what we started with.
    if len(ac.args) >= len(expr.args):
        if count_ops(ac) >= count_ops(expr.base):
            raise SqrtdenestStopIteration
    d = sqrtdenest(sqrt(ac))
    if sqrt_depth(d) > 1:
        raise SqrtdenestStopIteration
    num, den = rad_rationalize(b, d)
    r = d/sqrt(2) + num/(den*sqrt(2))
    r = radsimp(r)
    return _mexpand(r)
def _sqrtdenest1(expr, denester=True):
    """Return denested expr after denesting with simpler methods or, that
    failing, using the denester.

    When ``denester`` is False only the cheap numeric/symbolic attempts
    are made (used by _denester itself to avoid unbounded recursion)."""
    from sympy.simplify.simplify import radsimp
    if not is_sqrt(expr):
        return expr
    a = expr.base
    if a.is_Atom:
        return expr
    val = _sqrt_match(a)
    if not val:
        return expr
    a, b, r = val
    # try a quick numeric denesting
    d2 = _mexpand(a**2 - b**2*r)
    if d2.is_Rational:
        if d2.is_positive:
            z = _sqrt_numeric_denest(a, b, r, d2)
            if z is not None:
                return z
        else:
            # fourth root case
            # sqrtdenest(sqrt(3 + 2*sqrt(3))) =
            # sqrt(2)*3**(1/4)/2 + sqrt(2)*3**(3/4)/2
            dr2 = _mexpand(-d2*r)
            dr = sqrt(dr2)
            if dr.is_Rational:
                z = _sqrt_numeric_denest(_mexpand(b*r), a, r, dr2)
                if z is not None:
                    return z/root(r, 4)
    else:
        z = _sqrt_symbolic_denest(a, b, r)
        if z is not None:
            return z
    if not denester or not is_algebraic(expr):
        return expr
    res = sqrt_biquadratic_denest(expr, a, b, r, d2)
    if res:
        return res
    # now call to the denester
    av0 = [a, b, r, d2]
    z = _denester([radsimp(expr**2)], av0, 0, sqrt_depth(expr))[0]
    # _denester flags failure by clearing av0[1].
    if av0[1] is None:
        return expr
    if z is not None:
        # Reject results that are no simpler than the input.
        if sqrt_depth(z) == sqrt_depth(expr) and count_ops(z) > count_ops(expr):
            return expr
        return z
    return expr
def _sqrt_symbolic_denest(a, b, r):
    """Given an expression, sqrt(a + b*sqrt(r)), return the denested
    expression or None.
    Algorithm:
    If r = ra + rb*sqrt(rr), try replacing sqrt(rr) in ``a`` with
    (y**2 - ra)/rb, and if the result is a quadratic, ca*y**2 + cb*y + cc, and
    (cb + b)**2 - 4*ca*cc is 0, then sqrt(a + b*sqrt(r)) can be rewritten as
    sqrt(ca*(sqrt(r) + (cb + b)/(2*ca))**2).
    Examples
    ========
    >>> from sympy.simplify.sqrtdenest import _sqrt_symbolic_denest, sqrtdenest
    >>> from sympy import sqrt, Symbol
    >>> from sympy.abc import x
    >>> a, b, r = 16 - 2*sqrt(29), 2, -10*sqrt(29) + 55
    >>> _sqrt_symbolic_denest(a, b, r)
    sqrt(-2*sqrt(29) + 11) + sqrt(5)
    If the expression is numeric, it will be simplified:
    >>> w = sqrt(sqrt(sqrt(3) + 1) + 1) + 1 + sqrt(2)
    >>> sqrtdenest(sqrt((w**2).expand()))
    1 + sqrt(2) + sqrt(1 + sqrt(1 + sqrt(3)))
    Otherwise, it will only be simplified if assumptions allow:
    >>> w = w.subs(sqrt(3), sqrt(x + 3))
    >>> sqrtdenest(sqrt((w**2).expand()))
    sqrt((sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2))**2)
    Notice that the argument of the sqrt is a square. If x is made positive
    then the sqrt of the square is resolved:
    >>> _.subs(x, Symbol('x', positive=True))
    sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2)
    """
    a, b, r = map(sympify, (a, b, r))
    rval = _sqrt_match(r)
    if not rval:
        return None
    ra, rb, rr = rval
    if rb:
        # Treat a as a polynomial in y = sqrt(rr).
        y = Dummy('y', positive=True)
        try:
            newa = Poly(a.subs(sqrt(rr), (y**2 - ra)/rb), y)
        except PolynomialError:
            return None
        if newa.degree() == 2:
            ca, cb, cc = newa.all_coeffs()
            cb += b
            # Perfect-square discriminant check.
            if _mexpand(cb**2 - 4*ca*cc).equals(0):
                z = sqrt(ca*(sqrt(r) + cb/(2*ca))**2)
                if z.is_number:
                    z = _mexpand(Mul._from_args(z.as_content_primitive()))
                return z
def _sqrt_numeric_denest(a, b, r, d2):
    """Helper that denest expr = a + b*sqrt(r), with d2 = a**2 - b**2*r > 0
    or returns None if not denested.
    """
    from sympy.simplify.simplify import radsimp
    depthr = sqrt_depth(r)
    d = sqrt(d2)
    vad = a + d
    # sqrt_depth(res) <= sqrt_depth(vad) + 1
    # sqrt_depth(expr) = depthr + 2
    # there is denesting if sqrt_depth(vad)+1 < depthr + 2;
    # if vad**2 is Number there is a fourth root
    if sqrt_depth(vad) >= depthr + 1 and not (vad**2).is_Rational:
        return None
    vad1 = radsimp(1/vad)
    return (sqrt(vad/2) + sign(b)*sqrt((b**2*r*vad1/2).expand())).expand()
def sqrt_biquadratic_denest(expr, a, b, r, d2):
    """denest expr = sqrt(a + b*sqrt(r))
    where a, b, r are linear combinations of square roots of
    positive rationals on the rationals (SQRR) and r > 0, b != 0,
    d2 = a**2 - b**2*r > 0
    If it cannot denest it returns None.
    ALGORITHM
    Search for a solution A of type SQRR of the biquadratic equation
    4*A**4 - 4*a*A**2 + b**2*r = 0                               (1)
    sqd = sqrt(a**2 - b**2*r)
    Choosing the sqrt to be positive, the possible solutions are
    A = sqrt(a/2 +/- sqd/2)
    Since a, b, r are SQRR, then a**2 - b**2*r is a SQRR,
    so if sqd can be denested, it is done by
    _sqrtdenest_rec, and the result is a SQRR.
    Similarly for A.
    Examples of solutions (in both cases a and sqd are positive):
    Example of expr with solution sqrt(a/2 + sqd/2) but not
    solution sqrt(a/2 - sqd/2):
    expr = sqrt(-sqrt(15) - sqrt(2)*sqrt(-sqrt(5) + 5) - sqrt(3) + 8)
    a = -sqrt(15) - sqrt(3) + 8; sqd = -2*sqrt(5) - 2 + 4*sqrt(3)
    Example of expr with solution sqrt(a/2 - sqd/2) but not
    solution sqrt(a/2 + sqd/2):
    w = 2 + r2 + r3 + (1 + r3)*sqrt(2 + r2 + 5*r3)
    expr = sqrt((w**2).expand())
    a = 4*sqrt(6) + 8*sqrt(2) + 47 + 28*sqrt(3)
    sqd = 29 + 20*sqrt(3)
    Define B = b/2*A; eq.(1) implies a = A**2 + B**2*r; then
    expr**2 = a + b*sqrt(r) = (A + B*sqrt(r))**2
    Examples
    ========
    >>> from sympy import sqrt
    >>> from sympy.simplify.sqrtdenest import _sqrt_match, sqrt_biquadratic_denest
    >>> z = sqrt((2*sqrt(2) + 4)*sqrt(2 + sqrt(2)) + 5*sqrt(2) + 8)
    >>> a, b, r = _sqrt_match(z**2)
    >>> d2 = a**2 - b**2*r
    >>> sqrt_biquadratic_denest(z, a, b, r, d2)
    sqrt(2) + sqrt(sqrt(2) + 2) + 2
    """
    from sympy.simplify.simplify import radsimp, rad_rationalize
    # Preconditions of the algorithm; anything else is not denestable here.
    if r <= 0 or d2 < 0 or not b or sqrt_depth(expr.base) < 2:
        return None
    # Every addend of a, b and r must be the sqrt of a positive integer.
    for x in (a, b, r):
        for y in x.args:
            y2 = y**2
            if not y2.is_Integer or not y2.is_positive:
                return None
    sqd = _mexpand(sqrtdenest(sqrt(radsimp(d2))))
    if sqrt_depth(sqd) > 1:
        return None
    x1, x2 = [a/2 + sqd/2, a/2 - sqd/2]
    # look for a solution A with depth 1
    for x in (x1, x2):
        A = sqrtdenest(sqrt(x))
        if sqrt_depth(A) > 1:
            continue
        Bn, Bd = rad_rationalize(b, _mexpand(2*A))
        B = Bn/Bd
        z = A + B*sqrt(r)
        if z < 0:
            z = -z
        return _mexpand(z)
    return None
def _denester(nested, av0, h, max_depth_level):
    """Denests a list of expressions that contain nested square roots.
    Algorithm based on <http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf>.
    It is assumed that all of the elements of 'nested' share the same
    bottom-level radicand. (This is stated in the paper, on page 177, in
    the paragraph immediately preceding the algorithm.)
    When evaluating all of the arguments in parallel, the bottom-level
    radicand only needs to be denested once. This means that calling
    _denester with x arguments results in a recursive invocation with x+1
    arguments; hence _denester has polynomial complexity.
    However, if the arguments were evaluated separately, each call would
    result in two recursive invocations, and the algorithm would have
    exponential complexity.
    This is discussed in the paper in the middle paragraph of page 179.

    NOTE: av0 is mutated in place; av0[1] is set to None to signal failure
    to the caller, and av0[0] is cleared after the first level consumes it.
    """
    from sympy.simplify.simplify import radsimp
    if h > max_depth_level:
        return None, None
    if av0[1] is None:
        return None, None
    if (av0[0] is None and
            all(n.is_Number for n in nested)):  # no arguments are nested
        for f in _subsets(len(nested)):  # test subset 'f' of nested
            p = _mexpand(Mul(*[nested[i] for i in range(len(f)) if f[i]]))
            if f.count(1) > 1 and f[-1]:
                p = -p
            sqp = sqrt(p)
            if sqp.is_Rational:
                return sqp, f  # got a perfect square so return its square root.
        # Otherwise, return the radicand from the previous invocation.
        return sqrt(nested[-1]), [0]*len(nested)
    else:
        R = None
        if av0[0] is not None:
            values = [av0[:2]]
            R = av0[2]
            nested2 = [av0[3], R]
            av0[0] = None
        else:
            values = list(filter(None, [_sqrt_match(expr) for expr in nested]))
            for v in values:
                if v[2]:  # Since if b=0, r is not defined
                    # All nested radicands must agree on a single R.
                    if R is not None:
                        if R != v[2]:
                            av0[1] = None
                            return None, None
                    else:
                        R = v[2]
            if R is None:
                # return the radicand from the previous invocation
                return sqrt(nested[-1]), [0]*len(nested)
            nested2 = [_mexpand(v[0]**2) -
                       _mexpand(R*v[1]**2) for v in values] + [R]
        d, f = _denester(nested2, av0, h + 1, max_depth_level)
        if not f:
            return None, None
        if not any(f[i] for i in range(len(nested))):
            v = values[-1]
            return sqrt(v[0] + _mexpand(v[1]*d)), f
        else:
            p = Mul(*[nested[i] for i in range(len(nested)) if f[i]])
            v = _sqrt_match(p)
            if 1 in f and f.index(1) < len(nested) - 1 and f[len(nested) - 1]:
                v[0] = -v[0]
                v[1] = -v[1]
            if not f[len(nested)]:  # Solution denests with square roots
                vad = _mexpand(v[0] + d)
                if vad <= 0:
                    # return the radicand from the previous invocation.
                    return sqrt(nested[-1]), [0]*len(nested)
                if not(sqrt_depth(vad) <= sqrt_depth(R) + 1 or
                       (vad**2).is_Number):
                    av0[1] = None
                    return None, None
                sqvad = _sqrtdenest1(sqrt(vad), denester=False)
                if not (sqrt_depth(sqvad) <= sqrt_depth(R) + 1):
                    av0[1] = None
                    return None, None
                sqvad1 = radsimp(1/sqvad)
                res = _mexpand(sqvad/sqrt(2) + (v[1]*sqrt(R)*sqvad1/sqrt(2)))
                return res, f
                # sign(v[1])*sqrt(_mexpand(v[1]**2*R*vad1/2))), f
            else:  # Solution requires a fourth root
                s2 = _mexpand(v[1]*R) + d
                if s2 <= 0:
                    return sqrt(nested[-1]), [0]*len(nested)
                FR, s = root(_mexpand(R), 4), sqrt(s2)
                return _mexpand(s/(sqrt(2)*FR) + v[0]*FR/(sqrt(2)*s)), f
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap
import operator
import os
import sys
import shlex
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, pack_long
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
    @property
    def context(self):
        """
        The L{SparkContext} that this RDD was created on.
        """
        return self.ctx
    def cache(self):
        """
        Persist this RDD with the default storage level (C{MEMORY_ONLY}).
        """
        # Flag is set before delegating to the underlying Java RDD.
        self.is_cached = True
        self._jrdd.cache()
        return self
def persist(self, storageLevel):
"""
Set this RDD's storage level to persist its values across operations after the first time
it is computed. This can only be used to assign a new storage level if the RDD does not
have a storage level set yet.
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
    def unpersist(self):
        """
        Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
        """
        self.is_cached = False
        self._jrdd.unpersist()
        return self
    def checkpoint(self):
        """
        Mark this RDD for checkpointing. It will be saved to a file inside the
        checkpoint directory set with L{SparkContext.setCheckpointDir()} and
        all references to its parent RDDs will be removed. This function must
        be called before any job has been executed on this RDD. It is strongly
        recommended that this RDD is persisted in memory, otherwise saving it
        on a file will require recomputation.
        """
        self.is_checkpointed = True
        self._jrdd.rdd().checkpoint()
    def isCheckpointed(self):
        """
        Return whether this RDD has been checkpointed or not
        """
        # Delegates to the JVM-side RDD, which owns the checkpoint state.
        return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
else:
return None
    def map(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each element of this RDD.
        """
        # NOTE(review): the previous docstring described distinct(); the body
        # plainly maps f over every element of each partition via imap.
        def func(split, iterator): return imap(f, iterator)
        return PipelinedRDD(self, func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator): return chain.from_iterable(imap(f, iterator))
return self.mapPartitionsWithSplit(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator): return f(iterator)
return self.mapPartitionsWithSplit(func)
    def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.
        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithSplit(f).sum()
        6
        """
        # PipelinedRDD fuses successive narrow transformations into one pass.
        return PipelinedRDD(self, f, preservesPartitioning)
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator): return ifilter(f, iterator)
return self.mapPartitions(func)
def distinct(self):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed):
"""
Return a sampled subset of this RDD (relies on numpy and falls back
on default random generator if numpy is unavailable).
>>> sc.parallelize(range(0, 100)).sample(False, 0.1, 2).collect() #doctest: +SKIP
[2, 3, 20, 21, 24, 41, 42, 66, 67, 89, 90, 98]
"""
return self.mapPartitionsWithSplit(RDDSampler(withReplacement, fraction, seed).func, True)
    # this is ported from scala/spark/RDD.scala
    def takeSample(self, withReplacement, num, seed):
        """
        Return a fixed-size sampled subset of this RDD (currently requires numpy).
        >>> sc.parallelize(range(0, 10)).takeSample(True, 10, 1) #doctest: +SKIP
        [4, 2, 1, 8, 2, 7, 0, 4, 1, 4]
        """
        fraction = 0.0
        total = 0
        # Over-sample by this factor so one pass usually suffices.
        multiplier = 3.0
        initialCount = self.count()
        # NOTE(review): if the RDD is empty, initialCount == 0 and the
        # fraction computations below raise ZeroDivisionError — confirm
        # callers guard against empty RDDs.
        maxSelected = 0
        if (num < 0):
            raise ValueError
        # Clamp to sys.maxint - 1 (Python 2 file).
        if initialCount > sys.maxint - 1:
            maxSelected = sys.maxint - 1
        else:
            maxSelected = initialCount
        if num > initialCount and not withReplacement:
            # Without replacement we can return at most the whole RDD.
            total = maxSelected
            fraction = multiplier * (maxSelected + 1) / initialCount
        else:
            fraction = multiplier * (num + 1) / initialCount
            total = num
        samples = self.sample(withReplacement, fraction, seed).collect()
        # If the first sample didn't turn out large enough, keep trying to take samples;
        # this shouldn't happen often because we use a big multiplier for their initial size.
        # See: scala/spark/RDD.scala
        while len(samples) < total:
            if seed > sys.maxint - 2:
                seed = -1
            seed += 1
            samples = self.sample(withReplacement, fraction, seed).collect()
        # Shuffle so truncating to `total` yields an unbiased subset.
        sampler = RDDSampler(withReplacement, fraction, seed+1)
        sampler.shuffle(samples)
        return samples[0:total]
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
return rdd
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
return RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
def _reserialize(self):
if self._jrdd_deserializer == self.ctx.serializer:
return self
else:
return self.map(lambda x: x, preservesPartitioning=True)
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
    def sortByKey(self, ascending=True, numPartitions=None, keyfunc = lambda x: x):
        """
        Sorts this RDD, which is assumed to consist of (key, value) pairs.
        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
        >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
        >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
        [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5), ('little', 4), ('Mary', 1), ('was', 8), ('white', 9), ('whose', 6)]
        """
        if numPartitions is None:
            numPartitions = self.ctx.defaultParallelism
        bounds = list()
        # first compute the boundary of each part via sampling: we want to partition
        # the key-space into bins such that the bins have roughly the same
        # number of (key, value) pairs falling into them
        if numPartitions > 1:
            rddSize = self.count()
            maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
            fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
            samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
            samples = sorted(samples, reverse=(not ascending), key=keyfunc)
            # we have numPartitions many parts but one of the them has
            # an implicit boundary
            for i in range(0, numPartitions - 1):
                # NOTE(review): relies on Python 2 integer division to
                # produce an int index — confirm before any py3 port.
                index = (len(samples) - 1) * (i + 1) / numPartitions
                bounds.append(samples[index])
        def rangePartitionFunc(k):
            # Linear scan over the sampled boundaries to pick a bin.
            p = 0
            while p < len(bounds) and keyfunc(k) > bounds[p]:
                p += 1
            if ascending:
                return p
            else:
                return numPartitions-1-p
        def mapFunc(iterator):
            # Sort each partition locally; ranges are already disjoint.
            yield sorted(iterator, reverse=(not ascending), key=lambda (k, v): keyfunc(k))
        return (self.partitionBy(numPartitions, partitionFunc=rangePartitionFunc)
                .mapPartitions(mapFunc,preservesPartitioning=True)
                .flatMap(lambda x: x, preservesPartitioning=True))
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator): yield list(iterator)
return self.mapPartitions(func)
    def cartesian(self, other):
        """
        Return the Cartesian product of this RDD and another one, that is, the
        RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
        C{b} is in C{other}.
        >>> rdd = sc.parallelize([1, 2])
        >>> sorted(rdd.cartesian(rdd).collect())
        [(1, 1), (1, 2), (2, 1), (2, 2)]
        """
        # Due to batching, we can't use the Java cartesian method.
        # CartesianDeserializer pairs elements from the two streams, letting
        # each side keep its own serialization format.
        deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                             other._jrdd_deserializer)
        return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize([1, 2, 3]).pipe('cat').collect()
['1', '2', '3']
"""
def func(iterator):
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
out.write(str(obj).rstrip('\n') + '\n')
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip('\n') for x in pipe.stdout)
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
yield None
self.mapPartitions(processPartition).collect() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
bytesInJava = self._jrdd.collect().iterator()
return list(self._collect_iterator_through_file(bytesInJava))
def _collect_iterator_through_file(self, iterator):
# Transferring lots of data through Py4J can be slow because
# socket.readline() is inefficient. Instead, we'll dump the data to a
# file and read it back.
tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
tempFile.close()
self.ctx._writeToFile(iterator, tempFile.name)
# Read the data into Python and deserialize it:
with open(tempFile.name, 'rb') as tempFile:
for item in self._jrdd_deserializer.load_stream(tempFile):
yield item
os.unlink(tempFile.name)
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
"""
def func(iterator):
acc = None
for obj in iterator:
if acc is None:
acc = obj
else:
acc = f(obj, acc)
if acc is not None:
yield acc
vals = self.mapPartitions(func).collect()
return reduce(f, vals)
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
# TODO: aggregate
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which corrects for bias in
estimating the standard deviation by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects for bias in
estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
    def take(self, num):
        """
        Take the first num elements of the RDD.
        This currently scans the partitions *one by one*, so it will be slow if
        a lot of partitions are required. In that case, use L{collect} to get
        the whole RDD instead.
        >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
        [2, 3]
        >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
        [2, 3, 4, 5, 6]
        """
        def takeUpToNum(iterator):
            # Relies on next() raising StopIteration to end this generator
            # when the partition runs out early (Python 2 semantics; under
            # PEP 479 this needs an explicit return — verify before porting).
            taken = 0
            while taken < num:
                yield next(iterator)
                taken += 1
        # Take only up to num elements from each partition we try
        mapped = self.mapPartitions(takeUpToNum)
        items = []
        # Fetch partitions from the JVM one at a time, stopping as soon as
        # enough elements have been gathered on the driver.
        for partition in range(mapped._jrdd.splits().size()):
            iterator = self.ctx._takePartition(mapped._jrdd.rdd(), partition)
            items.extend(mapped._collect_iterator_through_file(iterator))
            if len(items) >= num:
                break
        return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
"""
return self.take(1)[0]
    def saveAsTextFile(self, path):
        """
        Save this RDD as a text file, using string representations of elements.
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
        >>> from fileinput import input
        >>> from glob import glob
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
        """
        def func(split, iterator):
            # Coerce every element to a UTF-8 byte string
            # (basestring/unicode: this module targets Python 2).
            for x in iterator:
                if not isinstance(x, basestring):
                    x = unicode(x)
                yield x.encode("utf-8")
        # Bypass the Python-side serializer so raw bytes reach the JVM; the
        # JVM then converts them to strings and writes the Hadoop text file.
        keyed = PipelinedRDD(self, func)
        keyed._bypass_serializer = True
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for (k, v) in iterator:
m[k] = v if k not in m else func(m[k], v)
yield m
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] = v if k not in m1 else func(m1[k], v)
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
    def join(self, other, numPartitions=None):
        """
        Return an RDD containing all pairs of elements with matching keys in
        C{self} and C{other}.
        Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
        (k, v1) is in C{self} and (k, v2) is in C{other}.
        Performs a hash join across the cluster.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("a", 3)])
        >>> sorted(x.join(y).collect())
        [('a', (1, 2)), ('a', (1, 3))]
        """
        # Delegates to the shuffle-based join helper (pyspark.join).
        return python_join(self, other, numPartitions)
    def leftOuterJoin(self, other, numPartitions=None):
        """
        Perform a left outer join of C{self} and C{other}.
        For each element (k, v) in C{self}, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in C{other}, or the pair
        (k, (v, None)) if no elements in other have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(x.leftOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None))]
        """
        # Delegates to the shuffle-based join helper (pyspark.join).
        return python_left_outer_join(self, other, numPartitions)
    def rightOuterJoin(self, other, numPartitions=None):
        """
        Perform a right outer join of C{self} and C{other}.
        For each element (k, w) in C{other}, the resulting RDD will either
        contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
        if no elements in C{self} have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(y.rightOuterJoin(x).collect())
        [('a', (2, 1)), ('b', (None, 4))]
        """
        # Delegates to the shuffle-based join helper (pyspark.join).
        return python_right_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
    def partitionBy(self, numPartitions, partitionFunc=hash):
        """
        Return a copy of the RDD partitioned using the specified partitioner.
        >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
        >>> sets = pairs.partitionBy(2).glom().collect()
        >>> set(sets[0]).intersection(set(sets[1]))
        set([])
        """
        if numPartitions is None:
            numPartitions = self.ctx.defaultParallelism
        # Transferring O(n) objects to Java is too expensive. Instead, we'll
        # form the hash buckets in Python, transferring O(numPartitions) objects
        # to Java. Each object is a (splitNumber, [objects]) pair.
        outputSerializer = self.ctx._unbatched_serializer
        def add_shuffle_key(split, iterator):
            # Emit alternating values: a packed target-split id followed by
            # the serialized bucket of (k, v) pairs destined for that split.
            buckets = defaultdict(list)
            for (k, v) in iterator:
                buckets[partitionFunc(k) % numPartitions].append((k, v))
            for (split, items) in buckets.iteritems():
                yield pack_long(split)
                yield outputSerializer.dumps(items)
        keyed = PipelinedRDD(self, add_shuffle_key)
        keyed._bypass_serializer = True
        # The JVM pairs up the (id, bucket) stream and shuffles the buckets.
        pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
        partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                      id(partitionFunc))
        jrdd = pairRDD.partitionBy(partitioner).values()
        rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
        # This is required so that id(partitionFunc) remains unique, even if
        # partitionFunc is a lambda:
        rdd._partitionFunc = partitionFunc
        return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
def combineLocally(iterator):
combiners = {}
for x in iterator:
(k, v) = x
if k not in combiners:
combiners[k] = createCombiner(v)
else:
combiners[k] = mergeValue(combiners[k], v)
return combiners.iteritems()
locally_combined = self.mapPartitions(combineLocally)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
combiners = {}
for (k, v) in iterator:
if not k in combiners:
combiners[k] = v
else:
combiners[k] = mergeCombiners(combiners[k], v)
return combiners.iteritems()
return shuffled.mapPartitions(_mergeCombiners)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with into numPartitions partitions.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(x.groupByKey().collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
return a + b
return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
numPartitions)
# TODO: add tests
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
"""
flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
"""
map_values_fn = lambda (k, v): (k, f(v))
return self.map(map_values_fn, preservesPartitioning=True)
# TODO: support varargs cogroup of several RDDs.
    def groupWith(self, other):
        """
        Alias for cogroup.
        """
        # Kept for API parity with the Scala RDD interface.
        return self.cogroup(other)
# TODO: add variant with custom parittioner
    def cogroup(self, other, numPartitions=None):
        """
        For each key k in C{self} or C{other}, return a resulting RDD that
        contains a tuple with the list of values for that key in C{self} as well
        as C{other}.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(x.cogroup(y).collect())
        [('a', ([1], [2])), ('b', ([4], []))]
        """
        # Delegates to the shuffle-based cogroup helper (pyspark.join).
        return python_cogroup(self, other, numPartitions)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching key
in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
filter_func = lambda (key, vals): len(vals[0]) > 0 and len(vals[1]) == 0
map_func = lambda (key, vals): [(key, val) for val in vals[0]]
return self.cogroup(other, numPartitions).filter(filter_func).flatMap(map_func)
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
rdd = other.map(lambda x: (x, True)) # note: here 'True' is just a placeholder
return self.map(lambda x: (x, True)).subtractByKey(rdd).map(lambda tpl: tpl[0]) # note: here 'True' is just a placeholder
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> sorted(x.cogroup(y).collect())
[(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
"""
return self.map(lambda x: (f(x), x))
# TODO: `lookup` is disabled because we can't make direct comparisons based
# on the key; we need to compare the hash of the key to the hash of the
# keys in the pairs. This could be an expensive operation, since those
# hashes aren't retained.
class PipelinedRDD(RDD):
    """
    Pipelined maps:
    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """
    def __init__(self, prev, func, preservesPartitioning=False):
        # If the parent cannot be pipelined (it is cached/checkpointed or is
        # not itself a PipelinedRDD), this transformation starts a new stage.
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # Compose this function with the parent's so both run in a single
            # pass over the parent's input partition.
            prev_func = prev.func
            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        self._jrdd_val = None  # lazily built Java RDD; see the _jrdd property
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
    @property
    def _jrdd(self):
        # Build (and memoize) the underlying Java PythonRDD on first access.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            serializer = NoOpSerializer()
        else:
            serializer = self.ctx.serializer
        # Ship the composed function plus the input/output serializers to the
        # Python worker as one pickled command.
        command = (self.func, self._prev_jrdd_deserializer, serializer)
        pickled_command = CloudPickleSerializer().dumps(command)
        broadcast_vars = ListConverter().convert(
            [x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
            self.ctx._gateway._gateway_client)
        self.ctx._pickled_broadcast_vars.clear()
        class_tag = self._prev_jrdd.classTag()
        env = MapConverter().convert(self.ctx.environment,
                                     self.ctx._gateway._gateway_client)
        includes = ListConverter().convert(self.ctx._python_includes,
                                           self.ctx._gateway._gateway_client)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
            bytearray(pickled_command), env, includes, self.preservesPartitioning,
            self.ctx.pythonExec, broadcast_vars, self.ctx._javaAccumulator,
            class_tag)
        self._jrdd_val = python_rdd.asJavaRDD()
        return self._jrdd_val
    def _is_pipelinable(self):
        # Caching/checkpointing pins this RDD's contents, so later
        # transformations must not be fused into it.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local SparkContext."""
    import doctest
    import sys
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs,optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # sys.exit instead of the bare exit() builtin: exit is injected by the
        # site module and is not guaranteed to exist (e.g. under python -S).
        sys.exit(-1)
# When executed as a script, run the module's doctest suite.
if __name__ == "__main__":
    _test()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to Python generators of array data.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras.utils.data_utils import iter_sequence_infinite
from tensorflow.python.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.platform import tf_logging as logging
def fit_generator(model,
                  generator,
                  steps_per_epoch=None,
                  epochs=1,
                  verbose=1,
                  callbacks=None,
                  validation_data=None,
                  validation_steps=None,
                  class_weight=None,
                  max_queue_size=10,
                  workers=1,
                  use_multiprocessing=False,
                  shuffle=True,
                  initial_epoch=0):
  """See docstring for `Model.fit_generator`."""
  epoch = initial_epoch
  do_validation = bool(validation_data)
  # In graph mode the train/test functions must be built up front.
  if not context.executing_eagerly():
    model._make_train_function()
    if do_validation:
      model._make_test_function()
  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the`keras.utils.Sequence'
                    ' class.'))
  # Only a Sequence knows its own length; plain generators need an explicit
  # steps_per_epoch.
  if steps_per_epoch is None:
    if is_sequence:
      steps_per_epoch = len(generator)
    else:
      raise ValueError('`steps_per_epoch=None` is only valid for a'
                       ' generator based on the `keras.utils.Sequence`'
                       ' class. Please specify `steps_per_epoch` or use'
                       ' the `keras.utils.Sequence` class.')
  # python 2 has 'next', 3 has '__next__'
  # avoid any explicit version checks
  val_gen = (
      hasattr(validation_data, 'next') or
      hasattr(validation_data, '__next__') or
      isinstance(validation_data, Sequence))
  if (val_gen and not isinstance(validation_data, Sequence) and
      not validation_steps):
    raise ValueError('`validation_steps=None` is only valid for a'
                     ' generator based on the `keras.utils.Sequence`'
                     ' class. Please specify `validation_steps` or use'
                     ' the `keras.utils.Sequence` class.')
  enqueuer = None
  val_enqueuer = None
  try:
    val_x, val_y, val_sample_weights = validation_data, None, None
    if do_validation and not val_gen:
      # Prepare data for validation
      if len(validation_data) == 2:
        val_x, val_y = validation_data  # pylint: disable=unpacking-non-sequence
        val_sample_weights = None
      elif len(validation_data) == 3:
        val_x, val_y, val_sample_weights = validation_data  # pylint: disable=unpacking-non-sequence
      else:
        raise ValueError(
            '`validation_data` should be a tuple '
            '`(val_x, val_y, val_sample_weight)` '
            'or `(val_x, val_y)`. Found: ' + str(validation_data))
      val_x, val_y, val_sample_weights = model._standardize_user_data(
          val_x, val_y, val_sample_weights)
    callbacks = cbks.configure_callbacks(
        callbacks,
        model,
        do_validation=do_validation,
        val_inputs=val_x,
        val_targets=val_y,
        val_sample_weights=val_sample_weights,
        epochs=epochs,
        validation_steps=validation_steps,
        steps_per_epoch=steps_per_epoch,
        verbose=verbose)
    # Set up the data pipeline: background enqueuer threads/processes when
    # workers > 0, otherwise consume the generator on the main thread.
    if workers > 0:
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing,
            shuffle=shuffle)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      if is_sequence:
        output_generator = iter_sequence_infinite(generator)
      else:
        output_generator = generator
    callbacks.on_train_begin()
    # Construct epoch logs.
    epoch_logs = {}
    while epoch < epochs:
      for m in model.stateful_metric_functions:
        m.reset_states()
      callbacks.on_epoch_begin(epoch)
      steps_done = 0
      batch_index = 0
      while steps_done < steps_per_epoch:
        generator_output = next(output_generator)
        if not hasattr(generator_output, '__len__'):
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))
        if len(generator_output) == 2:
          x, y = generator_output
          sample_weight = None
        elif len(generator_output) == 3:
          x, y, sample_weight = generator_output
        else:
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))
        # build batch logs
        batch_logs = {}
        # Infer the batch size from the first input array's leading axis.
        if isinstance(x, list):
          batch_size = x[0].shape[0]
        elif isinstance(x, dict):
          batch_size = list(x.values())[0].shape[0]
        else:
          batch_size = x.shape[0]
        batch_logs['batch'] = batch_index
        batch_logs['size'] = batch_size
        callbacks.on_batch_begin(batch_index, batch_logs)
        outs = model.train_on_batch(
            x, y, sample_weight=sample_weight, class_weight=class_weight)
        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(model.metrics_names, outs):
          batch_logs[l] = o
        callbacks.on_batch_end(batch_index, batch_logs)
        batch_index += 1
        steps_done += 1
        # Epoch finished.
        if steps_done >= steps_per_epoch and do_validation:
          if val_gen:
            val_outs = evaluate_generator(
                model,
                validation_data,
                validation_steps,
                workers=workers,
                use_multiprocessing=use_multiprocessing,
                max_queue_size=max_queue_size)
          else:
            # No need for try/except because
            # data has already been validated.
            val_outs = model.evaluate(
                val_x,
                val_y,
                batch_size=batch_size,
                sample_weight=val_sample_weights,
                verbose=0)
          if not isinstance(val_outs, list):
            val_outs = [val_outs]
          # Same labels assumed.
          for l, o in zip(model.metrics_names, val_outs):
            epoch_logs['val_' + l] = o
        if callbacks.model.stop_training:
          break
      callbacks.on_epoch_end(epoch, epoch_logs)
      epoch += 1
      if callbacks.model.stop_training:
        break
  finally:
    # Always shut down both enqueuers and fire on_train_end, even when
    # training raised.
    try:
      if enqueuer is not None:
        enqueuer.stop()
    finally:
      if val_enqueuer is not None:
        val_enqueuer.stop()
      callbacks.on_train_end()
  return model.history
def evaluate_generator(model,
                       generator,
                       steps=None,
                       max_queue_size=10,
                       workers=1,
                       use_multiprocessing=False,
                       verbose=0):
  """See docstring for `Model.evaluate_generator`."""
  if not context.executing_eagerly():
    model._make_test_function()
  # Reset stateful metrics so results reflect only this evaluation run.
  if hasattr(model, 'metrics'):
    for m in model.stateful_metric_functions:
      m.reset_states()
  steps_done = 0
  all_outs = []
  batch_sizes = []
  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the`keras.utils.Sequence'
                    ' class.'))
  if steps is None:
    if is_sequence:
      steps = len(generator)
    else:
      raise ValueError('`steps=None` is only valid for a generator'
                       ' based on the `keras.utils.Sequence` class.'
                       ' Please specify `steps` or use the'
                       ' `keras.utils.Sequence` class.')
  enqueuer = None
  try:
    # Background enqueuer when workers > 0; main-thread iteration otherwise.
    if workers > 0:
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator, use_multiprocessing=use_multiprocessing)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      if is_sequence:
        output_generator = iter_sequence_infinite(generator)
      else:
        output_generator = generator
    if verbose == 1:
      progbar = Progbar(target=steps)
    while steps_done < steps:
      generator_output = next(output_generator)
      if not hasattr(generator_output, '__len__'):
        raise ValueError('Output of generator should be a tuple '
                         '(x, y, sample_weight) '
                         'or (x, y). Found: ' + str(generator_output))
      if len(generator_output) == 2:
        x, y = generator_output
        sample_weight = None
      elif len(generator_output) == 3:
        x, y, sample_weight = generator_output
      else:
        raise ValueError('Output of generator should be a tuple '
                         '(x, y, sample_weight) '
                         'or (x, y). Found: ' + str(generator_output))
      outs = model.test_on_batch(x, y, sample_weight=sample_weight)
      # Infer the batch size from the first input array's leading axis.
      if isinstance(x, list):
        batch_size = x[0].shape[0]
      elif isinstance(x, dict):
        batch_size = list(x.values())[0].shape[0]
      else:
        batch_size = x.shape[0]
      if batch_size == 0:
        raise ValueError('Received an empty batch. '
                         'Batches should at least contain one item.')
      all_outs.append(outs)
      steps_done += 1
      batch_sizes.append(batch_size)
      if verbose == 1:
        progbar.update(steps_done)
  finally:
    if enqueuer is not None:
      enqueuer.stop()
  # NOTE(review): 'outs' is unbound here if steps == 0 (the loop never ran);
  # presumably steps >= 1 is assumed upstream — confirm.
  if not isinstance(outs, list):
    return np.average(np.asarray(all_outs), weights=batch_sizes)
  else:
    # Weighted-average each metric over batches; the loss entry is taken
    # from the last batch's output (stateful metrics accumulate themselves).
    averages = [float(all_outs[-1][0])]  # index 0 = 'loss'
    averages.extend([
        np.average([out[i]
                    for out in all_outs], weights=batch_sizes)
        for i in range(1, len(outs))
    ])
    return averages
def predict_generator(model,
                      generator,
                      steps=None,
                      max_queue_size=10,
                      workers=1,
                      use_multiprocessing=False,
                      verbose=0):
  """See docstring for `Model.predict_generator`."""
  if not context.executing_eagerly():
    model._make_predict_function()
  steps_done = 0
  all_outs = []
  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the`keras.utils.Sequence'
                    ' class.'))
  if steps is None:
    if is_sequence:
      steps = len(generator)
    else:
      raise ValueError('`steps=None` is only valid for a generator'
                       ' based on the `keras.utils.Sequence` class.'
                       ' Please specify `steps` or use the'
                       ' `keras.utils.Sequence` class.')
  enqueuer = None
  try:
    # Background enqueuer when workers > 0; main-thread iteration otherwise.
    if workers > 0:
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator, use_multiprocessing=use_multiprocessing)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      if is_sequence:
        output_generator = iter_sequence_infinite(generator)
      else:
        output_generator = generator
    if verbose == 1:
      progbar = Progbar(target=steps)
    while steps_done < steps:
      generator_output = next(output_generator)
      if isinstance(generator_output, tuple):
        # Compatibility with the generators
        # used for training.
        if len(generator_output) == 2:
          x, _ = generator_output
        elif len(generator_output) == 3:
          x, _, _ = generator_output
        else:
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))
      else:
        # Assumes a generator that only
        # yields inputs (not targets and sample weights).
        x = generator_output
      outs = model.predict_on_batch(x)
      if not isinstance(outs, list):
        outs = [outs]
      # One accumulator list per model output, filled batch by batch.
      if not all_outs:
        for out in outs:
          all_outs.append([])
      for i, out in enumerate(outs):
        all_outs[i].append(out)
      steps_done += 1
      if verbose == 1:
        progbar.update(steps_done)
  finally:
    if enqueuer is not None:
      enqueuer.stop()
  # Unwrap single-output / single-step results so callers get arrays of the
  # same structure Model.predict would return.
  if len(all_outs) == 1:
    if steps_done == 1:
      return all_outs[0][0]
    else:
      return np.concatenate(all_outs[0])
  if steps_done == 1:
    return [out[0] for out in all_outs]
  else:
    return [np.concatenate(out) for out in all_outs]
| |
import logging
from typing import FrozenSet, List, Optional, Set, Tuple
import pytest
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.tags import Tag
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import (
CandidateEvaluator,
CandidatePreferences,
FormatControl,
LinkEvaluator,
PackageFinder,
_check_link_requires_python,
_extract_version_from_fragment,
_find_name_version_sep,
filter_unallowed_hashes,
)
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.models.target_python import TargetPython
from pip._internal.network.session import PipSession
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.hashes import Hashes
from tests.lib import CURRENT_PY_VERSION_INFO
from tests.lib.index import make_mock_candidate
@pytest.mark.parametrize(
    "requires_python, expected",
    [
        ("== 3.6.4", False),
        ("== 3.6.5", True),
        # An invalid Requires-Python value is ignored, so the link passes.
        ("invalid", True),
    ],
)
def test_check_link_requires_python(requires_python: str, expected: bool) -> None:
    """Check a link's Requires-Python against a fixed (3, 6, 5) interpreter."""
    result = _check_link_requires_python(
        Link("https://example.com", requires_python=requires_python),
        (3, 6, 5),
    )
    assert result == expected
def check_caplog(
    caplog: pytest.LogCaptureFixture, expected_level: str, expected_message: str
) -> None:
    """Assert that exactly one record was logged, with the given level/message."""
    assert len(caplog.records) == 1
    (record,) = caplog.records
    assert (record.levelname, record.message) == (expected_level, expected_message)
@pytest.mark.parametrize(
    "ignore_requires_python, expected",
    [
        (
            False,
            (
                False,
                "VERBOSE",
                "Link requires a different Python (3.6.5 not in: '== 3.6.4'): "
                "https://example.com",
            ),
        ),
        (
            True,
            (
                True,
                "DEBUG",
                "Ignoring failed Requires-Python check (3.6.5 not in: '== 3.6.4') "
                "for link: https://example.com",
            ),
        ),
    ],
)
def test_check_link_requires_python__incompatible_python(
    caplog: pytest.LogCaptureFixture,
    ignore_requires_python: bool,
    expected: Tuple[bool, str, str],
) -> None:
    """
    Test a link whose Requires-Python rejects the running interpreter.
    """
    caplog.set_level(logging.DEBUG)
    expected_return, expected_level, expected_message = expected
    result = _check_link_requires_python(
        Link("https://example.com", requires_python="== 3.6.4"),
        version_info=(3, 6, 5),
        ignore_requires_python=ignore_requires_python,
    )
    assert result == expected_return
    check_caplog(caplog, expected_level, expected_message)
def test_check_link_requires_python__invalid_requires(
    caplog: pytest.LogCaptureFixture,
) -> None:
    """
    Test the log message emitted for an unparseable Requires-Python.
    """
    caplog.set_level(logging.DEBUG)
    result = _check_link_requires_python(
        Link("https://example.com", requires_python="invalid"),
        version_info=(3, 6, 5),
    )
    # An invalid value is ignored, so the link is considered compatible.
    assert result
    check_caplog(
        caplog,
        "DEBUG",
        "Ignoring invalid Requires-Python ('invalid') for link: https://example.com",
    )
class TestLinkEvaluator:
    """Tests for LinkEvaluator.evaluate_link()."""

    @pytest.mark.parametrize(
        "py_version_info,ignore_requires_python,expected",
        [
            ((3, 6, 5), False, (True, "1.12")),
            # Test an incompatible Python.
            ((3, 6, 4), False, (False, None)),
            # Test an incompatible Python with ignore_requires_python=True.
            ((3, 6, 4), True, (True, "1.12")),
        ],
    )
    def test_evaluate_link(
        self,
        py_version_info: Tuple[int, int, int],
        ignore_requires_python: bool,
        expected: Tuple[bool, Optional[str]],
    ) -> None:
        """Requires-Python handling during link evaluation."""
        evaluator = LinkEvaluator(
            project_name="twine",
            canonical_name="twine",
            formats=frozenset(["source"]),
            target_python=TargetPython(py_version_info=py_version_info),
            allow_yanked=True,
            ignore_requires_python=ignore_requires_python,
        )
        result = evaluator.evaluate_link(
            Link(
                "https://example.com/#egg=twine-1.12",
                requires_python="== 3.6.5",
            )
        )
        assert result == expected

    @pytest.mark.parametrize(
        "yanked_reason, allow_yanked, expected",
        [
            (None, True, (True, "1.12")),
            (None, False, (True, "1.12")),
            ("", True, (True, "1.12")),
            ("", False, (False, "yanked for reason: <none given>")),
            ("bad metadata", True, (True, "1.12")),
            ("bad metadata", False, (False, "yanked for reason: bad metadata")),
            # Test a unicode string with a non-ascii character.
            ("curly quote: \u2018", True, (True, "1.12")),
            (
                "curly quote: \u2018",
                False,
                (False, "yanked for reason: curly quote: \u2018"),
            ),
        ],
    )
    def test_evaluate_link__allow_yanked(
        self,
        yanked_reason: str,
        allow_yanked: bool,
        expected: Tuple[bool, str],
    ) -> None:
        """Yanked links are rejected unless allow_yanked is set."""
        evaluator = LinkEvaluator(
            project_name="twine",
            canonical_name="twine",
            formats=frozenset(["source"]),
            target_python=TargetPython(py_version_info=(3, 6, 4)),
            allow_yanked=allow_yanked,
        )
        result = evaluator.evaluate_link(
            Link(
                "https://example.com/#egg=twine-1.12",
                yanked_reason=yanked_reason,
            )
        )
        assert result == expected

    def test_evaluate_link__incompatible_wheel(self) -> None:
        """
        Test an incompatible wheel.
        """
        target_python = TargetPython(py_version_info=(3, 6, 4))
        # An empty valid-tag list guarantees no wheel can be compatible.
        target_python._valid_tags = []
        evaluator = LinkEvaluator(
            project_name="sample",
            canonical_name="sample",
            formats=frozenset(["binary"]),
            target_python=target_python,
            allow_yanked=True,
        )
        result = evaluator.evaluate_link(
            Link("https://example.com/sample-1.0-py2.py3-none-any.whl")
        )
        assert result == (
            False,
            "none of the wheel's tags (py2-none-any, py3-none-any) are compatible "
            "(run pip debug --verbose to show compatible tags)",
        )
@pytest.mark.parametrize(
    "hex_digest, expected_versions",
    [
        (64 * "a", ["1.0", "1.1"]),
        (64 * "b", ["1.0", "1.2"]),
        (64 * "c", ["1.0", "1.1", "1.2"]),
    ],
)
def test_filter_unallowed_hashes(hex_digest: str, expected_versions: List[str]) -> None:
    """Candidates with no digest always pass; digests must match the hashes."""
    candidates = [
        make_mock_candidate("1.0"),
        make_mock_candidate("1.1", hex_digest=(64 * "a")),
        make_mock_candidate("1.2", hex_digest=(64 * "b")),
    ]
    filtered = filter_unallowed_hashes(
        candidates,
        hashes=Hashes({"sha256": [hex_digest]}),
        project_name="my-project",
    )
    assert [str(candidate.version) for candidate in filtered] == expected_versions
    # The function must always return a new list, never the input object.
    assert filtered is not candidates
def test_filter_unallowed_hashes__no_hashes(caplog: pytest.LogCaptureFixture) -> None:
    """With no hashes to check, an unfiltered copy of the input is returned."""
    caplog.set_level(logging.DEBUG)
    candidates = [
        make_mock_candidate("1.0"),
        make_mock_candidate("1.1"),
    ]
    filtered = filter_unallowed_hashes(
        candidates,
        hashes=Hashes(),
        project_name="my-project",
    )
    # Equal contents, but a distinct list object (a copy).
    assert filtered == candidates
    assert filtered is not candidates
    check_caplog(
        caplog,
        "DEBUG",
        "Given no hashes to check 2 links for project 'my-project': "
        "discarding no candidates",
    )
def test_filter_unallowed_hashes__log_message_with_match(
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Check the log message when some links match and others do not."""
    caplog.set_level(logging.DEBUG)
    # One match, two non-matches, and three links with no digest, so all
    # three counts reported in the log message are distinct.
    candidates = [
        make_mock_candidate("1.0"),
        make_mock_candidate("1.1"),
        make_mock_candidate("1.2"),
        make_mock_candidate("1.3", hex_digest=(64 * "a")),
        make_mock_candidate("1.4", hex_digest=(64 * "b")),
        make_mock_candidate("1.5", hex_digest=(64 * "c")),
    ]
    filtered = filter_unallowed_hashes(
        candidates,
        hashes=Hashes({"sha256": [64 * "a", 64 * "d"]}),
        project_name="my-project",
    )
    assert len(filtered) == 4
    check_caplog(
        caplog,
        "DEBUG",
        "Checked 6 links for project 'my-project' against 2 hashes "
        "(1 matches, 3 no digest): discarding 2 non-matches:\n"
        " https://example.com/pkg-1.4.tar.gz#sha256="
        "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n"
        " https://example.com/pkg-1.5.tar.gz#sha256="
        "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
    )
def test_filter_unallowed_hashes__log_message_with_no_match(
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Check the log message when no link matches any allowed hash."""
    caplog.set_level(logging.DEBUG)
    candidates = [
        make_mock_candidate("1.0"),
        make_mock_candidate("1.1", hex_digest=(64 * "b")),
        make_mock_candidate("1.2", hex_digest=(64 * "c")),
    ]
    filtered = filter_unallowed_hashes(
        candidates,
        hashes=Hashes({"sha256": [64 * "a", 64 * "d"]}),
        project_name="my-project",
    )
    # With zero matches, nothing is discarded.
    assert len(filtered) == 3
    check_caplog(
        caplog,
        "DEBUG",
        "Checked 3 links for project 'my-project' against 2 hashes "
        "(0 matches, 1 no digest): discarding no candidates",
    )
class TestCandidateEvaluator:
    """Tests for CandidateEvaluator construction, filtering, and sorting."""
    @pytest.mark.parametrize(
        "allow_all_prereleases, prefer_binary",
        [
            (False, False),
            (False, True),
            (True, False),
            (True, True),
        ],
    )
    def test_create(self, allow_all_prereleases: bool, prefer_binary: bool) -> None:
        """
        Test that create() stores the preferences and tags it is given.
        """
        target_python = TargetPython()
        # Force a single known tag so _supported_tags is predictable.
        target_python._valid_tags = [Tag("py36", "none", "any")]
        specifier = SpecifierSet()
        evaluator = CandidateEvaluator.create(
            project_name="my-project",
            target_python=target_python,
            allow_all_prereleases=allow_all_prereleases,
            prefer_binary=prefer_binary,
            specifier=specifier,
        )
        assert evaluator._allow_all_prereleases == allow_all_prereleases
        assert evaluator._prefer_binary == prefer_binary
        # The specifier object should be stored as-is, not copied.
        assert evaluator._specifier is specifier
        assert evaluator._supported_tags == [Tag("py36", "none", "any")]
    def test_create__target_python_none(self) -> None:
        """
        Test passing target_python=None.
        """
        evaluator = CandidateEvaluator.create("my-project")
        # With no target_python, the current environment's tags are used.
        expected_tags = get_supported()
        assert evaluator._supported_tags == expected_tags
    def test_create__specifier_none(self) -> None:
        """
        Test passing specifier=None.
        """
        evaluator = CandidateEvaluator.create("my-project")
        # A None specifier defaults to an empty (match-everything) set.
        expected_specifier = SpecifierSet()
        assert evaluator._specifier == expected_specifier
    def test_get_applicable_candidates(self) -> None:
        """
        Test that candidates are filtered by the version specifier.
        """
        specifier = SpecifierSet("<= 1.11")
        versions = ["1.10", "1.11", "1.12"]
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            "my-project",
            specifier=specifier,
        )
        actual = evaluator.get_applicable_candidates(candidates)
        expected_applicable = candidates[:2]
        # Sanity-check which versions we expect to survive the filter.
        assert [str(c.version) for c in expected_applicable] == [
            "1.10",
            "1.11",
        ]
        assert actual == expected_applicable
    @pytest.mark.parametrize(
        "specifier, expected_versions",
        [
            # Test no version constraint.
            (SpecifierSet(), ["1.0", "1.2"]),
            # Test a version constraint that excludes the candidate whose
            # hash matches.  Then the non-allowed hash is a candidate.
            (SpecifierSet("<= 1.1"), ["1.0", "1.1"]),
        ],
    )
    def test_get_applicable_candidates__hashes(
        self,
        specifier: SpecifierSet,
        expected_versions: List[str],
    ) -> None:
        """
        Test a non-None hashes value.
        """
        candidates = [
            make_mock_candidate("1.0"),
            make_mock_candidate("1.1", hex_digest=(64 * "a")),
            make_mock_candidate("1.2", hex_digest=(64 * "b")),
        ]
        hashes_data = {
            "sha256": [64 * "b"],
        }
        hashes = Hashes(hashes_data)
        evaluator = CandidateEvaluator.create(
            "my-project",
            specifier=specifier,
            hashes=hashes,
        )
        actual = evaluator.get_applicable_candidates(candidates)
        actual_versions = [str(c.version) for c in actual]
        assert actual_versions == expected_versions
    def test_compute_best_candidate(self) -> None:
        """
        Test that the best candidate is the highest applicable version.
        """
        specifier = SpecifierSet("<= 1.11")
        versions = ["1.10", "1.11", "1.12"]
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            "my-project",
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)
        assert result._candidates == candidates
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            "1.10",
            "1.11",
        ]
        assert result._applicable_candidates == expected_applicable
        # The best candidate is the highest applicable version (1.11).
        assert result.best_candidate is expected_applicable[1]
    def test_compute_best_candidate__none_best(self) -> None:
        """
        Test returning a None best candidate.
        """
        specifier = SpecifierSet("<= 1.10")
        versions = ["1.11", "1.12"]
        candidates = [make_mock_candidate(version) for version in versions]
        evaluator = CandidateEvaluator.create(
            "my-project",
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)
        assert result._candidates == candidates
        assert result._applicable_candidates == []
        assert result.best_candidate is None
    @pytest.mark.parametrize(
        "hex_digest, expected",
        [
            # Test a link with no hash.
            (None, 0),
            # Test a link with an allowed hash.
            (64 * "a", 1),
            # Test a link with a hash that isn't allowed.
            (64 * "b", 0),
        ],
    )
    def test_sort_key__hash(self, hex_digest: Optional[str], expected: int) -> None:
        """
        Test the effect of the link's hash on _sort_key()'s return value.
        """
        candidate = make_mock_candidate("1.0", hex_digest=hex_digest)
        hashes_data = {
            "sha256": [64 * "a"],
        }
        hashes = Hashes(hashes_data)
        evaluator = CandidateEvaluator.create("my-project", hashes=hashes)
        sort_value = evaluator._sort_key(candidate)
        # The hash is reflected in the first element of the tuple.
        actual = sort_value[0]
        assert actual == expected
    @pytest.mark.parametrize(
        "yanked_reason, expected",
        [
            # Test a non-yanked file.
            (None, 0),
            # Test a yanked file (has a lower value than non-yanked).
            ("bad metadata", -1),
        ],
    )
    def test_sort_key__is_yanked(
        self, yanked_reason: Optional[str], expected: int
    ) -> None:
        """
        Test the effect of is_yanked on _sort_key()'s return value.
        """
        candidate = make_mock_candidate("1.0", yanked_reason=yanked_reason)
        evaluator = CandidateEvaluator.create("my-project")
        sort_value = evaluator._sort_key(candidate)
        # Yanked / non-yanked is reflected in the second element of the tuple.
        actual = sort_value[1]
        assert actual == expected
    def test_sort_best_candidate__no_candidates(self) -> None:
        """
        Test passing an empty list.
        """
        evaluator = CandidateEvaluator.create("my-project")
        actual = evaluator.sort_best_candidate([])
        assert actual is None
    def test_sort_best_candidate__best_yanked_but_not_all(
        self,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """
        Test the best candidates being yanked, but not all.
        """
        caplog.set_level(logging.INFO)
        candidates = [
            make_mock_candidate("4.0", yanked_reason="bad metadata #4"),
            # Put the best candidate in the middle, to test sorting.
            make_mock_candidate("2.0"),
            make_mock_candidate("3.0", yanked_reason="bad metadata #3"),
            make_mock_candidate("1.0"),
        ]
        expected_best = candidates[1]
        evaluator = CandidateEvaluator.create("my-project")
        actual = evaluator.sort_best_candidate(candidates)
        assert actual is expected_best
        assert str(actual.version) == "2.0"
        # Check the log messages.
        # No warning should be emitted since a non-yanked candidate won.
        assert len(caplog.records) == 0
class TestPackageFinder:
    """Tests for PackageFinder construction and its evaluator factories."""
    @pytest.mark.parametrize(
        "allow_all_prereleases, prefer_binary",
        [
            (False, False),
            (False, True),
            (True, False),
            (True, True),
        ],
    )
    def test_create__candidate_prefs(
        self,
        allow_all_prereleases: bool,
        prefer_binary: bool,
    ) -> None:
        """
        Test that the _candidate_prefs attribute is set correctly.
        """
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        selection_prefs = SelectionPreferences(
            allow_yanked=True,
            allow_all_prereleases=allow_all_prereleases,
            prefer_binary=prefer_binary,
        )
        finder = PackageFinder.create(
            link_collector=link_collector,
            selection_prefs=selection_prefs,
            use_deprecated_html5lib=False,
        )
        candidate_prefs = finder._candidate_prefs
        assert candidate_prefs.allow_all_prereleases == allow_all_prereleases
        assert candidate_prefs.prefer_binary == prefer_binary
    def test_create__link_collector(self) -> None:
        """
        Test that the _link_collector attribute is set correctly.
        """
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        finder = PackageFinder.create(
            link_collector=link_collector,
            selection_prefs=SelectionPreferences(allow_yanked=True),
            use_deprecated_html5lib=False,
        )
        # The collector must be stored by reference, not copied.
        assert finder._link_collector is link_collector
    def test_create__target_python(self) -> None:
        """
        Test that the _target_python attribute is set correctly.
        """
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        target_python = TargetPython(py_version_info=(3, 7, 3))
        finder = PackageFinder.create(
            link_collector=link_collector,
            selection_prefs=SelectionPreferences(allow_yanked=True),
            target_python=target_python,
            use_deprecated_html5lib=False,
        )
        actual_target_python = finder._target_python
        # The target_python attribute should be set as is.
        assert actual_target_python is target_python
        # Check that the attributes weren't reset.
        assert actual_target_python.py_version_info == (3, 7, 3)
    def test_create__target_python_none(self) -> None:
        """
        Test passing target_python=None.
        """
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        finder = PackageFinder.create(
            link_collector=link_collector,
            selection_prefs=SelectionPreferences(allow_yanked=True),
            target_python=None,
            use_deprecated_html5lib=False,
        )
        # Spot-check the default TargetPython object.
        actual_target_python = finder._target_python
        assert actual_target_python._given_py_version_info is None
        assert actual_target_python.py_version_info == CURRENT_PY_VERSION_INFO
    @pytest.mark.parametrize("allow_yanked", [False, True])
    def test_create__allow_yanked(self, allow_yanked: bool) -> None:
        """
        Test that the _allow_yanked attribute is set correctly.
        """
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        selection_prefs = SelectionPreferences(allow_yanked=allow_yanked)
        finder = PackageFinder.create(
            link_collector=link_collector,
            selection_prefs=selection_prefs,
            use_deprecated_html5lib=False,
        )
        assert finder._allow_yanked == allow_yanked
    @pytest.mark.parametrize("ignore_requires_python", [False, True])
    def test_create__ignore_requires_python(self, ignore_requires_python: bool) -> None:
        """
        Test that the _ignore_requires_python attribute is set correctly.
        """
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        selection_prefs = SelectionPreferences(
            allow_yanked=True,
            ignore_requires_python=ignore_requires_python,
        )
        finder = PackageFinder.create(
            link_collector=link_collector,
            selection_prefs=selection_prefs,
            use_deprecated_html5lib=False,
        )
        assert finder._ignore_requires_python == ignore_requires_python
    def test_create__format_control(self) -> None:
        """
        Test that the format_control attribute is set correctly.
        """
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        format_control = FormatControl(set(), {":all:"})
        selection_prefs = SelectionPreferences(
            allow_yanked=True,
            format_control=format_control,
        )
        finder = PackageFinder.create(
            link_collector=link_collector,
            selection_prefs=selection_prefs,
            use_deprecated_html5lib=False,
        )
        actual_format_control = finder.format_control
        assert actual_format_control is format_control
        # Check that the attributes weren't reset.
        assert actual_format_control.only_binary == {":all:"}
    @pytest.mark.parametrize(
        "allow_yanked, ignore_requires_python, only_binary, expected_formats",
        [
            (False, False, {}, frozenset({"binary", "source"})),
            # Test allow_yanked=True.
            (True, False, {}, frozenset({"binary", "source"})),
            # Test ignore_requires_python=True.
            (False, True, {}, frozenset({"binary", "source"})),
            # Test a non-trivial only_binary.
            (False, False, {"twine"}, frozenset({"binary"})),
        ],
    )
    def test_make_link_evaluator(
        self,
        allow_yanked: bool,
        ignore_requires_python: bool,
        only_binary: Set[str],
        expected_formats: FrozenSet[str],
    ) -> None:
        """
        Test that make_link_evaluator() propagates the finder's settings.
        """
        # Create a test TargetPython that we can check for.
        target_python = TargetPython(py_version_info=(3, 7))
        format_control = FormatControl(set(), only_binary)
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        finder = PackageFinder(
            link_collector=link_collector,
            target_python=target_python,
            allow_yanked=allow_yanked,
            format_control=format_control,
            ignore_requires_python=ignore_requires_python,
            use_deprecated_html5lib=False,
        )
        # Pass a project_name that will be different from canonical_name.
        link_evaluator = finder.make_link_evaluator("Twine")
        assert link_evaluator.project_name == "Twine"
        assert link_evaluator._canonical_name == "twine"
        assert link_evaluator._allow_yanked == allow_yanked
        assert link_evaluator._ignore_requires_python == ignore_requires_python
        assert link_evaluator._formats == expected_formats
        # Test the _target_python attribute.
        actual_target_python = link_evaluator._target_python
        # The target_python attribute should be set as is.
        assert actual_target_python is target_python
        # For good measure, check that the attributes weren't reset.
        assert actual_target_python._given_py_version_info == (3, 7)
        assert actual_target_python.py_version_info == (3, 7, 0)
    @pytest.mark.parametrize(
        "allow_all_prereleases, prefer_binary",
        [
            (False, False),
            (False, True),
            (True, False),
            (True, True),
        ],
    )
    def test_make_candidate_evaluator(
        self,
        allow_all_prereleases: bool,
        prefer_binary: bool,
    ) -> None:
        """
        Test that make_candidate_evaluator() propagates the finder's settings.
        """
        target_python = TargetPython()
        # Force a single known tag so _supported_tags is predictable.
        target_python._valid_tags = [Tag("py36", "none", "any")]
        candidate_prefs = CandidatePreferences(
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
        )
        link_collector = LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )
        finder = PackageFinder(
            link_collector=link_collector,
            target_python=target_python,
            allow_yanked=True,
            candidate_prefs=candidate_prefs,
            use_deprecated_html5lib=False,
        )
        specifier = SpecifierSet()
        # Pass hashes to check that _hashes is set.
        hashes = Hashes({"sha256": [64 * "a"]})
        evaluator = finder.make_candidate_evaluator(
            "my-project",
            specifier=specifier,
            hashes=hashes,
        )
        assert evaluator._allow_all_prereleases == allow_all_prereleases
        assert evaluator._hashes == hashes
        assert evaluator._prefer_binary == prefer_binary
        assert evaluator._project_name == "my-project"
        assert evaluator._specifier is specifier
        assert evaluator._supported_tags == [Tag("py36", "none", "any")]
@pytest.mark.parametrize(
    ("fragment", "canonical_name", "expected"),
    [
        # Trivial.
        ("pip-18.0", "pip", 3),
        ("zope-interface-4.5.0", "zope-interface", 14),
        # Canonicalized name match non-canonicalized egg info. (pypa/pip#5870)
        ("Jinja2-2.10", "jinja2", 6),
        ("zope.interface-4.5.0", "zope-interface", 14),
        ("zope_interface-4.5.0", "zope-interface", 14),
        # Should be smart enough to parse ambiguous names from the provided
        # package name.
        ("foo-2-2", "foo", 3),
        ("foo-2-2", "foo-2", 5),
        # Should be able to detect collapsed characters in the egg info.
        ("foo--bar-1.0", "foo-bar", 8),
        ("foo-_bar-1.0", "foo-bar", 8),
        # The package name must not end with a dash (PEP 508), so the first
        # dash would be the separator, not the second.
        ("zope.interface--4.5.0", "zope-interface", 14),
        ("zope.interface--", "zope-interface", 14),
        # The version part is missing, but the split function does not care.
        ("zope.interface-", "zope-interface", 14),
    ],
)
def test_find_name_version_sep(
    fragment: str, canonical_name: str, expected: int
) -> None:
    """The returned index points at the dash separating name from version."""
    assert _find_name_version_sep(fragment, canonical_name) == expected
@pytest.mark.parametrize(
    ("fragment", "canonical_name"),
    [
        # A dash must follow the package name.
        ("zope.interface4.5.0", "zope-interface"),
        ("zope.interface.4.5.0", "zope-interface"),
        ("zope.interface.-4.5.0", "zope-interface"),
        ("zope.interface", "zope-interface"),
    ],
)
def test_find_name_version_sep_failure(fragment: str, canonical_name: str) -> None:
    """A fragment that doesn't contain the name raises ValueError."""
    with pytest.raises(ValueError) as ctx:
        _find_name_version_sep(fragment, canonical_name)
    assert str(ctx.value) == f"{fragment} does not match {canonical_name}"
@pytest.mark.parametrize(
    ("fragment", "canonical_name", "expected"),
    [
        # Trivial.
        ("pip-18.0", "pip", "18.0"),
        ("zope-interface-4.5.0", "zope-interface", "4.5.0"),
        # Canonicalized name match non-canonicalized egg info. (pypa/pip#5870)
        ("Jinja2-2.10", "jinja2", "2.10"),
        ("zope.interface-4.5.0", "zope-interface", "4.5.0"),
        ("zope_interface-4.5.0", "zope-interface", "4.5.0"),
        # Should be smart enough to parse ambiguous names from the provided
        # package name.
        ("foo-2-2", "foo", "2-2"),
        ("foo-2-2", "foo-2", "2"),
        ("zope.interface--4.5.0", "zope-interface", "-4.5.0"),
        ("zope.interface--", "zope-interface", "-"),
        # Should be able to detect collapsed characters in the egg info.
        ("foo--bar-1.0", "foo-bar", "1.0"),
        ("foo-_bar-1.0", "foo-bar", "1.0"),
        # Invalid.
        ("the-package-name-8.19", "does-not-match", None),
        ("zope.interface.-4.5.0", "zope.interface", None),
        ("zope.interface-", "zope-interface", None),
        ("zope.interface4.5.0", "zope-interface", None),
        ("zope.interface.4.5.0", "zope-interface", None),
        ("zope.interface.-4.5.0", "zope-interface", None),
        ("zope.interface", "zope-interface", None),
    ],
)
def test_extract_version_from_fragment(
    fragment: str, canonical_name: str, expected: Optional[str]
) -> None:
    """Extracting the version part of an egg fragment; None when unmatchable."""
    assert _extract_version_from_fragment(fragment, canonical_name) == expected
| |
import logging
from urllib.error import HTTPError, URLError
from xml.dom.minidom import parseString
from django import forms
from django.utils.translation import ugettext_lazy as _, ugettext
from reviewboard.hostingsvcs.errors import (AuthorizationError,
HostingServiceAPIError,
RepositoryError)
from reviewboard.hostingsvcs.forms import (HostingServiceAuthForm,
HostingServiceForm)
from reviewboard.hostingsvcs.service import (HostingService,
HostingServiceClient)
from reviewboard.scmtools.crypto_utils import (decrypt_password,
encrypt_password)
from reviewboard.scmtools.errors import FileNotFoundError
class CodebaseHQAuthForm(HostingServiceAuthForm):
    """Authentication form for linking a Codebase HQ account.

    Adds the Codebase-specific API key and account subdomain on top of the
    username/password fields provided by :py:class:`HostingServiceAuthForm`.
    """

    # API key from "My Profile -> API Credentials" on Codebase.
    api_key = forms.CharField(
        label=_('API key'),
        max_length=128,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The API key provided to your Codebase account. This is '
                    'available in My Profile under API Credentials.'))
    # The "<subdomain>" part of <subdomain>.codebasehq.com.
    domain = forms.CharField(
        label=_('Codebase domain'),
        max_length=128,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The subdomain used to access your Codebase account. '
                    'This is the "<tt>subdomain</tt>" of '
                    '<tt>subdomain</tt>.codebasehq.com.'))
    def get_credentials(self):
        """Return the credentials entered into this form.

        Returns:
            dict:
            The standard credentials from the parent form, extended with
            the ``domain`` and ``api_key`` values from this form.
        """
        credentials = super(CodebaseHQAuthForm, self).get_credentials()
        credentials.update({
            'domain': self.cleaned_data['domain'],
            'api_key': self.cleaned_data['api_key'],
        })
        return credentials
    class Meta(object):
        # Override the help text of the inherited username/password fields
        # to clarify how they relate to the Codebase-specific fields above.
        help_texts = {
            'hosting_account_username': _(
                'The username you use to log into Codebase. This should '
                '<em>not</em> include the domain name.'
            ),
            'hosting_account_password': _(
                'The password you use to log into Codebase. This is separate '
                'from the API key below.'
            ),
        }
class CodebaseHQForm(HostingServiceForm):
    """Repository configuration form for Codebase HQ.

    Collects the project name and repository short name used to build the
    repository and bug tracker URLs.
    """

    codebasehq_project_name = forms.CharField(
        label=_('Project name'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}))
    codebasehq_repo_name = forms.CharField(
        label=_('Repository short name'),
        max_length=128,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The short name of your repository. This can be found by '
                    'clicking the Settings button on the right-hand '
                    'side of the repository browser.'))
class CodebaseHQClient(HostingServiceClient):
    """Client for talking to the Codebase API.

    This implements the API methods that the hosting service needs, converting
    requests into API calls and those back into structured results.
    """

    #: Mimetype used for API requests and responses.
    API_MIMETYPE = 'application/xml'

    def __init__(self, hosting_service):
        """Initialize the client.

        Args:
            hosting_service (CodebaseHQ):
                The hosting service that owns this client.
        """
        self.hosting_service = hosting_service

    def api_get_file(self, repository, project_name, repo_name, path,
                     revision):
        """Return the content of a file in a repository.

        Args:
            repository (reviewboard.scmtools.models.Repository):
                The repository entry in Review Board.

            project_name (unicode):
                The name of the Codebase project.

            repo_name (unicode):
                The name of the repository.

            path (unicode):
                The path to the file in the repository.

            revision (unicode):
                The revision of the file or commit.

        Returns:
            bytes:
            The contents of the file.
        """
        url = '%s/%s/blob/' % (project_name, repo_name)

        if repository.tool.name == 'Git':
            # Git files are looked up by blob SHA alone; the path does not
            # appear in the URL.
            url += revision
        else:
            # Other SCMs address the file by revision and relative path.
            if path.startswith('/'):
                path = path[1:]

            url += '%s/%s' % (revision, path)

        return self.api_get(self.build_api_url(url), raw_content=True)

    def api_get_public_keys(self, username):
        """Return information on all public keys for a user.

        Args:
            username (unicode):
                The user to fetch public keys for.

        Returns:
            dict:
            Information on each of the user's public keys.
        """
        return self.api_get(self.build_api_url('users/%s/public_keys'
                                               % username))

    def api_get_repository(self, project_name, repo_name):
        """Return information on a repository.

        Args:
            project_name (unicode):
                The name of the Codebase project.

            repo_name (unicode):
                The name of the repository.

        Returns:
            dict:
            Information on the repository.

            See https://support.codebasehq.com/kb/repositories for the
            data returned.
        """
        return self.api_get(
            self.build_api_url('%s/%s' % (project_name, repo_name)))

    def build_api_url(self, url):
        """Return the URL for an API call.

        Args:
            url (unicode):
                The relative URL for the API call.

        Returns:
            unicode:
            The absolute URL for the API call.
        """
        return 'https://api3.codebasehq.com/%s' % url

    def api_get(self, url, raw_content=False):
        """Perform an HTTP GET request to the API.

        Args:
            url (unicode):
                The full URL to the API resource.

            raw_content (bool, optional):
                If set to ``True``, the raw content of the result will be
                returned, instead of a parsed XML result.

        Returns:
            object:
            The parsed content of the result, as a dictionary, or the raw
            bytes content if ``raw_content`` is ``True``.

        Raises:
            reviewboard.hostingsvcs.errors.AuthorizationError:
                The API credentials were rejected (HTTP 401).

            reviewboard.hostingsvcs.errors.HostingServiceAPIError:
                Any other HTTP or network-level failure.
        """
        hosting_service = self.hosting_service

        try:
            account_data = hosting_service.account.data

            # Codebase authenticates API requests with "domain/username" as
            # the Basic Auth username and the API key as the password.
            api_username = '%s/%s' % (account_data['domain'],
                                      hosting_service.account.username)
            api_key = decrypt_password(account_data['api_key'])

            response = self.http_get(
                url,
                username=api_username,
                password=api_key,
                headers={
                    'Accept': self.API_MIMETYPE,
                })

            data = response.data

            if raw_content:
                return data
            else:
                return self.parse_xml(data)
        except HTTPError as e:
            data = e.read()
            msg = str(e)

            # Prefer the error message from the API payload, if one was
            # returned and can be parsed.
            rsp = self.parse_xml(data)

            if rsp and 'errors' in rsp:
                errors = rsp['errors']

                if 'error' in errors:
                    msg = errors['error']

            if e.code == 401:
                raise AuthorizationError(msg)
            else:
                raise HostingServiceAPIError(msg, http_code=e.code, rsp=rsp)
        except URLError as e:
            raise HostingServiceAPIError(e.reason)

    def get_xml_text(self, nodes):
        """Return the text contents of a set of XML nodes.

        Args:
            nodes (list of xml.dom.minidom.Element):
                The list of nodes.

        Returns:
            unicode:
            The text content of the nodes.
        """
        return ''.join(
            node.data
            for node in nodes
            if node.nodeType == node.TEXT_NODE
        )

    def parse_xml(self, s):
        """Return the parsed content for an XML document.

        Args:
            s (unicode):
                The XML document as a string.

        Returns:
            dict:
            The parsed content of the XML document, with each key
            being a dictionary of other parsed content.

            If the document cannot be parsed, this will return ``None``.
        """
        try:
            doc = parseString(s)
        except Exception:
            # Codebase may return non-XML payloads (such as HTML error
            # pages). Treat anything unparseable as "no result".
            #
            # Note: This previously used a bare ``except:``, which would
            # also have swallowed KeyboardInterrupt and SystemExit.
            return None

        root = doc.documentElement

        return {
            root.tagName: self._parse_xml_node(root),
        }

    def _parse_xml_node(self, node):
        """Return the parsed content for a node in an XML document.

        This parses the content of a Codebase XML document, turning it into
        arrays, strings, and dictionaries of data.

        Args:
            node (xml.dom.minidom.Element):
                The node being parsed.

        Returns:
            object:
            The parsed content of the node, based on the type of node being
            processed.
        """
        node_type = node.getAttribute('type')
        is_nil = node.getAttribute('nil')

        if node_type == 'array':
            result = [
                self._parse_xml_node(child)
                for child in node.childNodes
                if child.nodeType == child.ELEMENT_NODE
            ]
        elif is_nil == 'true':
            # Codebase marks explicit nulls with nil="true".
            result = None
        else:
            child_nodes = [
                child
                for child in node.childNodes
                if child.nodeType == child.ELEMENT_NODE
            ]

            if child_nodes:
                # Element children become a dictionary keyed by tag name.
                result = dict([
                    (child.tagName, self._parse_xml_node(child))
                    for child in child_nodes
                ])
            else:
                # Leaf nodes resolve to their concatenated text content.
                result = self.get_xml_text(node.childNodes)

        return result
class CodebaseHQ(HostingService):
    """Repository hosting support for Codebase.

    Codebase is a repository hosting service that supports Subversion, Git,
    and Mercurial. It's available at https://codebasehq.com.

    This integration provides repository validation and file fetching. Due to
    API limitations, it does not support post-commit review at this time.
    """

    name = 'Codebase HQ'
    form = CodebaseHQForm
    auth_form = CodebaseHQAuthForm
    needs_authorization = True
    supports_bug_trackers = True
    supports_repositories = True
    supported_scmtools = ['Git', 'Subversion', 'Mercurial']

    # Path templates per SCM type, filled in from the linked account's
    # domain and the repository's extra_data fields.
    repository_fields = {
        'Git': {
            'path': 'git@codebasehq.com:%(domain)s/'
                    '%(codebasehq_project_name)s/'
                    '%(codebasehq_repo_name)s.git',
        },
        'Subversion': {
            'path': 'https://%(domain)s.codebasehq.com/'
                    '%(codebasehq_project_name)s/'
                    '%(codebasehq_repo_name)s.svn',
        },
        'Mercurial': {
            'path': 'https://%(domain)s.codebasehq.com/'
                    'projects/%(codebasehq_project_name)s/repositories/'
                    '%(codebasehq_repo_name)s/',
        },
    }

    # %%s is left as a literal %s placeholder for the ticket ID.
    bug_tracker_field = (
        'https://%(domain)s.codebasehq.com/projects/'
        '%(codebasehq_project_name)s/tickets/%%s'
    )

    #: A mapping of Codebase SCM types to SCMTool names.
    REPO_SCM_TOOL_MAP = {
        'git': 'Git',
        'svn': 'Subversion',
        'hg': 'Mercurial',
    }

    def __init__(self, *args, **kwargs):
        """Initialize the hosting service.

        Args:
            *args (tuple):
                Positional arguments for the parent constructor.

            **kwargs (dict):
                Keyword arguments for the parent constructor.
        """
        super(CodebaseHQ, self).__init__(*args, **kwargs)

        self.client = CodebaseHQClient(self)

    def authorize(self, username, password, credentials, *args, **kwargs):
        """Authorize an account for Codebase.

        Codebase uses HTTP Basic Auth with an API username (consisting of the
        Codebase team's domain and the account username) and an API key (for
        the password) for API calls, and a standard username/password for
        Subversion repository access. We need to store all of this.

        Args:
            username (unicode):
                The username to authorize.

            password (unicode):
                The API token used as a password.

            credentials (dict):
                Additional credentials from the authentication form.

            *args (tuple):
                Extra unused positional arguments.

            **kwargs (dict):
                Extra unused keyword arguments.

        Raises:
            reviewboard.hostingsvcs.errors.AuthorizationError:
                The credentials provided were not valid.
        """
        # The API key and password are encrypted at rest; only the domain is
        # stored in the clear.
        self.account.data.update({
            'domain': credentials['domain'],
            'api_key': encrypt_password(credentials['api_key']),
            'password': encrypt_password(password),
        })

        # Test the account to make sure the credentials are fine. Note that
        # we can only really sanity-check the API token, domain, and username
        # from here. There's no good way to check the actual password,
        # which we only use for Subversion repositories.
        #
        # This will raise a suitable error message if authorization fails.
        try:
            self.client.api_get_public_keys(username)
        except AuthorizationError:
            raise AuthorizationError(
                ugettext('One or more of the credentials provided were not '
                         'accepted by Codebase.'))

        # Only persist the credentials once the API check has passed.
        self.account.save()

    def is_authorized(self):
        """Return if the account has been authorized.

        This checks if all the modern authentication details are stored along
        with the account.

        Returns:
            bool:
            ``True`` if all required credentials are set for the account.
        """
        return (self.account.data.get('api_key') is not None and
                self.account.data.get('password') is not None and
                self.account.data.get('domain') is not None)

    def get_password(self):
        """Return the password for this account.

        This is used primarily for Subversion repositories, so that direct
        access can be performed in order to fetch properties and other
        information.

        This does not return the API key.

        Returns:
            unicode:
            The account password for repository access.
        """
        return decrypt_password(self.account.data['password'])

    def check_repository(self, codebasehq_project_name=None,
                         codebasehq_repo_name=None, tool_name=None,
                         *args, **kwargs):
        """Check the validity of a repository.

        This will perform an API request against Codebase to get information on
        the repository. This will throw an exception if the repository was not
        found, and return cleanly if it was found.

        Args:
            codebasehq_project_name (unicode):
                The name of the project on Codebase.

            codebasehq_repo_name (unicode):
                The name of the repository on Codebase.

            tool_name (unicode):
                The name of the SCMTool for the repository.

            *args (tuple):
                Extra unused positional arguments passed to this function.

            **kwargs (dict):
                Extra unused keyword arguments passed to this function.

        Raises:
            reviewboard.hostingsvcs.errors.RepositoryError:
                The repository was not found.
        """
        # The form should enforce these values.
        assert codebasehq_project_name
        assert codebasehq_repo_name
        assert tool_name

        try:
            info = self.client.api_get_repository(codebasehq_project_name,
                                                  codebasehq_repo_name)
        except HostingServiceAPIError as e:
            logging.error('Error finding Codebase repository "%s" for '
                          'project "%s": %s',
                          codebasehq_repo_name, codebasehq_project_name,
                          e)

            raise RepositoryError(
                ugettext('A repository with this name and project was '
                         'not found.'))

        # Verify that the SCM type reported by Codebase matches the SCMTool
        # the user selected for this repository.
        try:
            scm_type = info['repository']['scm']
        except KeyError:
            logging.error('Missing "scm" field for Codebase HQ repository '
                          'payload: %r',
                          info)

            raise RepositoryError(
                ugettext('Unable to determine the type of repository '
                         'from the Codebase API. Please report this.'))

        try:
            expected_tool_name = self.REPO_SCM_TOOL_MAP[scm_type]
        except KeyError:
            logging.error('Unexpected "scm" value "%s" for Codebase HQ '
                          'repository, using payload: %r',
                          scm_type, info)

            raise RepositoryError(
                ugettext('Unable to determine the type of repository '
                         'from the Codebase API. Please report this.'))

        if expected_tool_name != tool_name:
            raise RepositoryError(
                ugettext("The repository type doesn't match what you "
                         "selected. Did you mean %s?")
                % expected_tool_name)

    def get_file(self, repository, path, revision, *args, **kwargs):
        """Return the content of a file in a repository.

        This will perform an API request to fetch the contents of a file.

        Args:
            repository (reviewboard.scmtools.models.Repository):
                The repository containing the file.

            path (unicode):
                The path to the file in the repository.

            revision (unicode):
                The revision of the file in the repository.

            *args (tuple):
                Extra unused positional arguments passed to this function.

            **kwargs (dict):
                Extra unused keyword arguments passed to this function.

        Returns:
            bytes:
            The content of the file in the repository.

        Raises:
            reviewboard.scmtools.errors.FileNotFoundError:
                The file does not exist at the given revision.
        """
        try:
            return self.client.api_get_file(
                repository,
                repository.extra_data['codebasehq_project_name'],
                repository.extra_data['codebasehq_repo_name'],
                path, revision)
        except HostingServiceAPIError as e:
            if e.http_code == 404:
                # Translate the API-level 404 into the SCM-level error that
                # callers expect.
                raise FileNotFoundError(path, revision)
            else:
                logging.warning('Failed to fetch file from Codebase HQ '
                                'repository %s: %s',
                                repository, e)
                raise

    def get_file_exists(self, repository, path, revision, *args, **kwargs):
        """Return whether a given file exists.

        This will perform an API request to fetch the contents of a file,
        returning ``True`` if the content could be fetched.

        Args:
            repository (reviewboard.scmtools.models.Repository):
                The repository containing the file.

            path (unicode):
                The path to the file in the repository.

            revision (unicode):
                The revision of the file in the repository.

            *args (tuple):
                Extra unused positional arguments passed to this function.

            **kwargs (dict):
                Extra unused keyword arguments passed to this function.

        Returns:
            bool:
            ``True`` if the file exists in the repository.
        """
        # Any API failure (not just 404) is treated as "does not exist".
        try:
            self.client.api_get_file(
                repository,
                repository.extra_data['codebasehq_project_name'],
                repository.extra_data['codebasehq_repo_name'],
                path, revision)

            return True
        except HostingServiceAPIError:
            return False
| |
from django.template import loader
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from django.db.models import Q
from django.forms import ModelChoiceField
from django.http import QueryDict
from nadmin.sites import site
from nadmin.views import ModelAdminView, BaseAdminPlugin, ListAdminView
from nadmin.views.list import COL_LIST_VAR, ORDER_VAR
from nadmin.views.dashboard import widget_manager, BaseWidget, PartialBaseWidget
from nadmin.filters import FILTER_PREFIX, SEARCH_VAR
from nadmin.plugins.relate import RELATE_PREFIX
from nadmin.models import Bookmark
# Shorthand for applying CSRF protection to class-based view methods.
csrf_protect_m = method_decorator(csrf_protect)
class BookmarkPlugin(BaseAdminPlugin):
    """List-view plugin that renders a bookmark menu for the change list.

    Bookmarks are saved change-list query strings (filters, ordering,
    visible columns, search). Two sources are merged: statically declared
    ``list_bookmarks`` on the admin class and ``Bookmark`` rows from the
    database (the current user's own plus shared ones).
    """

    # Example static bookmark definitions:
    # [{'title': "Female", 'query': {'gender': True}, 'order': ('-age'), 'cols': ('first_name', 'age', 'phones'), 'search': 'Tom'}]
    list_bookmarks = []
    show_bookmarks = True

    def has_change_permission(self, obj=None):
        # Superusers may edit any bookmark; everyone else only their own.
        # With no obj (module-level permission check), allow.
        if not obj or self.user.is_superuser:
            return True
        else:
            return obj.user == self.user

    def get_context(self, context):
        """Add bookmark menu data to the template context.

        Builds a normalized query string for the current request so it can
        be compared against each bookmark's stored query string to mark the
        selected entry.
        """
        if not self.show_bookmarks:
            return context

        bookmarks = []

        # Canonical form of the current request's query string: only the
        # column/order/search/filter/relate params, sorted by key so it can
        # be string-compared against each bookmark's stored query.
        current_qs = '&'.join(['%s=%s' % (k, v) for k, v in sorted(
            filter(lambda i: bool(i[1] and (i[0] in (COL_LIST_VAR, ORDER_VAR, SEARCH_VAR) or i[0].startswith(FILTER_PREFIX)
                   or i[0].startswith(RELATE_PREFIX))), self.request.GET.items()))])

        model_info = (self.opts.app_label, self.opts.model_name)
        has_selected = False
        menu_title = _(u"Bookmark")
        list_base_url = reverse('nadmin:%s_%s_changelist' %
                                model_info, current_app=self.admin_site.name)

        # local bookmarks: declared statically on the admin class.
        for bk in self.list_bookmarks:
            title = bk['title']
            params = dict(
                [(FILTER_PREFIX + k, v) for (k, v) in bk['query'].items()])

            if 'order' in bk:
                params[ORDER_VAR] = '.'.join(bk['order'])
            if 'cols' in bk:
                params[COL_LIST_VAR] = '.'.join(bk['cols'])
            if 'search' in bk:
                params[SEARCH_VAR] = bk['search']

            def check_item(i):
                # Keep truthy values, but also explicit False (e.g. a
                # boolean filter set to False must survive).
                return bool(i[1]) or i[1] == False

            bk_qs = '&'.join(['%s=%s' % (k, v) for k, v in sorted(filter(check_item, params.items()))])

            url = list_base_url + '?' + bk_qs
            selected = (current_qs == bk_qs)

            bookmarks.append(
                {'title': title, 'selected': selected, 'url': url})
            if selected:
                menu_title = title
                has_selected = True

        # Database bookmarks: the user's own plus shared ones for this model.
        content_type = ContentType.objects.get_for_model(self.model)
        bk_model_info = (Bookmark._meta.app_label, Bookmark._meta.model_name)
        bookmarks_queryset = Bookmark.objects.filter(
            content_type=content_type,
            url_name='nadmin:%s_%s_changelist' % model_info
        ).filter(Q(user=self.user) | Q(is_share=True))

        for bk in bookmarks_queryset:
            selected = (current_qs == bk.query)

            # Link to the edit view only when the user may change it.
            if self.has_change_permission(bk):
                change_or_detail = 'change'
            else:
                change_or_detail = 'detail'

            bookmarks.append({'title': bk.title, 'selected': selected, 'url': bk.url, 'edit_url':
                              reverse('nadmin:%s_%s_%s' % (bk_model_info[0], bk_model_info[1], change_or_detail),
                                      args=(bk.id,))})
            if selected:
                menu_title = bk.title
                has_selected = True

        post_url = reverse('nadmin:%s_%s_bookmark' % model_info,
                           current_app=self.admin_site.name)

        new_context = {
            'bk_menu_title': menu_title,
            'bk_bookmarks': bookmarks,
            'bk_current_qs': current_qs,
            'bk_has_selected': has_selected,
            'bk_list_base_url': list_base_url,
            'bk_post_url': post_url,
            'has_add_permission_bookmark': self.admin_view.request.user.has_perm('nadmin.add_bookmark'),
            'has_change_permission_bookmark': self.admin_view.request.user.has_perm('nadmin.change_bookmark')
        }
        context.update(new_context)
        return context

    # Media
    def get_media(self, media):
        # Attach the client-side bookmark behavior to the change list page.
        return media + self.vendor('nadmin.plugin.bookmark.js')

    # Block Views
    def block_nav_menu(self, context, nodes):
        # Render the bookmark dropdown at the front of the nav menu.
        if self.show_bookmarks:
            nodes.insert(0, loader.render_to_string('nadmin/blocks/model_list.nav_menu.bookmarks.html', context_instance=context))
class BookmarkView(ModelAdminView):
    """Admin view that saves a new bookmark posted from the change list."""

    @csrf_protect_m
    @transaction.atomic
    def post(self, request):
        """Create a bookmark from POST data and return its title and URL."""
        url_name = 'nadmin:%s_%s_changelist' % (
            self.opts.app_label, self.opts.model_name)

        new_bookmark = Bookmark(
            content_type=ContentType.objects.get_for_model(self.model),
            title=request.POST['title'],
            user=self.user,
            query=request.POST.get('query', ''),
            is_share=request.POST.get('is_share', 0),
            url_name=url_name)
        new_bookmark.save()

        return self.render_response(
            {'title': new_bookmark.title, 'url': new_bookmark.url})
class BookmarkAdmin(object):
    """Admin options for the Bookmark model (hidden from the main menu)."""

    model_icon = 'fa fa-book'
    list_display = ('title', 'user', 'url_name', 'query')
    list_display_links = ('title',)
    user_fields = ['user']
    hidden_menu = True

    def queryset(self):
        """Limit non-superusers to their own bookmarks plus shared ones."""
        if not self.user.is_superuser:
            return Bookmark.objects.filter(
                Q(user=self.user) | Q(is_share=True))
        return Bookmark.objects.all()

    def get_list_display(self):
        """Hide the owner column from non-superusers."""
        columns = super(BookmarkAdmin, self).get_list_display()
        if not self.user.is_superuser:
            columns.remove('user')
        return columns

    def has_change_permission(self, obj=None):
        """Allow edits by superusers or the bookmark's owner."""
        if not obj or self.user.is_superuser:
            return True
        return obj.user == self.user
@widget_manager.register
class BookmarkWidget(PartialBaseWidget):
    """Dashboard widget showing the change list behind a saved bookmark."""

    widget_type = _('bookmark')
    widget_icon = 'fa fa-bookmark'
    description = _(
        'Bookmark Widget, can show user\'s bookmark list data in widget.')
    template = "nadmin/widgets/list.html"

    bookmark = ModelChoiceField(
        label=_('Bookmark'), queryset=Bookmark.objects.all(), required=False)

    def setup(self):
        """Resolve the chosen bookmark and build the list view behind it."""
        BaseWidget.setup(self)

        chosen = self.cleaned_data['bookmark']
        self.bookmark = chosen
        if not self.title:
            self.title = unicode(chosen)

        target_model = chosen.content_type.model_class()
        query_params = QueryDict(chosen.query)

        # Replay the bookmark's saved query string through a synthetic GET
        # request against the target model's change list.
        fake_request = self.make_get_request("", query_params.items())
        self.list_view = self.get_view_class(
            ListAdminView, target_model,
            list_per_page=10, list_editable=[])(fake_request)

    def has_perm(self):
        """All dashboard users may render this widget."""
        return True

    def context(self, context):
        """Inject the bookmark's result rows (at most five columns)."""
        view = self.list_view
        view.make_result_list()

        visible_fields = view.base_list_display
        if len(visible_fields) > 5:
            visible_fields = visible_fields[0:5]

        context['result_headers'] = [
            header for header in view.result_headers().cells
            if header.field_name in visible_fields
        ]
        context['results'] = [
            [cell for cell in row.cells if cell.field_name in visible_fields]
            for row in view.results()
        ]
        context['result_count'] = view.result_count
        context['page_url'] = self.bookmark.url
# Wire the bookmark feature into nadmin: the Bookmark model admin, the
# change-list plugin, and the POST endpoint used to create bookmarks.
site.register(Bookmark, BookmarkAdmin)
site.register_plugin(BookmarkPlugin, ListAdminView)
site.register_modelview(r'^bookmark/$', BookmarkView, name='%s_%s_bookmark')
| |
#!/usr/bin/env python
## top-level script for combining scores into composite statistics as part of CMS 2.0.
## last updated: 07.24.2017 vitti@broadinstitute.org #update docstrings, clean up common args
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from power.parse_func import get_neut_repfile_name, get_sel_repfile_name, get_emp_cms_file, get_sim_component_score_files, get_emp_component_score_files, load_empscores
from combine.input_func import write_perpop_ihh_from_xp, write_run_paramfile, write_pair_sourcefile, normalize
from combine.viz_func import hapSort_coreallele, hapSort, hapViz, readAnnotations, find_snp_index, pullRegion, load_from_hap
from dists.scores_func import calc_fst_deldaf, calc_delihh
from dists.freqbins_func import check_create_dir, execute
from dists.likes_func import get_master_likefiles
import numpy as np
import subprocess
import argparse
import gzip
import sys
import os
def check_zip(infilename):
	"""If *infilename* is absent but a .gz sibling exists, gunzip it in place.

	Args:
		infilename (str): path of the (possibly gzipped) file expected
			by downstream readers.

	Returns:
		None. Side effect: ``gunzip`` replaces ``infilename + ".gz"``
		with ``infilename`` on disk when needed.
	"""
	if os.path.isfile(infilename):
		return

	zipfilename = infilename + ".gz"
	if os.path.isfile(zipfilename):
		# Pass the path as a single argv element. The previous
		# implementation built "gunzip <path>" and .split() it, which
		# broke on filenames containing whitespace.
		subprocess.check_output(["gunzip", zipfilename])
	return
#############################
## DEFINE ARGUMENT PARSER ###
#############################
def full_parser_composite():
	"""Build the argument parser for the composite-statistics utilities.

	Returns:
		argparse.ArgumentParser: parser whose sub-commands cover
		recalculating input scores, computing composite (CMS) statistics,
		normalizing, and visualization.
	"""
	parser = argparse.ArgumentParser(description="This script contains command-line utilities for manipulating and combining component statistics")
	subparsers = parser.add_subparsers(help="sub-commands")

	##############################
	## RECALCULATE INPUT SCORES ##
	##############################
	freqscores_parser = subparsers.add_parser('freqscores', help="Calculate allele frequency-based scores (Fst and delDAF) for a pair of populations.")
	freqscores_parser.add_argument('inTped1', type=str, action="store", help="input tped 1")
	freqscores_parser.add_argument('inTped2', type=str, action="store", help="input tped 2")
	freqscores_parser.add_argument('recomFile', type=str, action="store", help="input recombination file") #should work around this
	freqscores_parser.add_argument('outfile', type=str, action="store", help="file to write")
	freqscores_parser.add_argument('--modelpath', action='store', type=str, default='cms/cms/model/', help="path to model directory containing executables") #will become redundant with conda

	delihh_from_ihs_parser = subparsers.add_parser('delihh_from_ihs', help="Calculate delIHH values from iHS output files.")
	delihh_from_ihs_parser.add_argument('readfile', type=str, action='store', help='input ihs file')
	delihh_from_ihs_parser.add_argument('writefile', type=str, action='store', help='delihh file to write')

	ihh_from_xp_parser = subparsers.add_parser('ihh_from_xp', help="extract per-pop iHH values from XP-EHH and write to individual files to facilitate arbitrary population comparisons ")
	ihh_from_xp_parser.add_argument('inXpehh', type=str, help="input xpehh file")
	ihh_from_xp_parser.add_argument('outIhh', type=str, help="write to file")
	ihh_from_xp_parser.add_argument('takePop', default=1, type=int, help="write for first (1) or second (2) pop in XP-EHH file?")

	xp_from_ihh_parser = subparsers.add_parser('xp_from_ihh', help="Calculate XP-EHH based on two per-pop iHH files.")
	xp_from_ihh_parser.add_argument('inIhh1', type=str, action='store', help="input ihh file 1")
	xp_from_ihh_parser.add_argument('inIhh2', type=str, action='store', help="input ihh file 2")
	xp_from_ihh_parser.add_argument('outfilename', type=str, action='store', help="write to file")

	######################################
	## CALCULATING COMPOSITE STATISTICS ##
	######################################
	composite_sims_parser = subparsers.add_parser('composite_sims', help='calculate composite scores for simulations')
	composite_sims_parser.add_argument('--regional_cms', action="store_true", default=False, help="calculate within-region CMS rather than genome-wide CMS")
	composite_sims_parser.add_argument('--scenariopop', action="store", help="sel directory")

	composite_emp_parser = subparsers.add_parser('composite_emp', help="calculate composite scores for empirical data")
	composite_emp_parser.add_argument('--score_basedir', default="/n/regal/sabeti_lab/jvitti/clear-synth/1kg_scores/")
	composite_emp_parser.add_argument('--regional_cms_chrom', type=int, action="store", help="if included, calculate within-region CMS (rather than CMS_gw) for specified bounds at this chromosome")
	composite_emp_parser.add_argument('--composite_writedir', type=str, action="store", help="write output to", default="")

	normsims_genomewide_parser = subparsers.add_parser('normsims_genomewide', help="normalize simulated composite scores to neutral")
	normemp_genomewide_parser = subparsers.add_parser('normemp_genomewide', help="normalize CMS scores to genome-wide") #norm emp REGIONS?
	normemp_genomewide_parser.add_argument('--score_basedir', default="/n/regal/sabeti_lab/jvitti/clear-synth/1kg_scores/")

	###############
	## VISUALIZE ##
	###############
	hapviz_parser = subparsers.add_parser('hapviz', help="Visualize haplotypes for region")
	hapviz_parser.add_argument('inputfile', type=str, action="store", help="input tped")
	hapviz_parser.add_argument('--startpos', type=int, help="define physical bounds of region")
	hapviz_parser.add_argument('--endpos', type=int, help="define physical bounds of region")
	# NOTE(review): positional args ignore `default`; 'out' is effectively
	# required here. Confirm whether it was meant to be optional (nargs='?').
	hapviz_parser.add_argument('out', type=str, default=None, help="save image as file")
	hapviz_parser.add_argument('--corepos', type=int, default=-1, help="partition haplotypes based on allele status at this position")
	hapviz_parser.add_argument('--title', type=str, default=None, help="title to give to plot")
	hapviz_parser.add_argument('--annotate', type=str, default=None, help="tab-delimited file where each line gives <chr.pos>\t<annotation>")
	hapviz_parser.add_argument('--maf', type=str, default=None, help="filter on minor allele frequency (e.g. .01, .05)")
	# Fixed: was type=str with an int default, so user-supplied values
	# arrived as strings while the default stayed an int.
	hapviz_parser.add_argument('--dpi', type=int, default=300, help="image resolution")

	ml_region_parser = subparsers.add_parser('ml_region', help='machine learning algorithm (within-region)') #connect to AJ work- this could also go in above section

	ucsc_viz_parser = subparsers.add_parser('ucsc_viz', help="Generate trackfiles of CMS scores for visualization in the UCSC genome browser.")
	ucsc_viz_parser.add_argument('infile_prefix', type=str, action="store", help="prefix of file containing scores to be reformatted (e.g. 'score_chr' for files named scores_chr#.txt)")
	ucsc_viz_parser.add_argument('outfile', type=str, action="store", help="file to write")
	ucsc_viz_parser.add_argument('--posIndex', type=int, action="store", default=1, help="index for column of datafile containing physical position (zero-indexed)")
	# Fixed: was type=str with an int default (inconsistent with --posIndex);
	# user-supplied values arrived as strings.
	ucsc_viz_parser.add_argument('--scoreIndex', type=int, action="store", default=-2, help="index for column of datafile containing score (zero-indexed)")
	ucsc_viz_parser.add_argument('--strip_header', action="store_true", help="if input files include header line")

	#################
	## SHARED ARGS ##
	#################
	for commonparser in [xp_from_ihh_parser, composite_sims_parser, composite_emp_parser, normemp_genomewide_parser]:
		commonparser.add_argument('--cmsdir', help='TEMPORARY, will become redundant with conda packaging', action='store', default="/idi/sabeti-scratch/jvitti/cms/cms/")
		commonparser.add_argument('--printOnly', action='store_true', help='print rather than execute pipeline commands')
	for sim_parser in [composite_sims_parser, normsims_genomewide_parser]:
		# Defaults are genuine ints now (argparse converted the previous
		# string defaults through type=int, so behavior is unchanged).
		sim_parser.add_argument('--nrep_sel', type=int, action='store', default=500)
		sim_parser.add_argument('--nrep_neut', type=int, action='store', default=1000)
	for composite_parser in [composite_sims_parser, composite_emp_parser]:
		composite_parser.add_argument('--likes_masterDir', type=str, default="/n/regal/sabeti_lab/jvitti/clear-synth/sims_reeval/likes_masters/", help="location of likelihood tables, defined")
		composite_parser.add_argument('--likes_nonSel', type=str, default="vsNeut", help='do we use completely neutral, or linked neutral SNPs for our non-causal distributions? by default, uses strict neutral (CMSgw)')
		composite_parser.add_argument('--likes_freqSuffix', type=str, default="allFreqs", help='for causal SNPs, include suffix to specify which selbins to include')
		composite_parser.add_argument('--cutoffline', type=str, default="250000\t1250000\t0\t1", help='specify bounds to include/exclude in calculations, along with MAF filter and likes decomposition')
		composite_parser.add_argument('--includeline', type=str, default="0\t0\t0\t0\t0\t0", help='specify (0:yes; 1:no) which scores to include: iHS, ... ') #JV complete
	for cms_parser in [composite_sims_parser, composite_emp_parser, normsims_genomewide_parser, normemp_genomewide_parser]:
		cms_parser.add_argument('--writedir', type=str, default="", help="specify relative path") #ENFORCE CONSISTENCY - assumes e.g. model with sim scores live in this folder
		cms_parser.add_argument('--runSuffix', type=str, default=None, help='add a suffix to .cms file (corresponds to runparamfile)')
	for common_parser in [composite_sims_parser, composite_emp_parser, normsims_genomewide_parser]:
		common_parser.add_argument('--simpop', action='store', help='simulated population', default=1)
	for emp_parser in [composite_emp_parser, normemp_genomewide_parser]:
		emp_parser.add_argument('--emppop', action='store', help='empirical population', default="YRI")
	for common_parser in [normsims_genomewide_parser, composite_sims_parser, composite_emp_parser]:
		common_parser.add_argument('--model', type=str, action="store", default="nulldefault")
		common_parser.add_argument('--checkOverwrite', action="store_true", default=False)

	#for commonparser in [composite_sims_parser, normsims_genomewide_parser, composite_emp_parser, normemp_genomewide_parser]: =
	##	commonparser.add_argument('--suffix', type= str, action='store', default='')
	#	commonparser.add_argument('--simpop', action='store', help='simulated population', default=1)
	#	commonparser.add_argument('--emppop', action='store', help='empirical population', default="YRI")
	#	commonparser.add_argument('--model', type=str, default="nulldefault")
	#	commonparser.add_argument('--nrep', type=int, default=1000) #hmm remove for normemp_genomewide
	return parser
############################
## DEFINE EXEC FUNCTIONS ###
############################
### Recalculate ancillary scores
### from primary component scores
def execute_freqscores(args):
	''' python wrapper for program to calculate Fst and delDAF '''
	# Thin CLI adapter: all work happens in dists.scores_func.calc_fst_deldaf.
	# The recombination file is still required by the underlying program
	# (see inline note); modelpath locates the compiled executables.
	calc_fst_deldaf(args.inTped1, args.inTped2, args.recomFile, args.outfile, args.modelpath) #should obviate need for recom file.
	return
def execute_delihh_from_ihs(args):
	''' python wrapper for program to calculate delIHH from iHS file (containing iHH0, iHH1) '''
	# Thin CLI adapter around dists.scores_func.calc_delihh.
	calc_delihh(args.readfile, args.writefile)
	return
def execute_ihh_from_xp(args):
	''' extract per-pop iHH scores from XP-EHH file and write to individual
	files to facilitate arbitrary population comparisons '''
	# Thin CLI adapter around combine.input_func.write_perpop_ihh_from_xp;
	# takePop selects the first (1) or second (2) population's columns.
	write_perpop_ihh_from_xp(args.inXpehh, args.outIhh, args.takePop)
	return
def execute_xp_from_ihh(args):
	''' python wrapper for program to (re)calculate XP-EHH from per-pop iHH values '''
	# Locate the compiled combiner; the cmsdir prefix becomes redundant
	# once conda packaging is complete.
	prefix = args.cmsdir if args.cmsdir is not None else ""
	executable = prefix + "combine/write_xpehh_from_ihh" #will become redundant with conda

	cmdstring = " ".join(
		[executable, args.inIhh1, args.inIhh2, args.outfilename])

	if args.printOnly:
		print(cmdstring)
	else:
		subprocess.check_call(cmdstring.split())
	return
### Combine input component scores
### in user-defined CMS statistic
def execute_composite_sims(args):
	''' given simulated data and component scores (e.g. from likes_from_model.py) together with likelihood tables, generate CMS scores '''
	# Per-replicate pipeline: for each replicate, write one ".pair" source
	# file per (selected pop, alt pop) pair, then invoke the compiled
	# combine_scores_* program on all pairs to produce a composite score file.
	sel_freq_bins = ['0.10', '0.20', '0.30', '0.40', '0.50', '0.60', '0.70', '0.80', '0.90']
	if args.cmsdir is not None: #will be able to nix this construction
		cmd = args.cmsdir #once conda packaging is complete
	else: #but for now, keep things smooth.
		cmd = ""
	# Choose within-region vs. genome-wide combiner and output suffix.
	if args.regional_cms:
		cmd += "combine/combine_scores_local"
		file_ending = ".cms.local.out"
	else:
		cmd += "combine/combine_scores_gw" #
		file_ending = ".cms.gw.out"
	model = args.model
	selpop = args.simpop
	numPerBin_sel = args.nrep_sel
	numPerBin_neut = args.nrep_neut
	########################################
	## SPECIFY INPUT LIKELIHOOD FUNCTIONS ##
	########################################
	likes_masterDir = args.likes_masterDir
	likes_nonSel = args.likes_nonSel
	likes_freqSuffix = args.likes_freqSuffix
	# NOTE(review): the likes model is hard-coded to
	# "gradient_101915_treebase_6_best" here rather than using args.model
	# (which IS used for score-file paths below) — confirm this is intended.
	ihs_master, nsl_master, delihh_master, xpehh_master, fst_master, deldaf_master = get_master_likefiles(likes_masterDir, "gradient_101915_treebase_6_best", selpop, likes_nonSel, likes_freqSuffix)
	########################################################
	## RECORD INPUT PARAMETERS (scores, MAF filter, etc.) ##
	########################################################
	writedir = args.writedir
	cutoffline = args.cutoffline
	includeline = args.includeline
	paramfilename = writedir + "run_params.txt"
	if args.runSuffix is not None:
		paramfilename += args.runSuffix
		suffix = args.runSuffix
		# A "reversedpolarity" suffix selects a polarity-reversed combiner
		# binary (executable name gets the same suffix).
		if "reversedpolarity" in args.runSuffix:
			cmd += "_reversedpolarity"
	else:
		suffix = ""
	paramfilename = write_run_paramfile(paramfilename, ihs_master, nsl_master, delihh_master, xpehh_master, fst_master, deldaf_master, cutoffline, includeline)
	print("wrote CMS run parameters to: " + paramfilename)
	# Populations are the fixed model demes 1-4; the selected pop is
	# removed so the remainder serve as outgroups.
	altpops = [1, 2, 3, 4]
	selpop = int(selpop)
	altpops.remove(selpop)
	scenariopop = args.scenariopop
	##################################
	## CALCULATE CMS: ALL NEUT SIMS ##
	##################################
	scoremodeldir = writedir + "/"#+ model #+ "/neut/"
	compositedir = scoremodeldir + "composite/"
	pairdir = scoremodeldir + "pairs/"
	check_create_dir(compositedir)
	check_create_dir(pairdir)
	for irep in range(1, numPerBin_neut +1):
		altpairs = []
		for altpop in altpops:
			# NOTE(review): this call omits the scenariopop positional that
			# the selection-bin call below passes — verify against the
			# signature of get_sim_component_score_files.
			in_ihs_file, in_nsl_file, in_delihh_file, in_xp_file, in_fst_deldaf_file = get_sim_component_score_files(model, irep, selpop, altpop, selbin = "neut", filebase = writedir, normed = True)
			pairfilename = pairdir + "rep" + str(irep) + "_" + str(selpop) + "_" + str(altpop) + ".pair"
			# Only include pairs whose five component-score files all exist.
			if os.path.isfile(in_ihs_file) and os.path.isfile(in_nsl_file) and os.path.isfile(in_delihh_file) and os.path.isfile(in_xp_file) and os.path.isfile(in_fst_deldaf_file):
				write_pair_sourcefile(pairfilename, in_ihs_file, in_delihh_file, in_nsl_file, in_xp_file, in_fst_deldaf_file)
				altpairs.append(pairfilename)
		if len(altpairs) !=0:
			outfile = compositedir + "rep" + str(irep) + "_" + str(selpop) + file_ending + suffix
			alreadyExists = False
			if args.checkOverwrite:
				if not os.path.isfile(outfile): #check for overwrite
					alreadyExists = False
				else:
					alreadyExists = True
			if alreadyExists == False:
				argstring = outfile + " " + paramfilename + " "
				for pairfile in altpairs:
					argstring += pairfile + " "
				fullcmd = cmd + " " + argstring
				print(fullcmd)
				# NOTE(review): neutral-replicate commands are printed but
				# NOT executed (execute() is commented out below), unlike
				# the selection branch — confirm this is intentional.
				#execute(fullcmd)
	#################################
	## CALCULATE CMS: ALL SEL SIMS ##
	#################################
	scoremodeldir = writedir + model + "/sel" + str(scenariopop) + "/"
	#check_create_dir(scoremodeldir)
	for sel_freq_bin in sel_freq_bins:
		this_bindir = scoremodeldir + "sel_" + str(sel_freq_bin) + "/"
		#check_create_dir(this_bindir)
		compositedir = this_bindir + "composite/"
		pairdir = this_bindir + "pairs/"
		check_create_dir(compositedir)
		check_create_dir(pairdir)
		for irep in range(1, numPerBin_sel +1):
			altpairs = []
			for altpop in altpops:
				in_ihs_file, in_nsl_file, in_delihh_file, in_xp_file, in_fst_deldaf_file = get_sim_component_score_files(model, irep, selpop, scenariopop, altpop, selbin = sel_freq_bin, filebase = writedir, normed = True)
				pairfilename = pairdir + "rep" + str(irep) + "_" + str(selpop) + "_" + str(altpop) + ".pair"
				# Also require a non-empty iHS file here (stricter than the
				# neutral branch above).
				if os.path.isfile(in_ihs_file) and os.path.isfile(in_nsl_file) and os.path.isfile(in_delihh_file) and os.path.isfile(in_xp_file) and os.path.isfile(in_fst_deldaf_file) and os.path.getsize(in_ihs_file) > 0:
					write_pair_sourcefile(pairfilename, in_ihs_file, in_delihh_file, in_nsl_file, in_xp_file, in_fst_deldaf_file)
					altpairs.append(pairfilename)
				else:
					print('missing')
					print(in_ihs_file, in_nsl_file, in_delihh_file, in_xp_file, in_fst_deldaf_file)
			if len(altpairs) !=0:
				outfile = compositedir + "rep" + str(irep) + "_" + str(selpop) + file_ending + suffix
				alreadyExists = False
				if args.checkOverwrite:
					if not os.path.isfile(outfile): #check for overwrite
						alreadyExists = False
					else:
						alreadyExists = True
				if alreadyExists == False:
					argstring = outfile + " " + paramfilename + " "
					for pairfile in altpairs:
						argstring += pairfile +" "
					fullcmd = cmd + " " + argstring
					print(fullcmd)
					execute(fullcmd)
				else:
					print(outfile + " already exists")
			else:
				print("no altpairs")
	print('calculated CMS scores for ' + str(numPerBin_neut) + ' neutral replicates and ' + str(numPerBin_sel) + " selection replicates per bin.")
	return
def execute_composite_emp(args):
    ''' given component scores from empirical data (e.g. from scans.py) together with likelihood tables, generate CMS scores '''
    # Maps each demographic-model population index to the 1000 Genomes
    # population codes it stands in for; key 0 holds American populations,
    # which the model excludes.
    model_popsdict = {1:["YRI", "AFR", "LWK", "GWD", "MSL", "ESN", "ASW", "ACB"],
                     2:["CEU", "EUR", "TSI", "FIN", "GBR", "IBS", "IRN"],
                     3:["CHB", "EAS", "JPT", "CHS", "CDX", "KHV"],
                     4:["BEB", "SAS", "GIH", "PJL", "STU", "ITU"],
                     0:["MXL", "AMR", "PUR", "CLM", "PEL"]} #American populations excluded from model
    if args.cmsdir is not None: #will be able to nix this construction
        cmd = args.cmsdir #once conda packaging is complete
    else: #but for now, keep things smooth.
        cmd = ""
    # Choose the combine executable and chromosome set: all autosomes for a
    # genome-wide run, or the single chromosome named by --regional_cms_chrom.
    if args.regional_cms_chrom is None:
        cmd += "combine/combine_scores_gw" #genome-wide
        chroms = range(1,23)
        file_ending = ".cms.gw.out"
    else:
        cmd += "combine/combine_scores_local" #within-region
        chroms = [args.regional_cms_chrom]
        file_ending = ".cms.local.out"
    model = args.model
    modelPop = args.simpop
    score_basedir = args.score_basedir
    composite_writedir = args.composite_writedir
    ########################################
    ## SPECIFY INPUT LIKELIHOOD FUNCTIONS ##
    ########################################
    likes_masterDir = args.likes_masterDir
    likes_nonSel = args.likes_nonSel
    likes_freqSuffix = args.likes_freqSuffix
    ihs_master, nsl_master, delihh_master, xpehh_master, fst_master, deldaf_master = get_master_likefiles(likes_masterDir, model, modelPop, likes_nonSel, likes_freqSuffix)
    #build in a check here to enforce correct likes for within-region vs. genome-wide?
    #########################
    ## DESIGNATE OUTGROUPS ##
    #########################
    emp_selpop = args.emppop
    # Find which model population the empirical selected pop belongs to.
    # American pops (index 0) have no model counterpart and fall back to 4.
    model_selpop = 0
    for modelpop in [1, 2, 3, 4, 0]:
        if emp_selpop in model_popsdict[modelpop]:
            model_selpop = modelpop
    if model_selpop == 0:
        model_selpop = 4
    # The remaining model populations act as outgroups; use the first
    # (reference) population code listed for each.
    altmodelpops = [1, 2, 3, 4]
    altmodelpops.remove(model_selpop)
    altpops = []
    for altpop in altmodelpops:
        altpops.append(model_popsdict[altpop][0])
    ########################################################
    ## RECORD INPUT PARAMETERS (scores, MAF filter, etc.) ##
    ########################################################
    writedir = args.writedir
    cutoffline = args.cutoffline
    includeline = args.includeline
    paramfilename = writedir + "run_params.txt"
    if args.runSuffix is not None:
        paramfilename += args.runSuffix
        suffix = args.runSuffix
    else:
        suffix = ""
    paramfilename += "_" + str(model_selpop)
    paramfilename = write_run_paramfile(paramfilename, ihs_master, nsl_master, delihh_master, xpehh_master, fst_master, deldaf_master, cutoffline, includeline)
    print("wrote CMS run parameters to: " + paramfilename)
    #############################################
    ## CALCULATE CMS: ITERATE OVER CHROMOSOMES ##
    #############################################
    for chrom in chroms:
        # For each outgroup, bundle the per-component score files into a
        # ".pair" source file consumed by the combine executable.
        altpairs = []
        for altpop in altpops:
            in_ihs_file, in_delihh_file, in_nsl_file, in_xp_file, in_fst_deldaf_file = get_emp_component_score_files(chrom, emp_selpop, altpop=altpop, basedir = score_basedir)
            for inputfile in [in_ihs_file, in_nsl_file, in_delihh_file, in_xp_file, in_fst_deldaf_file]:
                check_zip(inputfile)
            pairdir = score_basedir + "pairs/"
            mkdir_pairdir_cmd = "mkdir -p " + pairdir
            subprocess.check_output( mkdir_pairdir_cmd.split() )
            pairfilename = score_basedir + "pairs/chr" + str(chrom) + "_" + str(emp_selpop) + "_" + str(altpop) + ".pair"
            # only write the pair file when every component score file exists
            if os.path.isfile(in_ihs_file) and os.path.isfile(in_nsl_file) and os.path.isfile(in_delihh_file) and os.path.isfile(in_xp_file) and os.path.isfile(in_fst_deldaf_file):
                write_pair_sourcefile(pairfilename, in_ihs_file, in_delihh_file, in_nsl_file, in_xp_file, in_fst_deldaf_file)
                altpairs.append(pairfilename)
        if len(altpairs) !=0:
            outfile = composite_writedir
            if outfile[-1] != "/":
                outfile += "/"
            if args.regional_cms_chrom is not None:
                outfile += "regional/"
            else:
                outfile += "gw/"
            outfile += "chr" + str(chrom) + "_" + str(emp_selpop) + file_ending + suffix
            # honor --checkOverwrite: skip chromosomes already scored
            alreadyExists = False
            if args.checkOverwrite:
                if not os.path.isfile(outfile): #check for overwrite
                    alreadyExists = False
                else:
                    alreadyExists = True
            if alreadyExists == False:
                argstring = outfile + " " + paramfilename + " "
                for pairfile in altpairs:
                    argstring += pairfile + " "
                fullcmd = cmd + " " + argstring
                print(fullcmd)
                execute(fullcmd)
    print('calculated CMS scores for ' + str(len(chroms)) + ' chromosomes.')
    return
def execute_normsims_genomewide(args):
    """Given output from composite_sims, normalize all replicates to neutral parameters.

    Pools log composite scores from the neutral replicates to estimate a
    genome-wide mean and standard deviation, then appends a normalized-score
    column to every neutral and selection replicate file (written alongside
    the input as <file>.norm). Normalization parameters come from neutral
    simulations only, by design.
    """
    sel_freq_bins = ['0.10', '0.20', '0.30', '0.40', '0.50', '0.60', '0.70', '0.80', '0.90']
    model = args.model
    selpop = args.simpop
    numPerBin_sel = args.nrep_sel
    numPerBin_neut = args.nrep_neut
    writedir = args.writedir
    suffix = args.runSuffix

    def _normalize_repfile(rawfile, mean, sd):
        """Append a normalized-score column to each data row of rawfile,
        writing the result to <rawfile>.norm."""
        # NOTE: overwrite checking (--checkOverwrite) intentionally disabled here,
        # as in the original implementation.
        normedfile = rawfile + ".norm"
        with open(rawfile, 'r') as openfile, open(normedfile, 'w') as writefile:
            writefile.write(openfile.readline())  # copy header through unchanged
            for line in openfile:
                rawscore = np.log(float(line.split()[-1]))  # last column holds the raw CLR
                normalized = normalize(rawscore, mean, sd)
                writefile.write(line.strip('\n') + "\t" + str(normalized) + "\n")
        print("wrote to eg: " + normedfile)

    ##############################
    ## LOAD STATS FROM NEUT SIMS #
    ##############################
    values = []
    for irep in range(1, numPerBin_neut +1):
        outfile = get_neut_repfile_name(model, irep, selpop, suffix = suffix, normed=False, basedir=writedir)
        if os.path.isfile(outfile):
            with open(outfile, 'r') as openfile:
                openfile.readline()  # skip header
                for line in openfile:
                    values.append(np.log(float(line.split()[-1])))
        else:
            print('missing: ' + outfile)
    print('loaded ' + str(len(values)) + ' values from neutral sims...')
    # Drop NaN and +/-inf in a single pass (log of zero/negative raw scores).
    values = list(np.array(values)[np.isfinite(values)])
    mean = np.mean(values)
    var = np.var(values)
    sd = np.sqrt(var)
    print("max: " + str(max(values)))
    print("min: " + str(min(values)))
    print("mean: " + str(np.mean(values)))
    print("var: " + str(np.var(values)))
    ############################
    ## NORMALIZE NEUTRAL SIMS ##
    ############################
    for irep in range(1, numPerBin_neut +1):
        outfile = get_neut_repfile_name(model, irep, selpop, suffix = suffix, normed=False, basedir=writedir)
        if os.path.isfile(outfile):
            _normalize_repfile(outfile, mean, sd)
    ########################
    ## NORMALIZE SEL SIMS ##
    ########################
    for sel_freq_bin in sel_freq_bins:
        for irep in range(1, numPerBin_sel +1):
            rawfile = get_sel_repfile_name(model, irep, selpop, sel_freq_bin, suffix=suffix, normed = False, basedir=writedir)
            if os.path.isfile(rawfile):
                _normalize_repfile(rawfile, mean, sd)
    return
def execute_normemp_genomewide(args):
    """ given output from composite_emp, normalize CMS scores genome-wide """ #could also introduce a feature to normalize to explicitly neutral regions.
    selpop = args.emppop
    score_basedir = args.score_basedir
    if args.runSuffix is not None:
        suffix = args.runSuffix
    else:
        suffix = ""
    # Pool log-CLR scores from all chromosomes to estimate genome-wide
    # normalization parameters (mean / sd over finite values only).
    clr_scores = load_empscores(selpop, normed=False, suffix=suffix, basedir=score_basedir)
    scores = [np.log(item) for item in clr_scores]
    # Drop NaN and +/-inf in a single pass (log of zero/negative raw scores).
    scores = list(np.array(scores)[np.isfinite(scores)])
    print('loaded ' + str(len(scores)) + " scores")
    print("max: " + str(max(scores)))
    print("min: " + str(min(scores)))
    print("mean: " + str(np.mean(scores)))
    print("var: " + str(np.var(scores)))
    mean = np.mean(scores)
    var = np.var(scores)
    sd = np.sqrt(var)
    ##############
    ## NORMALIZE #
    ##############
    chroms = range(1,23)
    for thischrom in chroms:
        unnormedfile = get_emp_cms_file(selpop, thischrom, normed=False, basedir=score_basedir, suffix=suffix,) #model #selpop, chrom, normed=False, suffix=suffix, basedir = score_basedir)
        assert os.path.isfile(unnormedfile)
        normedfile = unnormedfile + ".norm.z"
        # BUGFIX: the previous version had `writefile.close` without parentheses,
        # so the output handle was never closed/flushed. `with` guarantees both
        # handles are closed even on error.
        with open(unnormedfile, 'r') as readfile, open(normedfile, 'w') as writefile:
            readfile.readline() #header (consumed, not copied to output)
            for line in readfile:
                line = line.strip('\n')
                entries = line.split()
                rawscore = np.log(float(entries[-1]))
                normedscore = normalize(rawscore, mean, sd)
                writefile.write(line + "\t" + str(normedscore) + '\n')
        print('wrote to ' + normedfile)
    return
### Visualize and home in
### on variants within regions
def execute_hapviz(args):
    ''' view haplotype data as a colored grid. original Shervin Tabrizi update Joe Vitti '''
    ##############
    ## LOAD DATA##
    ##############
    inputfilename = args.inputfile
    if ".hap" in inputfilename:
        haplotypes, coreindex, physpositions = load_from_hap(inputfilename, args.maf, corePos = args.corepos)
    else:
        # non-.hap input requires explicit region bounds
        if args.startpos is None or args.endpos is None:
            print("must provide bounds with --startpos and --endpos")
            sys.exit(0)
        else:
            startpos = int(args.startpos)
            endpos = int(args.endpos)
            haplotypes, coreindex, physpositions = pullRegion(inputfilename, startpos, endpos, args.maf, corePos = args.corepos)
    print("loaded genotypes for " + str(len(haplotypes[0])) + " sites... ")
    ########################
    ## SORT BY SIMILARITY ##
    ########################
    # BUGFIX: compare with != rather than `is not` — identity comparison
    # against an int literal relies on CPython small-int interning.
    if args.corepos != -1:
        hap = hapSort_coreallele(haplotypes, coreindex)
    else:
        hap = hapSort(haplotypes)
    ##########
    ## PLOT ##
    ##########
    fig = plt.figure()
    ax = fig.add_subplot(111)
    hapViz(ax, hap[0], args.out)
    if args.annotate is not None:
        positions, annotations = readAnnotations(args.annotate)
        ylim = ax.axis()[-1]
        for i_snppos in range(len(positions)):
            snppos = positions[i_snppos]
            annotation = annotations[i_snppos]
            if int(snppos) in physpositions:
                # exact match: mark the site and label it with its annotation
                foundindex = physpositions.index(int(snppos))
                ax.plot(foundindex, ylim, "v", color="black", markersize=1)
                ax.plot(foundindex, -5, "^", color="black", markersize=1)
                ax.text(foundindex, -35, str(snppos) +"\n" + annotation, fontsize=2, horizontalalignment='center')
            else: #find nearest and draw a line
                # BUGFIX: nearest site is the minimum *absolute* distance;
                # min() over signed differences picked the site farthest below.
                dif = [abs(item - int(snppos)) for item in physpositions]
                minDif = min(dif)
                minDifIndex = dif.index(minDif)
                print(str(minDifIndex))
                ax.axvline(minDifIndex, color="orange")
                print('found nearest proxy variant based on physical distance ' + str(physpositions[minDifIndex]))
    if args.title is not None:
        plt.title(args.title, fontsize=5)
    plt.tight_layout()
    plt.savefig(args.out, dpi=float(args.dpi))
    print("plotted to: " + args.out)
    plt.close()
    return
def execute_ml_region(args):
    """Localize the causal variant within a region via a machine-learning algorithm.

    Placeholder: not yet implemented; currently a no-op. The eventual
    implementation is expected to consume args.chrom, args.startBp, args.endBp.
    """
    return
def execute_ucsc_viz(args):
    ''' write score/position data to file for visualization in UCSC genome browser '''
    # Concatenates per-chromosome normalized score files into a single
    # bedGraph (chrom, start, end, score). Chromosomes are visited in
    # lexicographic order because bedGraph input must be case-sensitively
    # sorted. `with` ensures handles are closed even if a file is malformed
    # (the original leaked them on exception).
    #convertBedGraph
    inprefix = args.infile_prefix
    outfilename = args.outfile
    with open(outfilename, 'w') as outfile:
        for chrom in [1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 21, 22, 3, 4, 5, 6, 7, 8, 9]: #BEDGRAPH MUST BE CASE-SENSITIVE SORTED
            chromfile = inprefix + ".chr" + str(chrom) + ".txt.norm"
            assert os.path.isfile(chromfile)
            with open(chromfile, 'r') as infile:
                if args.strip_header:
                    infile.readline()
                for line in infile:
                    entries = line.strip('\n').split()
                    startPos = int(entries[int(args.posIndex)])
                    score = float(entries[int(args.scoreIndex)]) #use normalized value
                    # bedGraph intervals are half-open: [startPos, startPos + 1)
                    writestring = "chr" + str(chrom) + "\t" + str(startPos) + "\t" + str(startPos + 1) + "\t" + str(score) + "\n"
                    outfile.write(writestring)
    print("wrote to: " + outfilename)
    #convertBedGraphtoBigWig:
    print("for large datasets, convert to BigWig format, e.g.: http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/bedGraphToBigWig\n")
    return
##########
## MAIN ##
##########
if __name__ == '__main__':
    runparser = full_parser_composite()
    args = runparser.parse_args()
    # if called with no arguments (or only a subcommand), print help and exit
    if len(sys.argv)==1:
        runparser.parse_args(['--help'])
    elif len(sys.argv)==2:
        runparser.parse_args([sys.argv[1], '--help'])
    subcommand = sys.argv[1]
    # Dispatch to the matching execute_<subcommand> function defined above
    # (each wraps other programs in the pipeline). Looked up via globals()
    # instead of eval() so command-line text is never executed as code;
    # an unknown subcommand now raises KeyError instead of arbitrary eval.
    globals()['execute_' + subcommand](args)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.container_v1.proto import cluster_service_pb2_grpc
class ClusterManagerGrpcTransport(object):
    """gRPC transport class providing stubs for
    google.container.v1 ClusterManager API.
    The transport provides access to the raw gRPC stubs,
    which can be used to take advantage of advanced
    features of gRPC.
    """

    # The scopes needed to make gRPC calls to all of the methods defined
    # in this service.
    _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    def __init__(
        self, channel=None, credentials=None, address="container.googleapis.com:443"
    ):
        """Instantiate the transport class.
        Args:
            channel (grpc.Channel): A ``Channel`` instance through
                which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            address (str): The address where the service is hosted.
        Raises:
            ValueError: If both ``channel`` and ``credentials`` are given.
        """
        # If both `channel` and `credentials` are specified, raise an
        # exception (channels come with credentials baked in already).
        if channel is not None and credentials is not None:
            raise ValueError(
                "The `channel` and `credentials` arguments are mutually " "exclusive."
            )
        # Create the channel.
        if channel is None:
            channel = self.create_channel(address=address, credentials=credentials)
        self._channel = channel
        # gRPC uses objects called "stubs" that are bound to the
        # channel and provide a basic method for each RPC.
        self._stubs = {
            "cluster_manager_stub": cluster_service_pb2_grpc.ClusterManagerStub(channel)
        }

    @classmethod
    def create_channel(cls, address="container.googleapis.com:443", credentials=None):
        """Create and return a gRPC channel object.
        Args:
            address (str): The host for the channel to use.
            credentials (~.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return google.api_core.grpc_helpers.create_channel(
            address, credentials=credentials, scopes=cls._OAUTH_SCOPES
        )

    @property
    def channel(self):
        """The gRPC channel used by the transport.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return self._channel

    @property
    def list_clusters(self):
        """Return the gRPC stub for ClusterManager.ListClusters.
        Lists all clusters owned by a project in either the specified zone or all
        zones.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].ListClusters

    @property
    def get_cluster(self):
        """Return the gRPC stub for ClusterManager.GetCluster.
        Gets the details of a specific cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].GetCluster

    @property
    def create_cluster(self):
        """Return the gRPC stub for ClusterManager.CreateCluster.
        Creates a cluster, consisting of the specified number and type of Google
        Compute Engine instances.
        By default, the cluster is created in the project's `default
        network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
        One firewall is added for the cluster. After cluster creation, the
        cluster creates routes for each node to allow the containers on that
        node to communicate with all other instances in the cluster.
        Finally, an entry is added to the project's global metadata indicating
        which CIDR range is being used by the cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].CreateCluster

    @property
    def update_cluster(self):
        """Return the gRPC stub for ClusterManager.UpdateCluster.
        Updates the settings of a specific cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].UpdateCluster

    @property
    def update_node_pool(self):
        """Return the gRPC stub for ClusterManager.UpdateNodePool.
        Updates the version and/or image type for a specific node pool.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].UpdateNodePool

    @property
    def set_node_pool_autoscaling(self):
        """Return the gRPC stub for ClusterManager.SetNodePoolAutoscaling.
        Sets the autoscaling settings for a specific node pool.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetNodePoolAutoscaling

    @property
    def set_logging_service(self):
        """Return the gRPC stub for ClusterManager.SetLoggingService.
        Sets the logging service for a specific cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetLoggingService

    @property
    def set_monitoring_service(self):
        """Return the gRPC stub for ClusterManager.SetMonitoringService.
        Sets the monitoring service for a specific cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetMonitoringService

    @property
    def set_addons_config(self):
        """Return the gRPC stub for ClusterManager.SetAddonsConfig.
        Sets the addons for a specific cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetAddonsConfig

    @property
    def set_locations(self):
        """Return the gRPC stub for ClusterManager.SetLocations.
        Sets the locations for a specific cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetLocations

    @property
    def update_master(self):
        """Return the gRPC stub for ClusterManager.UpdateMaster.
        Updates the master for a specific cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].UpdateMaster

    @property
    def set_master_auth(self):
        """Return the gRPC stub for ClusterManager.SetMasterAuth.
        Used to set master auth materials. Currently supports :-
        Changing the admin password for a specific cluster.
        This can be either via password generation or explicitly set the password.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetMasterAuth

    @property
    def delete_cluster(self):
        """Return the gRPC stub for ClusterManager.DeleteCluster.
        Deletes the cluster, including the Kubernetes endpoint and all worker
        nodes.
        Firewalls and routes that were configured during cluster creation
        are also deleted.
        Other Google Compute Engine resources that might be in use by the cluster
        (e.g. load balancer resources) will not be deleted if they weren't present
        at the initial create time.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].DeleteCluster

    @property
    def list_operations(self):
        """Return the gRPC stub for ClusterManager.ListOperations.
        Lists all operations in a project in a specific zone or all zones.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].ListOperations

    @property
    def get_operation(self):
        """Return the gRPC stub for ClusterManager.GetOperation.
        Gets the specified operation.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].GetOperation

    @property
    def cancel_operation(self):
        """Return the gRPC stub for ClusterManager.CancelOperation.
        Cancels the specified operation.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].CancelOperation

    @property
    def get_server_config(self):
        """Return the gRPC stub for ClusterManager.GetServerConfig.
        Returns configuration info about the Kubernetes Engine service.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].GetServerConfig

    @property
    def list_node_pools(self):
        """Return the gRPC stub for ClusterManager.ListNodePools.
        Lists the node pools for a cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].ListNodePools

    @property
    def get_node_pool(self):
        """Return the gRPC stub for ClusterManager.GetNodePool.
        Retrieves the node pool requested.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].GetNodePool

    @property
    def create_node_pool(self):
        """Return the gRPC stub for ClusterManager.CreateNodePool.
        Creates a node pool for a cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].CreateNodePool

    @property
    def delete_node_pool(self):
        """Return the gRPC stub for ClusterManager.DeleteNodePool.
        Deletes a node pool from a cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].DeleteNodePool

    @property
    def rollback_node_pool_upgrade(self):
        """Return the gRPC stub for ClusterManager.RollbackNodePoolUpgrade.
        Roll back the previously Aborted or Failed NodePool upgrade.
        This will be an no-op if the last upgrade successfully completed.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].RollbackNodePoolUpgrade

    @property
    def set_node_pool_management(self):
        """Return the gRPC stub for ClusterManager.SetNodePoolManagement.
        Sets the NodeManagement options for a node pool.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetNodePoolManagement

    @property
    def set_labels(self):
        """Return the gRPC stub for ClusterManager.SetLabels.
        Sets labels on a cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetLabels

    @property
    def set_legacy_abac(self):
        """Return the gRPC stub for ClusterManager.SetLegacyAbac.
        Enables or disables the ABAC authorization mechanism on a cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetLegacyAbac

    @property
    def start_i_p_rotation(self):
        """Return the gRPC stub for ClusterManager.StartIPRotation.
        Start master IP rotation.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].StartIPRotation

    @property
    def complete_i_p_rotation(self):
        """Return the gRPC stub for ClusterManager.CompleteIPRotation.
        Completes master IP rotation.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].CompleteIPRotation

    @property
    def set_node_pool_size(self):
        """Return the gRPC stub for ClusterManager.SetNodePoolSize.
        Sets the size for a specific node pool.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetNodePoolSize

    @property
    def set_network_policy(self):
        """Return the gRPC stub for ClusterManager.SetNetworkPolicy.
        Enables/Disables Network Policy for a cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetNetworkPolicy

    @property
    def set_maintenance_policy(self):
        """Return the gRPC stub for ClusterManager.SetMaintenancePolicy.
        Sets the maintenance policy for a cluster.
        Returns:
            Callable: A callable which accepts the appropriate
            deserialized request object and returns a
            deserialized response object.
        """
        return self._stubs["cluster_manager_stub"].SetMaintenancePolicy
| |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_str,
)
from ..utils import (
ExtractorError,
determine_ext,
find_xpath_attr,
fix_xml_ampersands,
GeoRestrictedError,
int_or_none,
parse_duration,
strip_or_none,
try_get,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_url_query,
urljoin,
xpath_text,
)
class RaiBaseIE(InfoExtractor):
    """Shared helpers for Rai.it extractors: relinker resolution and subtitles."""
    _UUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
    _GEO_COUNTRIES = ['IT']
    _GEO_BYPASS = False

    def _extract_relinker_info(self, relinker_url, video_id):
        """Resolve a Rai 'relinker' URL into an info-dict fragment with
        'formats' and, when the relinker reports them, 'is_live' and
        'duration'. Raises a geo-restriction error if no formats were found
        and the relinker flagged geoprotection."""
        if not re.match(r'https?://', relinker_url):
            return {'formats': [{'url': relinker_url}]}
        formats = []
        geoprotection = None
        is_live = None
        duration = None
        # Query each delivery platform; metadata fields are taken from the
        # first response that provides them.
        for platform in ('mon', 'flash', 'native'):
            relinker = self._download_xml(
                relinker_url, video_id,
                note='Downloading XML metadata for platform %s' % platform,
                transform_source=fix_xml_ampersands,
                query={'output': 45, 'pl': platform},
                headers=self.geo_verification_headers())
            if not geoprotection:
                geoprotection = xpath_text(
                    relinker, './geoprotection', default=None) == 'Y'
            if not is_live:
                is_live = xpath_text(
                    relinker, './is_live', default=None) == 'Y'
            if not duration:
                duration = parse_duration(xpath_text(
                    relinker, './duration', default=None))
            url_elem = find_xpath_attr(relinker, './url', 'type', 'content')
            if url_elem is None:
                continue
            media_url = url_elem.text
            # This does not imply geo restriction (e.g.
            # http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html)
            if media_url == 'http://download.rai.it/video_no_available.mp4':
                continue
            ext = determine_ext(media_url)
            if (ext == 'm3u8' and platform != 'mon') or (ext == 'f4m' and platform != 'flash'):
                continue
            if ext == 'm3u8' or 'format=m3u8' in media_url or platform == 'mon':
                formats.extend(self._extract_m3u8_formats(
                    media_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            elif ext == 'f4m' or platform == 'flash':
                manifest_url = update_url_query(
                    media_url.replace('manifest#live_hds.f4m', 'manifest.f4m'),
                    {'hdcore': '3.7.0', 'plugin': 'aasp-3.7.0.39.44'})
                formats.extend(self._extract_f4m_formats(
                    manifest_url, video_id, f4m_id='hds', fatal=False))
            else:
                # BUGFIX: int_or_none returns None when the <bitrate> element
                # is missing/non-numeric, and `None > 0` raises TypeError on
                # Python 3 — coerce to 0 so the comparisons are safe.
                bitrate = int_or_none(xpath_text(relinker, 'bitrate')) or 0
                formats.append({
                    'url': media_url,
                    'tbr': bitrate if bitrate > 0 else None,
                    'format_id': 'http-%d' % bitrate if bitrate > 0 else 'http',
                })
        if not formats and geoprotection is True:
            self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
        # Drop keys whose value is None so callers' defaults are preserved.
        return dict((k, v) for k, v in {
            'is_live': is_live,
            'duration': duration,
            'formats': formats,
        }.items() if v is not None)

    @staticmethod
    def _extract_subtitles(url, subtitle_url):
        """Return a subtitles dict for *subtitle_url* (resolved against *url*);
        STL subtitles also get a derived .srt variant."""
        subtitles = {}
        if subtitle_url and isinstance(subtitle_url, compat_str):
            subtitle_url = urljoin(url, subtitle_url)
            STL_EXT = '.stl'
            SRT_EXT = '.srt'
            subtitles['it'] = [{
                'ext': 'stl',
                'url': subtitle_url,
            }]
            if subtitle_url.endswith(STL_EXT):
                srt_url = subtitle_url[:-len(STL_EXT)] + SRT_EXT
                subtitles['it'].append({
                    'ext': 'srt',
                    'url': srt_url,
                })
        return subtitles
class RaiPlayIE(RaiBaseIE):
    # Extractor for individual RaiPlay video pages, identified by UUID.
    _VALID_URL = r'(?P<url>https?://(?:www\.)?raiplay\.it/.+?-(?P<id>%s)\.html)' % RaiBaseIE._UUID_RE
    _TESTS = [{
        'url': 'http://www.raiplay.it/video/2016/10/La-Casa-Bianca-e06118bb-59a9-4636-b914-498e4cfd2c66.html?source=twitter',
        'md5': '340aa3b7afb54bfd14a8c11786450d76',
        'info_dict': {
            'id': 'e06118bb-59a9-4636-b914-498e4cfd2c66',
            'ext': 'mp4',
            'title': 'La Casa Bianca',
            'alt_title': 'S2016 - Puntata del 23/10/2016',
            'description': 'md5:a09d45890850458077d1f68bb036e0a5',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Rai 3',
            'creator': 'Rai 3',
            'duration': 3278,
            'timestamp': 1477764300,
            'upload_date': '20161029',
            'series': 'La Casa Bianca',
            'season': '2016',
        },
    }, {
        'url': 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
        'md5': '8970abf8caf8aef4696e7b1f2adfc696',
        'info_dict': {
            'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
            'ext': 'mp4',
            'title': 'Report del 07/04/2014',
            'alt_title': 'S2013/14 - Puntata del 07/04/2014',
            'description': 'md5:f27c544694cacb46a078db84ec35d2d9',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Rai 5',
            'creator': 'Rai 5',
            'duration': 6160,
            'series': 'Report',
            'season_number': 5,
            'season': '2013/14',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.raiplay.it/video/2016/11/gazebotraindesi-efebe701-969c-4593-92f3-285f0d1ce750.html?',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        url, video_id = mobj.group('url', 'id')
        # Appending '?json' to the page URL yields the media metadata as JSON.
        media = self._download_json(
            '%s?json' % url, video_id, 'Downloading video JSON')
        title = media['name']
        video = media['video']
        # Resolve the relinker URL into concrete formats (see RaiBaseIE).
        relinker_info = self._extract_relinker_info(video['contentUrl'], video_id)
        self._sort_formats(relinker_info['formats'])
        thumbnails = []
        if 'images' in media:
            # Thumbnail URLs embed a '[RESOLUTION]' placeholder.
            for _, value in media.get('images').items():
                if value:
                    thumbnails.append({
                        'url': value.replace('[RESOLUTION]', '600x400')
                    })
        timestamp = unified_timestamp(try_get(
            media, lambda x: x['availabilities'][0]['start'], compat_str))
        subtitles = self._extract_subtitles(url, video.get('subtitles'))
        info = {
            'id': video_id,
            'title': self._live_title(title) if relinker_info.get(
                'is_live') else title,
            'alt_title': media.get('subtitle'),
            'description': media.get('description'),
            'uploader': strip_or_none(media.get('channel')),
            'creator': strip_or_none(media.get('editor')),
            'duration': parse_duration(video.get('duration')),
            'timestamp': timestamp,
            'thumbnails': thumbnails,
            'series': try_get(
                media, lambda x: x['isPartOf']['name'], compat_str),
            'season_number': int_or_none(try_get(
                media, lambda x: x['isPartOf']['numeroStagioni'])),
            'season': media.get('stagione') or None,
            'subtitles': subtitles,
        }
        # relinker_info may supply/override 'formats', 'is_live', 'duration'.
        info.update(relinker_info)
        return info
class RaiPlayLiveIE(RaiBaseIE):
    # Extractor for RaiPlay live-stream pages (e.g. /dirette/rainews24).
    _VALID_URL = r'https?://(?:www\.)?raiplay\.it/dirette/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://www.raiplay.it/dirette/rainews24',
        'info_dict': {
            'id': 'd784ad40-e0ae-4a69-aa76-37519d238a9c',
            'display_id': 'rainews24',
            'ext': 'mp4',
            'title': 're:^Diretta di Rai News 24 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'md5:6eca31500550f9376819f174e5644754',
            'uploader': 'Rai News 24',
            'creator': 'Rai News 24',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Pull the content UUID out of the page markup, then delegate the
        # actual extraction to RaiPlayIE via a transparent URL result.
        video_id = self._search_regex(
            r'data-uniquename=["\']ContentItem-(%s)' % RaiBaseIE._UUID_RE,
            webpage, 'content id')
        return {
            '_type': 'url_transparent',
            'ie_key': RaiPlayIE.ie_key(),
            'url': 'http://www.raiplay.it/dirette/ContentItem-%s.html' % video_id,
            'id': video_id,
            'display_id': display_id,
        }
class RaiPlayPlaylistIE(InfoExtractor):
    # Extractor for RaiPlay programme pages, yielding a playlist of episodes.
    _VALID_URL = r'https?://(?:www\.)?raiplay\.it/programmi/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.raiplay.it/programmi/nondirloalmiocapo/',
        'info_dict': {
            'id': 'nondirloalmiocapo',
            'title': 'Non dirlo al mio capo',
            'description': 'md5:9f3d603b2947c1c7abb098f3b14fac86',
        },
        'playlist_mincount': 12,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        title = self._html_search_meta(
            ('programma', 'nomeProgramma'), webpage, 'title')
        description = unescapeHTML(self._html_search_meta(
            ('description', 'og:description'), webpage, 'description'))
        # Collect every episode link on the programme page and hand each one
        # off to RaiPlayIE.
        entries = []
        for mobj in re.finditer(
                r'<a\b[^>]+\bhref=(["\'])(?P<path>/raiplay/video/.+?)\1',
                webpage):
            video_url = urljoin(url, mobj.group('path'))
            entries.append(self.url_result(
                video_url, ie=RaiPlayIE.ie_key(),
                video_id=RaiPlayIE._match_id(video_url)))
        return self.playlist_result(entries, playlist_id, title, description)
class RaiIE(RaiBaseIE):
    # Generic extractor for rai.it / rai.tv / rainews.it pages whose URL
    # embeds a ContentItem UUID.
    _VALID_URL = r'https?://[^/]+\.(?:rai\.(?:it|tv)|rainews\.it)/.+?-(?P<id>%s)(?:-.+?)?\.html' % RaiBaseIE._UUID_RE
    _TESTS = [{
        # var uniquename = "ContentItem-..."
        # data-id="ContentItem-..."
        'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html',
        'info_dict': {
            'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9',
            'ext': 'mp4',
            'title': 'TG PRIMO TEMPO',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1758,
            'upload_date': '20140612',
        }
    }, {
        # with ContentItem in many metas
        'url': 'http://www.rainews.it/dl/rainews/media/Weekend-al-cinema-da-Hollywood-arriva-il-thriller-di-Tate-Taylor-La-ragazza-del-treno-1632c009-c843-4836-bb65-80c33084a64b.html',
        'info_dict': {
            'id': '1632c009-c843-4836-bb65-80c33084a64b',
            'ext': 'mp4',
            'title': 'Weekend al cinema, da Hollywood arriva il thriller di Tate Taylor "La ragazza del treno"',
            'description': 'I film in uscita questa settimana.',
            'thumbnail': r're:^https?://.*\.png$',
            'duration': 833,
            'upload_date': '20161103',
        }
    }, {
        # with ContentItem in og:url
        'url': 'http://www.rai.it/dl/RaiTV/programmi/media/ContentItem-efb17665-691c-45d5-a60c-5301333cbb0c.html',
        'md5': '11959b4e44fa74de47011b5799490adf',
        'info_dict': {
            'id': 'efb17665-691c-45d5-a60c-5301333cbb0c',
            'ext': 'mp4',
            'title': 'TG1 ore 20:00 del 03/11/2016',
            'description': 'TG1 edizione integrale ore 20:00 del giorno 03/11/2016',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2214,
            'upload_date': '20161103',
        }
    }, {
        # drawMediaRaiTV(...)
        'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html',
        'md5': '2dd727e61114e1ee9c47f0da6914e178',
        'info_dict': {
            'id': '59d69d28-6bb6-409d-a4b5-ed44096560af',
            'ext': 'mp4',
            'title': 'Il pacco',
            'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20141221',
        },
    }, {
        # initEdizione('ContentItem-...'
        'url': 'http://www.tg1.rai.it/dl/tg1/2010/edizioni/ContentSet-9b6e0cba-4bef-4aef-8cf0-9f7f665b7dfb-tg1.html?item=undefined',
        'info_dict': {
            'id': 'c2187016-8484-4e3a-8ac8-35e475b07303',
            'ext': 'mp4',
            'title': r're:TG1 ore \d{2}:\d{2} del \d{2}/\d{2}/\d{4}',
            'duration': 2274,
            'upload_date': '20170401',
        },
        'skip': 'Changes daily',
    }, {
        # HDS live stream with only relinker URL
        'url': 'http://www.rai.tv/dl/RaiTV/dirette/PublishingBlock-1912dbbf-3f96-44c3-b4cf-523681fbacbc.html?channel=EuroNews',
        'info_dict': {
            'id': '1912dbbf-3f96-44c3-b4cf-523681fbacbc',
            'ext': 'flv',
            'title': 'EuroNews',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # HLS live stream with ContentItem in og:url
        'url': 'http://www.rainews.it/dl/rainews/live/ContentItem-3156f2f2-dc70-4953-8e2f-70d7489d4ce9.html',
        'info_dict': {
            'id': '3156f2f2-dc70-4953-8e2f-70d7489d4ce9',
            'ext': 'mp4',
            'title': 'La diretta di Rainews24',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # Direct MMS URL
        'url': 'http://www.rai.it/dl/RaiTV/programmi/media/ContentItem-b63a4089-ac28-48cf-bca5-9f5b5bc46df5.html',
        'only_matching': True,
    }, {
        'url': 'https://www.rainews.it/tgr/marche/notiziari/video/2019/02/ContentItem-6ba945a2-889c-4a80-bdeb-8489c70a8db9.html',
        'only_matching': True,
    }]

    def _extract_from_content_id(self, content_id, url):
        """Fetch the JSON metadata for content_id and build an info dict.

        Raises ExtractorError when the item is neither audio nor video.
        """
        media = self._download_json(
            'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-%s.html?json' % content_id,
            content_id, 'Downloading video JSON')
        title = media['name'].strip()
        media_type = media['type']
        if 'Audio' in media_type:
            # Audio items carry a direct URL instead of a relinker.
            relinker_info = {
                'formats': [{
                    'format_id': media.get('formatoAudio'),
                    'url': media['audioUrl'],
                    'ext': media.get('formatoAudio'),
                }]
            }
        elif 'Video' in media_type:
            relinker_info = self._extract_relinker_info(media['mediaUri'], content_id)
        else:
            raise ExtractorError('not a media file')
        self._sort_formats(relinker_info['formats'])
        thumbnails = []
        # The JSON may expose up to three thumbnail sizes.
        for image_type in ('image', 'image_medium', 'image_300'):
            thumbnail_url = media.get(image_type)
            if thumbnail_url:
                thumbnails.append({
                    'url': compat_urlparse.urljoin(url, thumbnail_url),
                })
        subtitles = self._extract_subtitles(url, media.get('subtitlesUrl'))
        info = {
            'id': content_id,
            'title': title,
            'description': strip_or_none(media.get('desc')),
            'thumbnails': thumbnails,
            'uploader': media.get('author'),
            'upload_date': unified_strdate(media.get('date')),
            'duration': parse_duration(media.get('length')),
            'subtitles': subtitles,
        }
        info.update(relinker_info)
        return info

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        content_item_id = None
        # First look for a ContentItem UUID inside the page's meta tags.
        content_item_url = self._html_search_meta(
            ('og:url', 'og:video', 'og:video:secure_url', 'twitter:url',
             'twitter:player', 'jsonlink'), webpage, default=None)
        if content_item_url:
            content_item_id = self._search_regex(
                r'ContentItem-(%s)' % self._UUID_RE, content_item_url,
                'content item id', default=None)
        if not content_item_id:
            # Fall back to inline JavaScript calls / data attributes.
            content_item_id = self._search_regex(
                r'''(?x)
                    (?:
                        (?:initEdizione|drawMediaRaiTV)\(|
                        <(?:[^>]+\bdata-id|var\s+uniquename)=
                    )
                    (["\'])
                    (?:(?!\1).)*\bContentItem-(?P<id>%s)
                ''' % self._UUID_RE,
                webpage, 'content item id', default=None, group='id')
        content_item_ids = set()
        if content_item_id:
            content_item_ids.add(content_item_id)
        # The UUID from the URL itself is always a candidate too.
        if video_id not in content_item_ids:
            content_item_ids.add(video_id)
        # Try each candidate id; geo restrictions propagate, any other
        # extraction error falls through to the relinker fallback below.
        for content_item_id in content_item_ids:
            try:
                return self._extract_from_content_id(content_item_id, url)
            except GeoRestrictedError:
                raise
            except ExtractorError:
                pass
        # Last resort: a raw relinker URL embedded in the page source.
        relinker_url = self._search_regex(
            r'''(?x)
                (?:
                    var\s+videoURL|
                    mediaInfo\.mediaUri
                )\s*=\s*
                ([\'"])
                (?P<url>
                    (?:https?:)?
                    //mediapolis(?:vod)?\.rai\.it/relinker/relinkerServlet\.htm\?
                    (?:(?!\1).)*\bcont=(?:(?!\1).)+)\1
            ''',
            webpage, 'relinker URL', group='url')
        relinker_info = self._extract_relinker_info(
            urljoin(url, relinker_url), video_id)
        self._sort_formats(relinker_info['formats'])
        title = self._search_regex(
            r'var\s+videoTitolo\s*=\s*([\'"])(?P<title>[^\'"]+)\1',
            webpage, 'title', group='title',
            default=None) or self._og_search_title(webpage)
        info = {
            'id': video_id,
            'title': title,
        }
        info.update(relinker_info)
        return info
| |
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for Dell Storage Center."""
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_common
LOG = logging.getLogger(__name__)
@interface.volumedriver
class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
                                   driver.ISCSIDriver):
    """Implements commands for Dell Storage Center ISCSI management.

    To enable the driver add the following line to the cinder configuration:
    volume_driver=cinder.volume.drivers.dell.dell_storagecenter_iscsi.\
DellStorageCenterISCSIDriver

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver
        1.1.0 - Added extra spec support for Storage Profile selection
        1.2.0 - Added consistency group support.
        2.0.0 - Switched to inheriting functional objects rather than volume
                driver.
        2.1.0 - Added support for ManageableVD.
        2.2.0 - Driver retype support for switching volume's Storage Profile.
                Added API 2.2 support.
        2.3.0 - Added Legacy Port Mode Support
        2.3.1 - Updated error handling.
        2.4.0 - Added Replication V2 support.
        2.4.1 - Updated Replication support to V2.1.
        2.5.0 - ManageableSnapshotsVD implemented.
        3.0.0 - ProviderID utilized.
        3.1.0 - Failback Supported.
        3.2.0 - Live Volume support.
    """

    VERSION = '3.2.0'
    CI_WIKI_NAME = "Dell_Storage_CI"

    def __init__(self, *args, **kwargs):
        """Initialize the common driver and derive the backend name."""
        super(DellStorageCenterISCSIDriver, self).__init__(*args, **kwargs)
        self.backend_name = (
            self.configuration.safe_get('volume_backend_name') or 'Dell-iSCSI')

    def initialize_connection(self, volume, connector):
        """Map ``volume`` to the server identified by ``connector``.

        :param volume: Cinder volume object.
        :param connector: Connector dict from the caller (iscsi initiator).
        :return: dict with ``driver_volume_type`` and iSCSI ``data``.
        :raises VolumeBackendAPIException: if the volume cannot be mapped.
        """
        # Initialize_connection will find or create a server identified by the
        # connector on the Dell backend. It will then map the volume to it
        # and return the properties as follows..
        # {'driver_volume_type': 'iscsi',
        #  data = {'target_discovered': False,
        #          'target_iqn': preferred iqn,
        #           'target_iqns': all iqns,
        #           'target_portal': preferred portal,
        #           'target_portals': all portals,
        #           'target_lun': preferred lun,
        #           'target_luns': all luns,
        #         }

        # We use id to name the volume name as it is a
        # known unique name.
        volume_name = volume.get('id')
        provider_id = volume.get('provider_id')
        islivevol = self._is_live_vol(volume)
        initiator_name = connector.get('initiator')
        multipath = connector.get('multipath', False)
        # FIX: removed the stray space that was inside 'initialize_ connection'.
        LOG.info(_LI('initialize_connection: %(vol)s:%(pid)s:'
                     '%(intr)s. Multipath is %(mp)r'),
                 {'vol': volume_name,
                  'pid': provider_id,
                  'intr': initiator_name,
                  'mp': multipath})

        with self._client.open_connection() as api:
            try:
                # Find the volume on the storage center. Note that if this
                # is live volume and we are swapped this will be the back
                # half of the live volume.
                scvolume = api.find_volume(volume_name, provider_id, islivevol)
                if scvolume:
                    # Get the SSN it is on.
                    ssn = scvolume['instanceId'].split('.')[0]
                    # Find our server.
                    scserver = api.find_server(initiator_name, ssn)
                    # No? Create it.
                    if scserver is None:
                        scserver = api.create_server(
                            [initiator_name],
                            self.configuration.dell_server_os, ssn)

                    # if we have a server and a volume lets bring them
                    # together.
                    if scserver is not None:
                        mapping = api.map_volume(scvolume, scserver)
                        if mapping is not None:
                            # Since we just mapped our volume we had best
                            # update our sc volume object.
                            scvolume = api.get_volume(scvolume['instanceId'])
                            # Our return.
                            iscsiprops = {}

                            # Three cases that should all be satisfied with the
                            # same return of Target_Portal and Target_Portals.
                            # 1. Nova is calling us so we need to return the
                            #    Target_Portal stuff.  It should ignore the
                            #    Target_Portals stuff.
                            # 2. OS brick is calling us in multipath mode so we
                            #    want to return Target_Portals.  It will ignore
                            #    the Target_Portal stuff.
                            # 3. OS brick is calling us in single path mode so
                            #    we want to return Target_Portal and
                            #    Target_Portals as alternates.
                            iscsiprops = api.find_iscsi_properties(scvolume)

                            # If this is a live volume we need to map up our
                            # secondary volume. Note that if we have failed
                            # over we do not wish to do this.
                            if islivevol:
                                sclivevolume = api.get_live_volume(provider_id)
                                # Only map if we are not failed over.
                                if (sclivevolume and not
                                    api.is_failed_over(provider_id,
                                                       sclivevolume)):
                                    secondaryprops = self.initialize_secondary(
                                        api, sclivevolume, initiator_name)
                                    # Combine with iscsiprops
                                    iscsiprops['target_iqns'] += (
                                        secondaryprops['target_iqns'])
                                    iscsiprops['target_portals'] += (
                                        secondaryprops['target_portals'])
                                    iscsiprops['target_luns'] += (
                                        secondaryprops['target_luns'])

                            # Return our iscsi properties.
                            iscsiprops['discard'] = True
                            return {'driver_volume_type': 'iscsi',
                                    'data': iscsiprops}
            # Re-raise any backend exception.
            except exception.VolumeBackendAPIException:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to initialize connection'))
            # If there is a data structure issue then detail the exception
            # and bail with a Backend Exception.
            except Exception as error:
                LOG.error(error)
                raise exception.VolumeBackendAPIException(error)

        # We get here because our mapping is none or we have no valid iqn to
        # return so blow up.
        raise exception.VolumeBackendAPIException(
            _('Unable to map volume'))

    def initialize_secondary(self, api, sclivevolume, initiatorname):
        """Initialize the secondary connection of a live volume pair.

        :param api: Dell SC api.
        :param sclivevolume: Dell SC live volume object.
        :param initiatorname: Cinder iscsi initiator from the connector.
        :return: ISCSI properties.
        """
        # Find our server.
        secondary = api.find_server(initiatorname,
                                    sclivevolume['secondaryScSerialNumber'])
        # No? Create it.
        if secondary is None:
            secondary = api.create_server(
                [initiatorname], self.configuration.dell_server_os,
                sclivevolume['secondaryScSerialNumber'])
        if secondary:
            if api.map_secondary_volume(sclivevolume, secondary):
                # Get our volume and get our properties.
                secondaryvol = api.get_volume(
                    sclivevolume['secondaryVolume']['instanceId'])
                if secondaryvol:
                    return api.find_iscsi_properties(secondaryvol)
        # Dummy return on failure.
        data = {'target_discovered': False,
                'target_iqn': None,
                'target_iqns': [],
                'target_portal': None,
                'target_portals': [],
                'target_lun': None,
                'target_luns': [],
                }
        # FIX: corrected 'intiator' typo in the log message.
        LOG.warning(_LW('Unable to map live volume secondary volume'
                        ' %(vol)s to secondary server initiator: %(init)r'),
                    {'vol': sclivevolume['secondaryVolume']['instanceName'],
                     'init': initiatorname})
        return data

    def terminate_connection(self, volume, connector, force=False, **kwargs):
        """Unmap ``volume`` from the server identified by ``connector``.

        :raises VolumeBackendAPIException: if the unmap fails.
        """
        # Grab some initial info.
        initiator_name = connector.get('initiator')
        volume_name = volume.get('id')
        provider_id = volume.get('provider_id')
        islivevol = self._is_live_vol(volume)
        LOG.debug('Terminate connection: %(vol)s:%(initiator)s',
                  {'vol': volume_name,
                   'initiator': initiator_name})
        with self._client.open_connection() as api:
            try:
                # Find the volume on the storage center. Note that if this
                # is live volume and we are swapped this will be the back
                # half of the live volume.
                scvolume = api.find_volume(volume_name, provider_id, islivevol)
                if scvolume:
                    # Get the SSN it is on.
                    ssn = scvolume['instanceId'].split('.')[0]
                    # Find our server.
                    scserver = api.find_server(initiator_name, ssn)

                    # Unmap our secondary if not failed over..
                    if islivevol:
                        sclivevolume = api.get_live_volume(provider_id)
                        if (sclivevolume and not
                            api.is_failed_over(provider_id,
                                               sclivevolume)):
                            self.terminate_secondary(api, sclivevolume,
                                                     initiator_name)

                    # If we have a server and a volume lets pull them apart.
                    if (scserver is not None and
                            scvolume is not None and
                            api.unmap_volume(scvolume, scserver) is True):
                        LOG.debug('Connection terminated')
                        return
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to terminate connection '
                                  '%(initiator)s %(vol)s'),
                              {'initiator': initiator_name,
                               'vol': volume_name})
        raise exception.VolumeBackendAPIException(
            _('Terminate connection failed'))

    def terminate_secondary(self, api, sclivevolume, initiatorname):
        """Unmap the secondary volume of a live volume pair.

        :param api: Dell SC api.
        :param sclivevolume: Dell SC live volume object.
        :param initiatorname: Cinder iscsi initiator from the connector.
        :return: result of api.unmap_volume.
        """
        # Find our server.
        secondary = api.find_server(initiatorname,
                                    sclivevolume['secondaryScSerialNumber'])
        secondaryvol = api.get_volume(
            sclivevolume['secondaryVolume']['instanceId'])
        return api.unmap_volume(secondaryvol, secondary)
| |
import os
import os.path as op
import warnings
import gc
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from mne.datasets import testing
from mne.io import Raw
from mne import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, write_forward_solution,
convert_forward_solution)
from mne import SourceEstimate, pick_types_forward, read_evokeds
from mne.label import read_label
from mne.utils import (requires_mne, run_subprocess, _TempDir,
run_tests_if_main, slow_test)
from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
Forward)
# Root of the mne testing dataset (not downloaded automatically here).
data_path = testing.data_path(download=False)
# Forward solutions from the testing dataset.
fname_meeg = op.join(data_path, 'MEG', 'sample',
                     'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
                          'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
# Small raw/evoked fixtures shipped with the package itself.
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                    'test_raw.fif')
fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                       'data', 'test-ave.fif')
# Transform / source-space files from the testing dataset.
fname_mri = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
def compare_forwards(f1, f2):
    """Check that two (possibly converted) forward solutions agree."""
    sol1, sol2 = f1['sol'], f2['sol']
    assert_allclose(sol1['data'], sol2['data'])
    assert_equal(sol1['ncol'], sol2['ncol'])
    assert_allclose(f1['source_nn'], f2['source_nn'])
    grad1, grad2 = f1['sol_grad'], f2['sol_grad']
    if grad1 is None:
        # Both solutions must lack the gradient together.
        assert_true(grad2 is None)
    else:
        assert_true(grad2 is not None)
        assert_allclose(grad1['data'], grad2['data'])
        assert_equal(grad1['ncol'], grad2['ncol'])
    assert_equal(f1['source_ori'], f2['source_ori'])
    assert_equal(f1['surf_ori'], f2['surf_ori'])
@testing.requires_testing_data
def test_convert_forward():
    """Test converting forward solution between different representations
    """
    fwd_orig = read_forward_solution(fname_meeg_grad)
    assert_true(repr(fwd_orig))
    assert_true(isinstance(fwd_orig, Forward))
    # Surface orientation: in-memory conversion must match reading the file
    # with surf_ori=True directly.
    fwd_surf = convert_forward_solution(fwd_orig, surf_ori=True)
    fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)
    compare_forwards(fwd_surf, fwd_surf_io)
    del fwd_surf_io
    gc.collect()
    # Converting back must reproduce the original.
    fwd_back = convert_forward_solution(fwd_surf, surf_ori=False)
    assert_true(repr(fwd_back))
    assert_true(isinstance(fwd_back, Forward))
    compare_forwards(fwd_orig, fwd_back)
    # Fixed orientation: again compare conversion against a direct read.
    fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
                                         force_fixed=True)
    del fwd_surf
    gc.collect()
    assert_true(repr(fwd_fixed))
    assert_true(isinstance(fwd_fixed, Forward))
    fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,
                                         force_fixed=True)
    compare_forwards(fwd_fixed, fwd_fixed_io)
    del fwd_fixed_io
    gc.collect()
    # And back to cartesian (the original condition).
    fwd_back = convert_forward_solution(fwd_fixed)
    assert_true(repr(fwd_back))
    assert_true(isinstance(fwd_back, Forward))
    compare_forwards(fwd_orig, fwd_back)
    del fwd_orig, fwd_back, fwd_fixed
    gc.collect()
@slow_test
@testing.requires_testing_data
def test_io_forward():
    """Test IO for forward solutions
    """
    temp_dir = _TempDir()
    # do extensive tests with MEEG + grad
    n_channels, n_src = 366, 108
    fwd = read_forward_solution(fname_meeg_grad)
    assert_true(isinstance(fwd, Forward))
    # Surface-oriented read, then a write/read round-trip must preserve data.
    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src))
    assert_equal(len(fwd['sol']['row_names']), n_channels)
    fname_temp = op.join(temp_dir, 'test-fwd.fif')
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
    fwd_read = read_forward_solution(fname_temp, surf_ori=True)
    leadfield = fwd_read['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src))
    assert_equal(len(fwd_read['sol']['row_names']), n_channels)
    assert_equal(len(fwd_read['info']['chs']), n_channels)
    assert_true('dev_head_t' in fwd_read['info'])
    assert_true('mri_head_t' in fwd_read)
    assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
    # Fixed orientation collapses three columns per source to one (n_src / 3).
    fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src / 3))
    assert_equal(len(fwd['sol']['row_names']), n_channels)
    assert_equal(len(fwd['info']['chs']), n_channels)
    assert_true('dev_head_t' in fwd['info'])
    assert_true('mri_head_t' in fwd)
    assert_true(fwd['surf_ori'])
    # test warnings on bad filenames
    fwd = read_forward_solution(fname_meeg_grad)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # Non-conventional name must warn once on write and once on read.
        fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
        write_forward_solution(fwd_badname, fwd)
        read_forward_solution(fwd_badname)
    assert_true(len(w) == 2)
    # Round-trip the plain MEEG solution as well.
    fwd = read_forward_solution(fname_meeg)
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd_read = read_forward_solution(fname_temp)
    compare_forwards(fwd, fwd_read)
@testing.requires_testing_data
def test_apply_forward():
    """Test projection of source space data to sensor space
    """
    start = 0
    stop = 5
    n_times = stop - start - 1
    sfreq = 10.0
    t_start = 0.123
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)
    assert_true(isinstance(fwd, Forward))
    # Unit-amplitude STC over all vertices: projected sensor data should sum
    # to the gain-matrix row sums.
    vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
    gain_sum = np.sum(fwd['sol']['data'], axis=1)
    # Evoked
    # NOTE(review): the warnings context appears to wrap the whole
    # evoked + raw section; the test expects exactly two warnings from
    # reading/applying — confirm against upstream history if changed.
    with warnings.catch_warnings(record=True) as w:
        evoked = read_evokeds(fname_evoked, condition=0)
        evoked.pick_types(meg=True)
        evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop)
        assert_equal(len(w), 2)
        data = evoked.data
        times = evoked.times
        # do some tests
        assert_array_almost_equal(evoked.info['sfreq'], sfreq)
        assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
        assert_array_almost_equal(times[0], t_start)
        assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
        # Raw
        raw = Raw(fname_raw)
        raw_proj = apply_forward_raw(fwd, stc, raw, start=start, stop=stop)
        data, times = raw_proj[:, :]
        # do some tests
        assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
        assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
        # Sample timing only accurate to one sample period.
        atol = 1. / sfreq
        assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
        assert_allclose(raw_proj.last_samp / sfreq,
                        t_start + (n_times - 1) / sfreq, atol=atol)
@testing.requires_testing_data
def test_restrict_forward_to_stc():
    """Test restriction of source space to source SourceEstimate
    """
    start = 0
    stop = 5
    n_times = stop - start - 1
    sfreq = 10.0
    t_start = 0.123
    # Fixed orientation: one gain column per selected vertex (15 + 5 = 20).
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)
    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
    fwd_out = restrict_forward_to_stc(fwd, stc)
    assert_true(isinstance(fwd_out, Forward))
    assert_equal(fwd_out['sol']['ncol'], 20)
    assert_equal(fwd_out['src'][0]['nuse'], 15)
    assert_equal(fwd_out['src'][1]['nuse'], 5)
    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
    # Free orientation: three gain columns per selected vertex (3 * 20 = 60).
    fwd = read_forward_solution(fname_meeg, force_fixed=False)
    fwd = pick_types_forward(fwd, meg=True)
    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
    fwd_out = restrict_forward_to_stc(fwd, stc)
    assert_equal(fwd_out['sol']['ncol'], 60)
    assert_equal(fwd_out['src'][0]['nuse'], 15)
    assert_equal(fwd_out['src'][1]['nuse'], 5)
    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
@testing.requires_testing_data
def test_restrict_forward_to_label():
    """Test restriction of source space to label
    """
    # Fixed orientation: one gain column per in-label vertex.
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)
    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
    # Expected selections: label vertices present in each hemisphere's source
    # space, as positions within the concatenated vertex list.
    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
                  len(fwd['src'][0]['vertno']))
    assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
    assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
    # Free orientation: three gain columns per in-label vertex.
    fwd = read_forward_solution(fname_meeg, force_fixed=False)
    fwd = pick_types_forward(fwd, meg=True)
    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
                  len(fwd['src'][0]['vertno']))
    assert_equal(fwd_out['sol']['ncol'],
                 3 * (len(src_sel_lh) + len(src_sel_rh)))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
    assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
@testing.requires_testing_data
@requires_mne
def test_average_forward_solution():
    """Test averaging forward solutions
    """
    temp_dir = _TempDir()
    fwd = read_forward_solution(fname_meeg)
    # input not a list
    assert_raises(TypeError, average_forward_solutions, 1)
    # list is too short
    assert_raises(ValueError, average_forward_solutions, [])
    # negative weights
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
    # all zero weights
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
    # weights not same length
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
    # list does not only have all dict()
    assert_raises(TypeError, average_forward_solutions, [1, fwd])
    # try an easy case: averaging a single solution is the identity
    fwd_copy = average_forward_solutions([fwd])
    assert_true(isinstance(fwd_copy, Forward))
    assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
    # modify a fwd solution, save it, use MNE to average with old one
    fwd_copy['sol']['data'] *= 0.5
    fname_copy = op.join(temp_dir, 'copy-fwd.fif')
    write_forward_solution(fname_copy, fwd_copy, overwrite=True)
    cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
           fname_copy, '--out', fname_copy)
    run_subprocess(cmd)
    # now let's actually do it, with one filename and one fwd
    # mean of gain G and 0.5*G is 0.75*G
    fwd_ave = average_forward_solutions([fwd, fwd_copy])
    assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
    # fwd_ave_mne = read_forward_solution(fname_copy)
    # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
    # with gradient
    fwd = read_forward_solution(fname_meeg_grad)
    fwd_ave = average_forward_solutions([fwd, fwd])
    compare_forwards(fwd, fwd_ave)
# Allow running this test module directly as a script.
run_tests_if_main()
| |
import copy
import json
import platform
import random
import sys
from datetime import datetime, timedelta
import numpy as np
import pytest
import ray
from ray.tests.conftest import (
file_system_object_spilling_config,
buffer_object_spilling_config,
mock_distributed_fs_object_spilling_config,
)
from ray.external_storage import create_url_with_offset, parse_url_with_offset
from ray._private.gcs_utils import use_gcs_for_bootstrap
from ray._private.test_utils import wait_for_condition
from ray.internal.internal_api import memory_summary
from ray._raylet import GcsClientOptions
def run_basic_workload():
    """Run the workload that requires spilling."""
    payload = np.random.rand(5 * 1024 * 1024)  # 40 MB
    # Hold two refs (as a nested list, matching the original shape) and then
    # round-trip one more copy through the object store.
    refs = [[ray.put(payload) for _ in range(2)]]
    ray.get(ray.put(payload))
def is_dir_empty(temp_folder, append_path=ray.ray_constants.DEFAULT_OBJECT_PREFIX):
    """Return True when the spill directory is absent or holds no entries."""
    # append_path is used because the file based spilling will append
    # new directory path.
    target = temp_folder / append_path
    if not target.exists():
        return True
    return sum(1 for _ in target.iterdir()) == 0
def assert_no_thrashing(address):
    """Assert the cluster did not restore more bytes than it spilled."""
    state = ray.state.GlobalState()
    if use_gcs_for_bootstrap():
        options = GcsClientOptions.from_gcs_address(address)
    else:
        options = GcsClientOptions.from_redis_address(
            address, ray.ray_constants.REDIS_DEFAULT_PASSWORD
        )
    state._initialize_global_state(options)
    summary = memory_summary(address=address, stats_only=True)
    restored_bytes, consumed_bytes = 0, 0
    # Scrape the byte counters out of the human-readable summary text.
    for line in summary.split("\n"):
        tokens = line.split(" ")
        if "Restored" in line:
            restored_bytes = int(tokens[1])
        if "consumed" in line:
            consumed_bytes = int(tokens[-2])
    assert (
        consumed_bytes >= restored_bytes
    ), f"consumed: {consumed_bytes}, restored: {restored_bytes}"
def test_invalid_config_raises_exception(shutdown_only):
    """ray.init must reject malformed object-spilling configs up front."""
    # Make sure ray.init raises an exception before
    # it starts processes when invalid object spilling
    # config is given.
    with pytest.raises(ValueError):
        ray.init(
            _system_config={
                "object_spilling_config": json.dumps({"type": "abc"}),
            }
        )
    with pytest.raises(Exception):
        copied_config = copy.deepcopy(file_system_object_spilling_config)
        # Add invalid params to the config.
        copied_config["params"].update({"random_arg": "abc"})
        ray.init(
            _system_config={
                "object_spilling_config": json.dumps(copied_config),
            }
        )
    with pytest.raises(Exception):
        copied_config = copy.deepcopy(file_system_object_spilling_config)
        # Add invalid value type to the config.
        copied_config["params"].update({"buffer_size": "abc"})
        ray.init(
            _system_config={
                "object_spilling_config": json.dumps(copied_config),
            }
        )
def test_url_generation_and_parse():
    """Round-trip a URL through create/parse with an offset and size."""
    base_url = "s3://abc/def/ray_good"
    byte_offset = 10
    byte_size = 30
    combined = create_url_with_offset(
        url=base_url, offset=byte_offset, size=byte_size)
    parsed = parse_url_with_offset(combined)
    # Everything encoded into the URL must come back out unchanged.
    assert parsed.base_url == base_url
    assert parsed.offset == byte_offset
    assert parsed.size == byte_size
def test_default_config(shutdown_only):
    """Check default, disabled, and custom object-spilling configurations."""
    ray.init(num_cpus=0, object_store_memory=75 * 1024 * 1024)
    # Make sure the object spilling configuration is properly set.
    config = json.loads(ray.worker._global_node._config["object_spilling_config"])
    assert config["type"] == "filesystem"
    assert config["params"]["directory_path"] == ray.worker._global_node._session_dir
    # Make sure the basic workload can succeed.
    run_basic_workload()
    ray.shutdown()
    # Make sure config is not initialized if spilling is not enabled.
    ray.init(
        num_cpus=0,
        object_store_memory=75 * 1024 * 1024,
        _system_config={
            "automatic_object_spilling_enabled": False,
            "object_store_full_delay_ms": 100,
        },
    )
    assert "object_spilling_config" not in ray.worker._global_node._config
    run_basic_workload()
    ray.shutdown()
    # Make sure when we use a different config, it is reflected.
    ray.init(
        num_cpus=0,
        _system_config={
            "object_spilling_config": (
                json.dumps(mock_distributed_fs_object_spilling_config)
            )
        },
    )
    config = json.loads(ray.worker._global_node._config["object_spilling_config"])
    assert config["type"] == "mock_distributed_fs"
def test_default_config_buffering(shutdown_only):
    """A buffering spilling config must be passed through unchanged."""
    ray.init(
        num_cpus=0,
        _system_config={
            "object_spilling_config": (json.dumps(buffer_object_spilling_config))
        },
    )
    config = json.loads(ray.worker._global_node._config["object_spilling_config"])
    assert config["type"] == buffer_object_spilling_config["type"]
    assert (
        config["params"]["buffer_size"]
        == buffer_object_spilling_config["params"]["buffer_size"]
    )
def test_default_config_cluster(ray_start_cluster_enabled):
    """Spilling with the default config must work on worker nodes."""
    cluster = ray_start_cluster_enabled
    cluster.add_node(num_cpus=0)
    ray.init(cluster.address)
    worker_nodes = []
    worker_nodes.append(
        cluster.add_node(num_cpus=1, object_store_memory=75 * 1024 * 1024)
    )
    cluster.wait_for_nodes()

    # Run the basic spilling workload on both
    # worker nodes and make sure they are working.
    @ray.remote
    def task():
        arr = np.random.rand(5 * 1024 * 1024)  # 40 MB
        refs = []
        refs.append([ray.put(arr) for _ in range(2)])
        ray.get(ray.put(arr))

    ray.get([task.remote() for _ in range(2)])
@pytest.mark.skipif(platform.system() == "Windows", reason="Hangs on Windows.")
def test_spilling_not_done_for_pinned_object(object_spilling_config, shutdown_only):
    """Objects still referenced in scope must never be spilled to disk."""
    # Limit our object store to 75 MiB of memory.
    object_spilling_config, temp_folder = object_spilling_config
    address = ray.init(
        object_store_memory=75 * 1024 * 1024,
        _system_config={
            "max_io_workers": 4,
            "automatic_object_spilling_enabled": True,
            "object_store_full_delay_ms": 100,
            "object_spilling_config": object_spilling_config,
            "min_spilling_size": 0,
        },
    )
    payload = np.random.rand(5 * 1024 * 1024)  # 40 MB
    pinned_copy = ray.get(ray.put(payload))  # noqa
    pinned_ref = ray.put(payload)  # noqa
    # Nothing should have been written to the spill directory.
    wait_for_condition(lambda: is_dir_empty(temp_folder))
    assert_no_thrashing(address["address"])
def test_spill_remote_object(
    ray_start_cluster_enabled, multi_node_object_spilling_config
):
    """Spilled objects must be restorable and usable as task args on a cluster.

    The statement order below is load-bearing: each 40 MB allocation is
    sized so the next put forces eviction/spilling of the previous object.
    """
    cluster = ray_start_cluster_enabled
    object_spilling_config, _ = multi_node_object_spilling_config
    cluster.add_node(
        num_cpus=0,
        object_store_memory=75 * 1024 * 1024,
        _system_config={
            "automatic_object_spilling_enabled": True,
            "object_store_full_delay_ms": 100,
            "max_io_workers": 4,
            "object_spilling_config": object_spilling_config,
            "min_spilling_size": 0,
        },
    )
    ray.init(address=cluster.address)
    cluster.add_node(object_store_memory=75 * 1024 * 1024)
    cluster.wait_for_nodes()

    @ray.remote
    def put():
        return np.random.rand(5 * 1024 * 1024)  # 40 MB data

    @ray.remote
    def depends(arg):
        return

    ref = put.remote()
    copy = np.copy(ray.get(ref))
    # Evict local copy.
    ray.put(np.random.rand(5 * 1024 * 1024))  # 40 MB data
    # Remote copy should cause first remote object to get spilled.
    ray.get(put.remote())
    # Restoring the spilled object must yield identical data.
    sample = ray.get(ref)
    assert np.array_equal(sample, copy)
    # Evict the spilled object.
    del sample
    ray.get(put.remote())
    ray.put(np.random.rand(5 * 1024 * 1024))  # 40 MB data
    # Test passing the spilled object as an arg to another task.
    ray.get(depends.remote(ref))
    assert_no_thrashing(cluster.address)
def test_spill_objects_automatically(object_spilling_config, shutdown_only):
    """Objects pushed past the 75 MiB store limit spill and restore intact."""
    # Limit our object store to 75 MiB of memory.
    object_spilling_config, _ = object_spilling_config
    address = ray.init(
        num_cpus=1,
        object_store_memory=75 * 1024 * 1024,
        _system_config={
            "max_io_workers": 4,
            "automatic_object_spilling_enabled": True,
            "object_store_full_delay_ms": 100,
            "object_spilling_config": object_spilling_config,
            "min_spilling_size": 0,
        },
    )
    replay_buffer = []
    solution_buffer = []
    buffer_length = 100
    # Create objects of more than 800 MiB.
    for _ in range(buffer_length):
        ref = None
        # NOTE(review): ray.put blocks until space frees up, so this loop
        # normally runs once; kept as written.
        while ref is None:
            multiplier = random.choice([1, 2, 3])
            arr = np.random.rand(multiplier * 1024 * 1024)
            ref = ray.put(arr)
            replay_buffer.append(ref)
            solution_buffer.append(arr)
    print("spill done.")
    # randomly sample objects; timeout=0 requires the value to be ready
    # (restored) immediately.
    for _ in range(1000):
        index = random.choice(list(range(buffer_length)))
        ref = replay_buffer[index]
        solution = solution_buffer[index]
        sample = ray.get(ref, timeout=0)
        assert np.array_equal(sample, solution)
    assert_no_thrashing(address["address"])
@pytest.mark.skipif(
    platform.system() in ["Darwin"],
    reason="Very flaky on OSX.",
)
def test_unstable_spill_objects_automatically(unstable_spilling_config, shutdown_only):
    """Spilling must still work with an intermittently failing backend."""
    # Limit our object store to 75 MiB of memory.
    object_spilling_config, _ = unstable_spilling_config
    address = ray.init(
        num_cpus=1,
        object_store_memory=75 * 1024 * 1024,
        _system_config={
            "max_io_workers": 4,
            "automatic_object_spilling_enabled": True,
            "object_store_full_delay_ms": 100,
            "object_spilling_config": object_spilling_config,
            "min_spilling_size": 0,
        },
    )
    replay_buffer = []
    solution_buffer = []
    buffer_length = 20
    # Each object averages 16MiB => 320MiB total.
    for _ in range(buffer_length):
        multiplier = random.choice([1, 2, 3])
        arr = np.random.rand(multiplier * 1024 * 1024)
        ref = ray.put(arr)
        replay_buffer.append(ref)
        solution_buffer.append(arr)
    print("spill done.")
    # randomly sample objects
    for _ in range(10):
        index = random.choice(list(range(buffer_length)))
        ref = replay_buffer[index]
        solution = solution_buffer[index]
        sample = ray.get(ref, timeout=0)
        assert np.array_equal(sample, solution)
    assert_no_thrashing(address["address"])
def test_slow_spill_objects_automatically(slow_spilling_config, shutdown_only):
    """Spilling must still work with an artificially slow backend."""
    # Limit our object store to 75 MiB of memory.
    object_spilling_config, _ = slow_spilling_config
    address = ray.init(
        num_cpus=1,
        object_store_memory=75 * 1024 * 1024,
        _system_config={
            "max_io_workers": 4,
            "automatic_object_spilling_enabled": True,
            "object_store_full_delay_ms": 100,
            "object_spilling_config": object_spilling_config,
            "min_spilling_size": 0,
        },
    )
    replay_buffer = []
    solution_buffer = []
    buffer_length = 10
    # NOTE(review): comment inherited from the 100-object sibling test;
    # with buffer_length=10 the total here is far below 800 MiB.
    for _ in range(buffer_length):
        ref = None
        # ray.put blocks until space frees up, so this loop normally runs once.
        while ref is None:
            multiplier = random.choice([1, 2, 3])
            arr = np.random.rand(multiplier * 1024 * 1024)
            ref = ray.put(arr)
            replay_buffer.append(ref)
            solution_buffer.append(arr)
    print("spill done.")
    # randomly sample objects
    for _ in range(buffer_length):
        index = random.choice(list(range(buffer_length)))
        ref = replay_buffer[index]
        solution = solution_buffer[index]
        sample = ray.get(ref, timeout=0)
        assert np.array_equal(sample, solution)
    assert_no_thrashing(address["address"])
def test_spill_stats(object_spilling_config, shutdown_only):
    """memory_summary() must report spilled/restored/consumed byte counts.

    The exact strings asserted below depend on the precise sequence of
    four 50 MiB task results plus one 30 MiB put — do not reorder.
    """
    # Limit our object store to 75 MiB of memory.
    object_spilling_config, _ = object_spilling_config
    address = ray.init(
        num_cpus=1,
        object_store_memory=100 * 1024 * 1024,
        _system_config={
            "automatic_object_spilling_enabled": True,
            "max_io_workers": 100,
            "min_spilling_size": 1,
            "object_spilling_config": object_spilling_config,
        },
    )

    @ray.remote
    def f():
        return np.zeros(50 * 1024 * 1024, dtype=np.uint8)

    ids = []
    for _ in range(4):
        x = f.remote()
        ids.append(x)
    while ids:
        print(ray.get(ids.pop()))
    x_id = f.remote()  # noqa
    ray.get(x_id)
    s = memory_summary(address=address["address"], stats_only=True)
    assert "Plasma memory usage 50 MiB, 1 objects, 50.0% full" in s, s
    assert "Spilled 200 MiB, 4 objects" in s, s
    assert "Restored 150 MiB, 3 objects" in s, s
    # Test if consumed bytes are correctly calculated.
    obj = ray.put(np.zeros(30 * 1024 * 1024, dtype=np.uint8))

    @ray.remote
    def func_with_ref(obj):
        return True

    ray.get(func_with_ref.remote(obj))
    s = memory_summary(address=address["address"], stats_only=True)
    # 50MB * 5 references + 30MB used for task execution.
    assert "Objects consumed by Ray tasks: 280 MiB." in s, s
    assert_no_thrashing(address["address"])
@pytest.mark.skipif(platform.system() == "Darwin", reason="Failing on macOS.")
@pytest.mark.asyncio
@pytest.mark.parametrize("is_async", [False, True])
async def test_spill_during_get(object_spilling_config, shutdown_only, is_async):
    """Gets that restore spilled objects must not stall while spilling runs.

    Runs the same workload through a plain remote function and, when
    ``is_async`` is set, through an async actor method awaited directly.
    """
    object_spilling_config, _ = object_spilling_config
    address = ray.init(
        num_cpus=1,
        object_store_memory=100 * 1024 * 1024,
        _system_config={
            "automatic_object_spilling_enabled": True,
            "object_store_full_delay_ms": 100,
            # Only a single IO worker, so spill/restore requests serialize.
            "max_io_workers": 1,
            "object_spilling_config": object_spilling_config,
            "min_spilling_size": 0,
            "worker_register_timeout_seconds": 600,
        },
    )
    if is_async:

        @ray.remote(num_cpus=0)
        class Actor:
            async def f(self):
                return np.zeros(10 * 1024 * 1024)

    else:

        @ray.remote(num_cpus=0)
        def f():
            return np.zeros(10 * 1024 * 1024)

    if is_async:
        a = Actor.remote()
    ids = []
    for i in range(10):
        if is_async:
            x = a.f.remote()
        else:
            x = f.remote()
        print(i, x)
        ids.append(x)
    start = datetime.now()
    # Concurrent gets, which require restoring from external storage, while
    # objects are being created.
    for x in ids:
        if is_async:
            obj = await x
        else:
            obj = ray.get(x)
        print(obj.shape)
        del obj
    timeout_seconds = 30
    duration = datetime.now() - start
    assert duration <= timedelta(
        seconds=timeout_seconds
    ), "Concurrent gets took too long. Maybe IO workers are not started properly."  # noqa: E501
    assert_no_thrashing(address["address"])
if __name__ == "__main__":
    # Run this module's tests directly when executed as a script, forwarding
    # pytest's exit status to the shell.
    raise SystemExit(pytest.main(["-sv", __file__]))
| |
"""
Test unpacking structs
"""
import io
import struct
import pytest
from pcapng.exceptions import BadMagic, CorruptedFile, StreamEmpty, TruncatedFile
from pcapng.structs import (
IntField,
ListField,
NameResolutionRecordField,
Option,
Options,
OptionsField,
PacketBytes,
RawBytes,
read_block_data,
read_bytes,
read_bytes_padded,
read_int,
read_options,
read_section_header,
struct_decode,
)
def test_read_int():
    """Exercise read_int() across sizes, signedness and endianness."""
    cases = [
        # (payload, bits, signed, endianness, expected)
        # 16bit, signed, positive
        (b"\x12\x34", 16, True, ">", 0x1234),
        (b"\x12\x34", 16, True, "<", 0x3412),
        (b"\x12\x34extra", 16, True, ">", 0x1234),
        (b"\x12\x34extra", 16, True, "<", 0x3412),
        # 16bit, signed, negative
        (b"\xed\xcc", 16, True, ">", -0x1234),
        (b"\xcc\xed", 16, True, "<", -0x1234),
        (b"\xed\xccextra", 16, True, ">", -0x1234),
        (b"\xcc\xedextra", 16, True, "<", -0x1234),
        # 16bit, unsigned
        (b"\x12\x34", 16, False, ">", 0x1234),
        (b"\x12\x34", 16, False, "<", 0x3412),
        (b"\x12\x34extra", 16, False, ">", 0x1234),
        (b"\x12\x34extra", 16, False, "<", 0x3412),
        # 32bit, both signednesses
        (b"\x12\x34\x56\x78", 32, False, ">", 0x12345678),
        (b"\x12\x34\x56\x78", 32, False, "<", 0x78563412),
        (b"\x12\x34\x56\x78", 32, True, ">", 0x12345678),
        (b"\x12\x34\x56\x78", 32, True, "<", 0x78563412),
    ]
    for payload, bits, signed, endianness, expected in cases:
        assert read_int(io.BytesIO(payload), bits, signed, endianness) == expected
def test_read_int_empty_stream():
    """Reading an int from an exhausted stream must raise StreamEmpty."""
    empty = io.BytesIO(b"")
    with pytest.raises(StreamEmpty):
        read_int(empty, 32)
def test_read_int_truncated_stream():
    """A stream shorter than the requested int must raise TruncatedFile."""
    short = io.BytesIO(b"AB")
    with pytest.raises(TruncatedFile):
        read_int(short, 32)
def test_read_section_header_big_endian():
    """Parse a big-endian Section Header body (magic number already consumed)."""
    data = io.BytesIO(
        # '\x0a\x0d\x0d\x0a' # magic number has already been read..
        b"\x00\x00\x00\x1c"  # block length (28 bytes)
        b"\x1a\x2b\x3c\x4d"  # byte order magic [it's big endian!]
        b"\x00\x01\x00\x00"  # version 1.0
        b"\xff\xff\xff\xff\xff\xff\xff\xff"  # section length unknown
        b""  # no options here!
        b"\x00\x00\x00\x1c"
    )  # block length, again
    block = read_section_header(data)
    assert block["endianness"] == ">"
    # "data" is the raw body between the byte-order magic and trailing length.
    assert block["data"] == b"\x00\x01\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff"
def test_read_section_header_little_endian():
    """Parse a little-endian Section Header body (magic number already consumed)."""
    data = io.BytesIO(
        # '\x0a\x0d\x0d\x0a' # magic number
        b"\x1c\x00\x00\x00"  # block length (28 bytes)
        b"\x4d\x3c\x2b\x1a"  # byte order magic [it's little endian!]
        b"\x01\x00\x00\x00"  # version 1.0
        b"\xff\xff\xff\xff\xff\xff\xff\xff"  # section length unknown
        b""  # no options here!
        b"\x1c\x00\x00\x00"
    )  # block length, again
    block = read_section_header(data)
    assert block["endianness"] == "<"
    assert block["data"] == b"\x01\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff"
def test_read_section_header_bad_order_magic():
    """An unrecognized byte-order magic must raise BadMagic with both
    accepted magics spelled out in the message."""
    data = io.BytesIO(
        # '\x0a\x0d\x0d\x0a' # magic number
        b"\x1c\x00\x00\x00"  # block length (28 bytes)
        b"\x0B\xAD\xBE\xEF"  # bogus byte order magic (neither accepted value)
        b"\x01\x00\x00\x00"  # version 1.0
        b"\xff\xff\xff\xff\xff\xff\xff\xff"  # section length unknown
        b""  # no options here!
        b"\x1c\x00\x00\x00"
    )  # block length, again
    with pytest.raises(BadMagic) as ctx:
        read_section_header(data)
    assert str(ctx.value) == (
        "Wrong byte order magic: got 0x0BADBEEF, " "expected 0x1A2B3C4D or 0x4D3C2B1A"
    )
def test_read_section_header_mismatching_lengths():
    """Leading/trailing block lengths that disagree must raise CorruptedFile."""
    data = io.BytesIO(
        # '\x0a\x0d\x0d\x0a' # magic number
        b"\x00\x00\x00\x1c"  # block length (28 bytes)
        b"\x1a\x2b\x3c\x4d"  # byte order magic [it's big endian!]
        b"\x00\x01\x00\x00"  # version 1.0
        b"\xff\xff\xff\xff\xff\xff\xff\xff"  # section length unknown
        b""  # no options here!
        b"\x00\x00\x00\x00"
    )  # block length, again but WRONG!
    with pytest.raises(CorruptedFile) as ctx:
        read_section_header(data)
    assert str(ctx.value) == "Mismatching block lengths: 28 and 0"
def test_read_block_data_big_endian():
    """Block payloads are returned without their padding (big-endian lengths)."""
    # Payload fills the block exactly; total size field is 0x10 (16).
    stream = io.BytesIO(b"\x00\x00\x00\x10" b"1234" b"\x00\x00\x00\x10")
    assert read_block_data(stream, ">") == b"1234"
    # Base size 0x0c (12) + 5-byte payload -> total 0x11 (17), 3 padding bytes.
    stream = io.BytesIO(b"\x00\x00\x00\x11" b"12345XXX" b"\x00\x00\x00\x11")
    assert read_block_data(stream, ">") == b"12345"
def test_read_block_data_little_endian():
    """Block payloads are returned without their padding (little-endian lengths).

    Mirrors the big-endian test with byte-swapped length fields.
    """
    # No need for padding; size = 4 bytes (size 0x10).
    # Fix: the trailing length field is exactly 4 bytes; a stray fifth byte
    # previously appended here was never read and has been removed.
    data = io.BytesIO(b"\x10\x00\x00\x00" b"1234" b"\x10\x00\x00\x00")
    assert read_block_data(data, "<") == b"1234"
    # Base size: 0x0c (12); payload size: 0x05; total: 0x11 (17)
    data = io.BytesIO(b"\x11\x00\x00\x00" b"12345XXX" b"\x11\x00\x00\x00")
    assert read_block_data(data, "<") == b"12345"
def test_read_block_data_mismatching_lengths():
    """A corrupt trailing length field must raise CorruptedFile."""
    stream = io.BytesIO(b"\x00\x00\x00\x11" b"12345XXX" b"\xff\x00\x00\x11")
    with pytest.raises(CorruptedFile) as ctx:
        read_block_data(stream, ">")
    # 0xFF000011 == 4278190097: the bogus trailing length.
    assert str(ctx.value) == "Mismatching block lengths: 17 and 4278190097"
def test_read_bytes():
    """read_bytes() returns exact counts and raises on short/empty streams."""
    stream = io.BytesIO(b"foobar")
    assert read_bytes(stream, 3) == b"foo"
    assert read_bytes(stream, 3) == b"bar"

    # Fewer bytes available than requested -> TruncatedFile.
    with pytest.raises(TruncatedFile):
        read_bytes(io.BytesIO(b"foo"), 4)

    # Nothing at all available -> StreamEmpty.
    with pytest.raises(StreamEmpty):
        read_bytes(io.BytesIO(b""), 4)

    # A zero-byte read from an empty stream is fine.
    assert read_bytes(io.BytesIO(b""), 0) == b""
def test_read_bytes_padded():
    """read_bytes_padded() consumes data in 32-bit-aligned chunks."""
    assert read_bytes_padded(io.BytesIO(b"spam"), 4) == b"spam"

    stream = io.BytesIO(b"spameggsbaconXXX")
    assert read_bytes_padded(stream, 4) == b"spam"
    assert read_bytes_padded(stream, 4) == b"eggs"
    assert read_bytes_padded(stream, 5) == b"bacon"

    stream = io.BytesIO(b"fooXbarX")
    assert stream.tell() == 0
    assert read_bytes_padded(stream, 3) == b"foo"
    # The padding byte after "foo" was consumed as well.
    assert stream.tell() == 4
    assert read_bytes_padded(stream, 3) == b"bar"

    # Reading from a non-aligned position is a programming error.
    stream = io.BytesIO(b"foobar")
    stream.read(1)
    assert stream.tell() == 1
    with pytest.raises(RuntimeError):
        read_bytes_padded(stream, 3)
def test_decode_simple_struct():
    """Decode a flat schema of raw bytes plus signed/unsigned integers."""
    schema = [
        ("rawbytes", RawBytes(12), b""),
        ("int32s", IntField(32, True), 0),
        ("int32u", IntField(32, False), 0),
        ("int16s", IntField(16, True), 0),
        ("int16u", IntField(16, False), 0),
    ]
    # Build the stream in one go: 12 raw bytes, then >i >I >h >H packed
    # back-to-back (big-endian standard sizes, no alignment padding).
    payload = b"Hello world!" + struct.pack(">iIhH", -1234, 1234, -789, 789)
    decoded = struct_decode(schema, io.BytesIO(payload), ">")
    assert decoded["rawbytes"] == b"Hello world!"
    assert decoded["int32s"] == -1234
    assert decoded["int32u"] == 1234
    assert decoded["int16s"] == -789
    assert decoded["int16u"] == 789
def test_read_options():
    """Options are parsed until the all-zero end marker; duplicate codes
    are preserved in order and padding is stripped."""
    data = io.BytesIO(
        b"\x00\x01\x00\x0cHello world!"  # code 1, len 12 (already aligned)
        b"\x00\x01\x00\x0fSpam eggs bacon\x00"  # code 1 again, len 15 + pad
        b"\x00\x02\x00\x0fSome other text\x00"  # code 2, len 15 + pad
        b"\x00\x00\x00\x00"  # end-of-options marker
    )
    options = read_options(data, ">")
    assert options == [
        (1, b"Hello world!"),
        (1, b"Spam eggs bacon"),
        (2, b"Some other text"),
    ]
def test_read_options_2():
    """Parse a four-entry option list; padded values come back trimmed."""
    data = io.BytesIO(
        b"\x00\x01\x00\x0e"  # code 1, len 14
        b"Just a comment\x00\x00"
        b"\x00\x02\x00\x0b"  # code 2, len 11
        b"My Computer\x00"
        b"\x00\x03\x00\x05"  # code 3, len 5
        b"My OS\x00\x00\x00"
        b"\x00\x04\x00\x0a"  # code 4, len 10
        b"A fake app\x00\x00"
        b"\x00\x00\x00\x00"  # end-of-options marker
    )
    options = read_options(data, ">")
    assert options == [
        (1, b"Just a comment"),
        (2, b"My Computer"),
        (3, b"My OS"),
        (4, b"A fake app"),
    ]
def test_options_object():
    """Exercise Options: typed access, repeated values, and membership."""
    schema = [
        Option(2, "spam", "bytes"),
        Option(3, "eggs", "u32"),
        Option(4, "bacon", "string"),
        Option(5, "missing"),
    ]
    raw_options = [
        (1, b"Comment #1"),
        (1, b"Comment #2"),
        (2, b"I love spam spam spam!"),
        (3, b"\x00\x00\x01\x00"),
        (4, b"Bacon is delicious!"),
        (20, b"Something different"),
    ]
    options = Options(schema=schema, data=raw_options, endianness=">")

    # Known options decode to their schema types; repeated codes expose the
    # first value via [] and the full list via get_all().
    assert options["opt_comment"] == "Comment #1"
    assert options[1] == "Comment #1"
    assert options.get_all("opt_comment") == ["Comment #1", "Comment #2"]
    assert isinstance(options["opt_comment"], str)
    assert options["spam"] == b"I love spam spam spam!"
    assert isinstance(options["spam"], bytes)
    assert options["eggs"] == 0x100
    assert isinstance(options["eggs"], int)
    assert options["bacon"] == "Bacon is delicious!"
    assert isinstance(options["bacon"], str)

    # Schema entries without data, and wholly unknown keys, raise KeyError.
    for absent_key in ("missing", 5, "Something completely missing", 12345):
        with pytest.raises(KeyError):
            options[absent_key]

    # Options outside the schema are still reachable by numeric code.
    assert options[20] == b"Something different"

    # Check length / keys.
    assert len(options) == 5
    assert set(options.keys()) == {"opt_comment", "spam", "eggs", "bacon", 20}

    # Check "in" and "not in" for both names and numeric codes.
    for present in ("opt_comment", "spam", "eggs", "bacon", 1, 2, 3, 4):
        assert present in options
    for absent in ("missing", "something different", 5, 12345):
        assert absent not in options
def test_unpack_dummy_packet():
    """Decode one synthetic block exercising every supported field type:
    raw bytes, ints, options, length-prefixed packet bytes, and a list of
    name-resolution records."""
    schema = [
        ("a_string", RawBytes(8), ""),
        ("a_number", IntField(32, False), 0),
        ("options", OptionsField([]), None),
        ("pb_captured_len", IntField(32, False), 0),
        ("pb_orig_len", IntField(32, False), 0),
        ("packet_data", PacketBytes("pb_captured_len"), b""),
        ("spb_orig_len", IntField(32, False), 0),
        ("simple_packet_data", PacketBytes("spb_orig_len"), b""),
        ("name_res", ListField(NameResolutionRecordField()), []),
        ("another_number", IntField(32, False), 0),
    ]
    # Note: NULLs are for padding!
    data = io.BytesIO(
        b"\x01\x23\x45\x67\x89\xab\xcd\xef"
        b"\x00\x00\x01\x00"
        # Options
        b"\x00\x01\x00\x0cHello world!"
        b"\x00\x01\x00\x0fSpam eggs bacon\x00"
        b"\x00\x02\x00\x0fSome other text\x00"
        b"\x00\x00\x00\x00"
        # Enhanced Packet data
        b"\x00\x00\x00\x12"
        b"\x00\x01\x00\x00"
        b"These are 18 bytes\x00\x00"
        # Simple packet data
        b"\x00\x00\x00\x0d"
        b"Simple packet\x00\x00\x00"
        # List of name resolution items
        b"\x00\x01"  # IPv4
        b"\x00\x13"  # Length: 19bytes
        b"\x0a\x22\x33\x44www.example.com\x00"  # 19 bytes (10.34.51.68)
        b"\x00\x01"  # IPv4
        b"\x00\x13"  # Length: 19bytes
        b"\xc0\xa8\x14\x01www.example.org\x00"  # 19 bytes (192.168.20.1)
        b"\x00\x02"  # IPv6
        b"\x00\x1e"  # 30 bytes
        b"\x00\x11\x22\x33\x44\x55\x66\x77"
        b"\x88\x99\xaa\xbb\xcc\xdd\xee\xff"
        b"v6.example.net\x00\x00"
        b"\x00\x00\x00\x00"  # End marker
        # Another number, to check end
        b"\xaa\xbb\xcc\xdd"
    )
    unpacked = struct_decode(schema, data, endianness=">")
    assert unpacked["a_string"] == b"\x01\x23\x45\x67\x89\xab\xcd\xef"
    assert unpacked["a_number"] == 0x100
    # Only 2 distinct option codes (code 1 appears twice).
    assert isinstance(unpacked["options"], Options)
    assert len(unpacked["options"]) == 2
    assert unpacked["options"]["opt_comment"] == "Hello world!"
    assert unpacked["options"][2] == b"Some other text"
    assert unpacked["pb_captured_len"] == 0x12
    assert unpacked["pb_orig_len"] == 0x10000
    assert unpacked["packet_data"] == b"These are 18 bytes"
    assert unpacked["spb_orig_len"] == 13
    assert unpacked["simple_packet_data"] == b"Simple packet"
    assert unpacked["name_res"] == [
        {"address": "10.34.51.68", "names": ["www.example.com"], "type": 1},
        {"address": "192.168.20.1", "names": ["www.example.org"], "type": 1},
        {
            "type": 2,
            "address": "11:2233:4455:6677:8899:aabb:ccdd:eeff",
            "names": ["v6.example.net"],
        },
    ]
| |
# Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from devicehive.transports.transport import Transport
from devicehive.transports.transport import TransportError
import requests
import threading
import sys
import time
class HttpTransport(Transport):
    """Http transport class.

    Implements the transport API over plain HTTP using ``requests``.
    Server "push" is emulated: subscriptions spawn daemon threads that
    long-poll the REST API and append incoming events onto an in-memory
    queue, which ``_receive`` drains on the transport's receive loop.
    """

    def __init__(self, data_format_class, data_format_options, handler_class,
                 handler_options):
        super(HttpTransport, self).__init__('http', HttpTransportError,
                                            data_format_class,
                                            data_format_options, handler_class,
                                            handler_options)
        self._url = None
        self._options = None
        # NOTE(review): never read anywhere in this class; ``_connect`` sets
        # ``_events_queue_sleep_time`` instead — confirm before removing.
        self._events_queue_sleep = None
        # Each entry is a *list* of event dicts appended atomically.
        self._events_queue = []
        self._subscription_ids = []
        # HTTP status codes treated as success responses.
        self._success_codes = [200, 201, 204]

    def _connect(self, url, **options):
        """Record the base URL and request options, then signal 'connected'."""
        self._url = url
        self._options = options
        # Sleep interval used by the receive loop when the queue is empty.
        self._events_queue_sleep_time = options.pop('events_queue_sleep_time',
                                                    1e-6)
        if not self._url.endswith('/'):
            self._url += '/'
        self._connected = True
        self._handle_connect()

    def _receive(self):
        """Drain the events queue, dispatching each event to the handler.

        Runs until disconnect or until a worker thread stores an exception
        in ``_exception_info``.
        """
        while self._connected and not self._exception_info:
            if not self._events_queue:
                time.sleep(self._events_queue_sleep_time)
                continue
            for event in self._events_queue.pop(0):
                self._handle_event(event)
                # Stop mid-batch if a handler disconnected the transport.
                if not self._connected:
                    return

    def _disconnect(self):
        """Drop queued events and active subscriptions, then notify handler."""
        self._events_queue = []
        self._subscription_ids = []
        self._handle_disconnect()

    def _request_call(self, method, url, **params):
        """Perform one HTTP request; return ``(status_code, payload)``.

        The payload is text or raw bytes depending on the data format.
        Wraps ``requests`` exceptions in the transport error type.
        """
        options = self._options.copy()
        options.update(params)
        try:
            response = requests.request(method, url, **options)
            code = response.status_code
            if self._text_data_type:
                return code, response.text
            return code, response.content
        except requests.RequestException as http_error:
            error = http_error
            raise self._error(error)

    def _request(self, request_id, action, request, **params):
        """Execute one API request and shape the result into a response dict.

        ``params`` carries transport-level directives (method, url,
        request/response key remapping) which are popped before the rest is
        forwarded to ``requests``.
        """
        method = params.pop('method', 'GET')
        url = self._url + params.pop('url')
        request_delete_keys = params.pop('request_delete_keys', [])
        request_key = params.pop('request_key', None)
        response_key = params.pop('response_key', None)
        for request_delete_key in request_delete_keys:
            del request[request_delete_key]
        if request:
            if request_key:
                request = request[request_key]
            params['data'] = self._encode(request)
        code, data = self._request_call(method, url, **params)
        response = {self.REQUEST_ID_KEY: request_id,
                    self.REQUEST_ACTION_KEY: action}
        if code in self._success_codes:
            response[self.RESPONSE_STATUS_KEY] = self.RESPONSE_SUCCESS_STATUS
            if not data:
                return response
            if response_key:
                # Nest the decoded body under the requested key.
                response[response_key] = self._decode(data)
                return response
            response.update(self._decode(data))
            return response
        # Error path: record the code and try to extract a server message.
        response[self.RESPONSE_STATUS_KEY] = self.RESPONSE_ERROR_STATUS
        response[self.RESPONSE_CODE_KEY] = code
        if not data:
            return response
        try:
            response_error = self._decode(data)['message']
        except Exception:
            # Body was not decodable; fall back to the raw payload.
            response_error = data
        response[self.RESPONSE_ERROR_KEY] = response_error
        return response

    def _subscription_request(self, request_id, action, subscription_request,
                              response_subscription_id_key):
        """Validate a subscription with a probe, then start its poll thread."""
        response = self._subscription_probe(**subscription_request)
        if response[self.RESPONSE_STATUS_KEY] != self.RESPONSE_SUCCESS_STATUS:
            return response
        subscription_id = subscription_request['subscription_id']
        self._subscription_ids.append(subscription_id)
        subscription_thread_name = '%s-transport-subscription-%s'
        subscription_thread_name %= (self._name, subscription_id)
        # Daemon thread: polling must not keep the process alive on exit.
        subscription_thread = threading.Thread(target=self._subscription,
                                               kwargs=subscription_request)
        subscription_thread.daemon = True
        subscription_thread.name = subscription_thread_name
        subscription_thread.start()
        return {self.REQUEST_ID_KEY: request_id,
                self.REQUEST_ACTION_KEY: action,
                self.RESPONSE_STATUS_KEY: self.RESPONSE_SUCCESS_STATUS,
                response_subscription_id_key: subscription_id}

    def _subscription_probe(self, subscription_id, request_id, action, request,
                            params):
        """Issue one zero-wait, zero-limit poll to validate the subscription."""
        params = params.copy()
        # Strip poll-loop-only directives before the probe request.
        params.pop('response_error_handler', None)
        params.pop('response_error_handler_args', None)
        params.pop('params_timestamp_key', None)
        params.pop('response_timestamp_key', None)
        params.pop('response_subscription_id_key', None)
        # waitTimeout=0 / limit=0 makes the server answer immediately.
        params['params']['waitTimeout'] = 0
        params['params']['limit'] = 0
        return self._request(request_id, action, request.copy(), **params)

    def _subscription(self, subscription_id, request_id, action, request,
                      params):
        """Long-poll loop run on a daemon thread for one subscription.

        Repeats the request while the subscription id is registered,
        advancing a timestamp cursor past the newest received event and
        queueing events for ``_receive``.
        """
        response_error_handler = params.pop('response_error_handler', None)
        response_error_handler_args = params.pop('response_error_handler_args',
                                                 None)
        response_key = params['response_key']
        params_timestamp_key = params.pop('params_timestamp_key', 'timestamp')
        response_timestamp_key = params.pop('response_timestamp_key',
                                            'timestamp')
        response_subscription_id_key = params.pop(
            'response_subscription_id_key', 'subscriptionId')
        while subscription_id in self._subscription_ids:
            try:
                response = self._request(request_id, action, request.copy(),
                                         **params)
                # The subscription may have been removed while we waited.
                if subscription_id not in self._subscription_ids:
                    return
                response_status = response[self.RESPONSE_STATUS_KEY]
                if response_status != self.RESPONSE_SUCCESS_STATUS:
                    response_code = response[self.RESPONSE_CODE_KEY]
                    error = 'Subscription request error. Action: %s. Code: %s.'
                    error %= (action, response_code)
                    if not response_error_handler:
                        raise self._error(error)
                    # Give the error handler one chance to recover, then
                    # retry the request once before giving up.
                    if not response_error_handler(params, response_code,
                                                  *response_error_handler_args):
                        raise self._error(error)
                    response = self._request(request_id, action, request.copy(),
                                             **params)
                    if subscription_id not in self._subscription_ids:
                        return
                    response_status = response[self.RESPONSE_STATUS_KEY]
                    if response_status != self.RESPONSE_SUCCESS_STATUS:
                        raise self._error(error)
                events = response[response_key]
                if not len(events):
                    continue
                # Advance the poll cursor to the newest event's timestamp.
                timestamp = events[-1][response_timestamp_key]
                if not params.get('params'):
                    params['params'] = {}
                params['params'][params_timestamp_key] = timestamp
                events = [{self.REQUEST_ACTION_KEY: action,
                           response_key: event,
                           response_subscription_id_key: subscription_id}
                          for event in events]
                self._events_queue.append(events)
            except:
                # NOTE(review): bare except deliberately captures everything so
                # the receive loop can surface it on the main thread via
                # ``_exception_info`` — narrow with care.
                self._exception_info = sys.exc_info()

    def _remove_subscription_request(self, request_id, action, subscription_id,
                                     response_code, response_error):
        """Stop a poll thread by deregistering its subscription id."""
        if subscription_id not in self._subscription_ids:
            # Unknown id: report the caller-supplied error code/message.
            return {self.REQUEST_ID_KEY: request_id,
                    self.REQUEST_ACTION_KEY: action,
                    self.RESPONSE_STATUS_KEY: self.RESPONSE_ERROR_STATUS,
                    self.RESPONSE_CODE_KEY: response_code,
                    self.RESPONSE_ERROR_KEY: response_error}
        self._subscription_ids.remove(subscription_id)
        return {self.REQUEST_ID_KEY: request_id,
                self.REQUEST_ACTION_KEY: action,
                self.RESPONSE_STATUS_KEY: self.RESPONSE_SUCCESS_STATUS}

    def send_request(self, request_id, action, request, **params):
        """Fire-and-queue request: the response is delivered via the queue."""
        self._ensure_connected()
        subscription_request = params.pop('subscription_request', {})
        response_subscription_id_key = params.pop(
            'response_subscription_id_key', 'subscriptionId')
        remove_subscription_request = params.pop('remove_subscription_request',
                                                 {})
        if subscription_request:
            response = self._subscription_request(request_id, action,
                                                  subscription_request,
                                                  response_subscription_id_key)
        elif remove_subscription_request:
            response = self._remove_subscription_request(
                request_id, action, **remove_subscription_request)
        else:
            response = self._request(request_id, action, request, **params)
        self._events_queue.append([response])

    def request(self, request_id, action, request, **params):
        """Synchronous request: the response dict is returned directly."""
        self._ensure_connected()
        subscription_request = params.pop('subscription_request', {})
        response_subscription_id_key = params.pop(
            'response_subscription_id_key', 'subscriptionId')
        remove_subscription_request = params.pop('remove_subscription_request',
                                                 {})
        if subscription_request:
            return self._subscription_request(request_id, action,
                                              subscription_request,
                                              response_subscription_id_key)
        if remove_subscription_request:
            return self._remove_subscription_request(
                request_id, action, **remove_subscription_request)
        return self._request(request_id, action, request, **params)
class HttpTransportError(TransportError):
    """Http transport error.

    Error class given to the base ``Transport`` for HTTP-specific failures
    (see ``HttpTransport.__init__``), e.g. wrapped ``requests`` exceptions.
    """
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Swift containers.
"""
from django.core.urlresolvers import reverse
from django import http
from django.utils.functional import cached_property # noqa
from django.utils import http as utils_http
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from horizon import browsers
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.api import swift
from openstack_dashboard.dashboards.project.containers \
import browsers as project_browsers
from openstack_dashboard.dashboards.project.containers \
import forms as project_forms
from openstack_dashboard.dashboards.project.containers import tables
import os
def for_url(container_name):
    """Build a URL friendly container name.

    Add Swift delimiter if necessary.
    The name can contain '%' (bug 1231904).
    """
    return utils_http.urlquote(tables.wrap_delimiter(container_name))
class ContainerView(browsers.ResourceBrowserView):
    """Two-pane resource browser listing containers and their objects."""
    browser_class = project_browsers.ContainerBrowser
    template_name = "project/containers/index.html"

    def get_containers_data(self):
        """Return the (possibly paginated) container list for the left pane."""
        containers = []
        self._more = None
        marker = self.request.GET.get('marker', None)
        try:
            containers, self._more = api.swift.swift_get_containers(
                self.request, marker=marker)
        except Exception:
            msg = _('Unable to retrieve container list.')
            exceptions.handle(self.request, msg)
        return containers

    @cached_property
    def objects(self):
        """Returns a list of objects given the subfolder's path.

        The path is from the kwargs of the request. Cached so the objects
        and subfolders tables share a single API call per request.
        """
        objects = []
        self._more = None
        marker = self.request.GET.get('marker', None)
        container_name = self.kwargs['container_name']
        subfolder = self.kwargs['subfolder_path']
        prefix = None
        if container_name:
            self.navigation_selection = True
            if subfolder:
                prefix = subfolder
            try:
                objects, self._more = api.swift.swift_get_objects(
                    self.request,
                    container_name,
                    marker=marker,
                    prefix=prefix)
            except Exception:
                self._more = None
                objects = []
                msg = _('Unable to retrieve object list.')
                exceptions.handle(self.request, msg)
        return objects

    def is_subdir(self, item):
        """Return True when the item is a pseudo-folder (by content type)."""
        content_type = "application/pseudo-folder"
        return getattr(item, "content_type", None) == content_type

    def is_placeholder(self, item):
        """Return True when the item's name ends with the folder delimiter."""
        object_name = getattr(item, "name", "")
        return object_name.endswith(api.swift.FOLDER_DELIMITER)

    def get_objects_data(self):
        """Returns a list of objects within the current folder."""
        filtered_objects = [item for item in self.objects
                            if (not self.is_subdir(item) and
                                not self.is_placeholder(item))]
        return filtered_objects

    def get_subfolders_data(self):
        """Returns a list of subfolders within the current folder."""
        filtered_objects = [item for item in self.objects
                            if self.is_subdir(item)]
        return filtered_objects

    def get_context_data(self, **kwargs):
        """Add the breadcrumb trail of parent folders to the context."""
        context = super(ContainerView, self).get_context_data(**kwargs)
        context['container_name'] = self.kwargs["container_name"]
        context['subfolders'] = []
        if self.kwargs["subfolder_path"]:
            (parent, slash, folder) = self.kwargs["subfolder_path"] \
                .strip('/').rpartition('/')
            # Walk the path right-to-left, prepending (name, full_path) pairs.
            while folder:
                path = "%s%s%s/" % (parent, slash, folder)
                context['subfolders'].insert(0, (folder, path))
                (parent, slash, folder) = parent.rpartition('/')
        return context
class CreateView(forms.ModalFormView):
    """Modal form view for creating a new Swift container."""
    form_class = project_forms.CreateContainer
    template_name = 'project/containers/create.html'
    success_url = "horizon:project:containers:index"

    def get_success_url(self):
        """Redirect into the parent folder when one was given, otherwise
        into the newly created container."""
        parent = self.request.POST.get('parent', None)
        if not parent:
            # Top-level container: browse straight into it.
            return reverse(self.success_url,
                           args=[for_url(self.request.POST['name'])])
        container, slash, remainder = parent.partition(swift.FOLDER_DELIMITER)
        return reverse(self.success_url,
                       args=(for_url(container), for_url(remainder)))

    def get_initial(self):
        """Pre-fill the form with the container we were launched from."""
        initial = super(CreateView, self).get_initial()
        initial['parent'] = self.kwargs['container_name']
        return initial
class CreatePseudoFolderView(forms.ModalFormView):
    """Modal form for creating a pseudo-folder inside a container."""
    form_class = project_forms.CreatePseudoFolder
    template_name = 'project/containers/create_pseudo_folder.html'
    success_url = "horizon:project:containers:index"

    def get_success_url(self):
        container_name = self.request.POST['container_name']
        args = (tables.wrap_delimiter(container_name),
                self.request.POST.get('path', ''))
        return reverse(self.success_url, args=args)

    def get_initial(self):
        return {"container_name": self.kwargs["container_name"],
                "path": self.kwargs['subfolder_path']}

    def get_context_data(self, **kwargs):
        context = super(CreatePseudoFolderView,
                        self).get_context_data(**kwargs)
        context['container_name'] = self.kwargs["container_name"]
        return context
class UploadView(forms.ModalFormView):
    """Modal form for uploading an object into a container/pseudo-folder."""
    form_class = project_forms.UploadObject
    template_name = 'project/containers/upload.html'
    success_url = "horizon:project:containers:index"

    def get_success_url(self):
        args = (for_url(self.request.POST['container_name']),
                for_url(self.request.POST.get('path', '')))
        return reverse(self.success_url, args=args)

    def get_initial(self):
        return {"container_name": self.kwargs["container_name"],
                "path": self.kwargs['subfolder_path']}

    def get_context_data(self, **kwargs):
        context = super(UploadView, self).get_context_data(**kwargs)
        context['container_name'] = utils_http.urlquote(
            self.kwargs["container_name"])
        return context
def object_download(request, container_name, object_path):
    """Stream a Swift object back to the browser as a file download.

    On a retrieval failure, ``exceptions.handle`` with ``redirect`` raises
    and sends the user back to the container index, so ``obj`` is always
    bound below on the success path.
    """
    try:
        obj = api.swift.swift_get_object(request, container_name, object_path)
    except Exception:
        redirect = reverse("horizon:project:containers:index")
        exceptions.handle(request,
                          _("Unable to retrieve object."),
                          redirect=redirect)
    # Add the original file extension back on if it wasn't preserved in the
    # name given to the object.
    filename = object_path.rsplit(swift.FOLDER_DELIMITER)[-1]
    if not os.path.splitext(obj.name)[1] and obj.orig_name:
        name, ext = os.path.splitext(obj.orig_name)
        filename = "%s%s" % (filename, ext)
    response = http.HttpResponse()
    # Commas are stripped because they break the (unquoted-safe) filename
    # parameter of Content-Disposition in some browsers.
    safe_name = filename.replace(",", "").encode('utf-8')
    response['Content-Disposition'] = 'attachment; filename="%s"' % safe_name
    response['Content-Type'] = 'application/octet-stream'
    response.write(obj.data)
    return response
class CopyView(forms.ModalFormView):
    """Modal form for copying an object to another container/path."""
    form_class = project_forms.CopyObject
    template_name = 'project/containers/copy.html'
    success_url = "horizon:project:containers:index"

    def get_success_url(self):
        args = (for_url(self.request.POST['new_container_name']),
                for_url(self.request.POST.get('path', '')))
        return reverse(self.success_url, args=args)

    def get_form_kwargs(self):
        kwargs = super(CopyView, self).get_form_kwargs()
        try:
            containers = api.swift.swift_get_containers(self.request)
        except Exception:
            redirect = reverse("horizon:project:containers:index")
            exceptions.handle(self.request,
                              _('Unable to list containers.'),
                              redirect=redirect)
        # swift_get_containers() returns (containers, has_more).
        kwargs['containers'] = [(c.name, c.name) for c in containers[0]]
        return kwargs

    def get_initial(self):
        path = self.kwargs["subfolder_path"]
        object_name = self.kwargs["object_name"]
        return {"new_container_name": self.kwargs["container_name"],
                "orig_container_name": self.kwargs["container_name"],
                "orig_object_name": "%s%s" % (path or '', object_name),
                "path": path,
                "new_object_name": "%s copy" % object_name}

    def get_context_data(self, **kwargs):
        context = super(CopyView, self).get_context_data(**kwargs)
        context['container_name'] = utils_http.urlquote(
            self.kwargs["container_name"])
        context['object_name'] = self.kwargs["object_name"]
        return context
class ContainerDetailView(forms.ModalFormMixin, generic.TemplateView):
    """Modal showing the metadata of a single container."""
    template_name = 'project/containers/container_detail.html'

    @memoized.memoized_method
    def get_object(self):
        """Fetch the container (metadata only), redirecting to the index
        page on failure."""
        try:
            return api.swift.swift_get_container(self.request,
                                                 self.kwargs["container_name"],
                                                 with_data=False)
        except Exception:
            redirect = reverse("horizon:project:containers:index")
            exceptions.handle(self.request,
                              _('Unable to retrieve details.'),
                              redirect=redirect)

    def get_context_data(self, **kwargs):
        context = super(ContainerDetailView, self).get_context_data(**kwargs)
        context['container'] = self.get_object()
        return context
class ObjectDetailView(forms.ModalFormMixin, generic.TemplateView):
    """Modal showing the metadata of a single object."""
    template_name = 'project/containers/object_detail.html'

    @memoized.memoized_method
    def get_object(self):
        """Fetch the object (metadata only, no payload), redirecting to
        the index page on failure."""
        try:
            return api.swift.swift_get_object(self.request,
                                              self.kwargs["container_name"],
                                              self.kwargs["object_path"],
                                              with_data=False)
        except Exception:
            redirect = reverse("horizon:project:containers:index")
            exceptions.handle(self.request,
                              _('Unable to retrieve details.'),
                              redirect=redirect)

    def get_context_data(self, **kwargs):
        context = super(ObjectDetailView, self).get_context_data(**kwargs)
        context['object'] = self.get_object()
        return context
class UpdateObjectView(forms.ModalFormView):
    """Modal form for replacing the contents of an existing object."""
    form_class = project_forms.UpdateObject
    template_name = 'project/containers/update.html'
    success_url = "horizon:project:containers:index"

    def get_success_url(self):
        post = self.request.POST
        args = (for_url(post['container_name']),
                for_url(post.get('path', '')))
        return reverse(self.success_url, args=args)

    def get_initial(self):
        return {"container_name": self.kwargs["container_name"],
                "path": self.kwargs["subfolder_path"],
                "name": self.kwargs["object_name"]}

    def get_context_data(self, **kwargs):
        context = super(UpdateObjectView, self).get_context_data(**kwargs)
        quote = utils_http.urlquote
        context['container_name'] = quote(self.kwargs["container_name"])
        context['subfolder_path'] = quote(self.kwargs["subfolder_path"])
        context['object_name'] = quote(self.kwargs["object_name"])
        return context
| |
#!/usr/bin/env python3
import sys
import re
import argparse
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
class Point():
    """A single congestion-control event sample: x is the time axis value,
    y is the plotted quantity."""

    def __init__(self, x, y):
        self.x, self.y = x, y
def listx(points):
    """Extract the x coordinates of *points* as a list."""
    return [pt.x for pt in points]
def listy(points):
    """Extract the y coordinates of *points* as a list."""
    return [pt.y for pt in points]
def plot_data(d):
    """Render the parsed cc log dictionary *d* as three stacked subplots
    (cwnd/ssthresh with event markers, send-side variables, RTT estimates)
    and block on plt.show().

    ``d["cwnd"]`` holds Point objects (time, cwnd); the other per-sample
    series are plain int lists aligned with the cwnd samples, and the
    event series ("congestion", "recovered", "rxtTimeout") hold Points.
    """
    plt.figure(1)
    # Split the Point series into parallel x/y lists for matplotlib.
    cwndx = listx(d["cwnd"])
    cwndy = listy(d["cwnd"])
    congx = listx(d["congestion"])
    congy = listy(d["congestion"])
    rcvrdx = listx(d["recovered"])
    rcvrdy = listy(d["recovered"])
    rxttx = listx(d["rxtTimeout"])
    rxtty = listy(d["rxtTimeout"])
    # cwnd/ssthresh/cc events
    plt.subplot(311)
    plt.title("cwnd/ssthresh")
    pcwnd = plt.plot(cwndx, cwndy, 'r')
    psst = plt.plot(cwndx, d["ssthresh"], 'y-')
    pcong = plt.plot(congx, congy,'yo')
    precov = plt.plot(rcvrdx, rcvrdy,'co')
    prxtt = plt.plot(rxttx, rxtty,'mo')
    # Proxy artists so the legend shows line/marker samples that match
    # the plotted styles above.
    marker1 = Line2D(range(1), range(1), color="r")
    marker2 = Line2D(range(1), range(1), color="y")
    marker3 = Line2D(range(1), range(1), color="w", marker="o", markerfacecolor="y")
    marker4 = Line2D(range(1), range(1), color="w", marker="o", markerfacecolor="c")
    marker5 = Line2D(range(1), range(1), color="w", marker="o", markerfacecolor="m")
    plt.legend((marker1, marker2, marker3, marker4, marker5),
               ('cwnd', 'ssthresh', 'congestion', 'recovered', 'rxt-timeout'),
               loc=4)
    axes = plt.gca()
    # Pad the y range so event markers near 0/max stay visible.
    axes.set_ylim([-20e4, max(cwndy) + 20e4])
    # snd variables
    plt.subplot(312)
    plt.title("cc variables")
    plt.plot(cwndx, d["space"], 'g-', markersize=1)
    plt.plot(cwndx, d["flight"], 'b-', markersize=1)
    plt.plot(cwndx, d["sacked"], 'm:', markersize=1)
    plt.plot(cwndx, d["lost"], 'y:', markersize=1)
    plt.plot(cwndx, d["cc-space"], 'k:', markersize=1)
    plt.plot(cwndx, cwndy, 'ro', markersize=2)
    plt.plot(congx, congy, 'y^', markersize=10, markerfacecolor="y")
    plt.plot(rcvrdx, rcvrdy, 'c^', markersize=10, markerfacecolor="c")
    plt.plot(rxttx, rxtty, 'm^', markersize=10, markerfacecolor="m")
    #plt.plot(cwndx, d["snd_wnd"], 'ko', markersize=1)
    plt.legend(("snd-space", "flight", "sacked", "lost", "cc-space", "cwnd",
                "congestion", "recovered", "rxt-timeout"),
               loc=1)
    # rto/srrt/rttvar
    plt.subplot(313)
    plt.title("rtt")
    plt.plot(cwndx, d["srtt"], 'g-')
    # mrtt is logged in microseconds; convert to ms to share the axis.
    plt.plot(cwndx, [x/1000 for x in d["mrtt-us"]], 'r-')
    plt.plot(cwndx, d["rttvar"], 'b-')
    plt.legend(["srtt", "mrtt-us", "rttvar"])
    axes = plt.gca()
    #plt.plot(cwndx, rto, 'r-')
    #axes.set_ylim([0, int(max(rto[2:len(rto)])) + 50])
    # show
    plt.show()
def find_pattern(file_path, session_idx):
    """Scan *file_path* for the ``session_idx``-th active-open session and
    return a regex matching that session's log lines, capturing the text
    after the connection tuple.

    :param file_path: path of the elog text file
    :param session_idx: 0-based index of the session to select
    :raises Exception: when fewer than ``session_idx + 1`` sessions exist
    """
    is_active_open = 1
    # Listener (server-side) lines look like "l[0] ..." and are skipped.
    listener_pattern = r"l\[\d\]"
    if is_active_open:
        initial_pattern = r"\[\d\](\.\d+:\d+\->\.\d+:\d+)\s+open:\s"
    else:
        initial_pattern = r"\[\d\](\.\d+:\d+\->\.\d+:\d+)\s"
    idx = 0
    # "with" closes the file on every exit path; the original leaked the
    # handle when no session matched and the exception was raised.
    with open(file_path, 'r') as f:
        for line in f:
            # skip listener lines (server)
            if re.search(listener_pattern, line) is not None:
                continue
            match = re.search(initial_pattern, line)
            if match is None:
                continue
            if idx < session_idx:
                idx += 1
                continue
            # NOTE: the tuple text is inserted verbatim, so its "." chars
            # act as regex wildcards -- preserved original behavior.
            filter_pattern = str(match.group(1)) + r"\s+(.+)"
            print("pattern is %s" % filter_pattern)
            return filter_pattern
    raise Exception("Could not find initial pattern")
def compute_time(min, sec, msec):
    """Convert a minutes/seconds/milliseconds triple (strings or ints)
    into a float number of seconds."""
    whole = int(min) * 60 + int(sec)
    return whole + int(msec) / 1000.0
def run(file_path, session_idx):
    """Parse the elog file, collect cc statistics for one session and plot.

    :param file_path: path of the elog text file
    :param session_idx: 0-based index of the session to plot

    Fixes: the original contained a Python 2 ``print`` statement, which is
    a SyntaxError under this file's python3 shebang; the log file is now
    opened with ``with`` so it is always closed.
    """
    filter_sessions = 1
    filter_pattern = ""
    patterns = {
        "time": r"^\d+:(\d+):(\d+):(\d+):\d+",
        "listener": r"l\[\d\]",
        "cc": r"cwnd (\d+) flight (\d+) space (\d+) ssthresh (\d+) snd_wnd (\d+)",
        "cc-snd": r"cc_space (\d+) sacked (\d+) lost (\d+)",
        "rtt": r"rto (\d+) srtt (\d+) mrtt-us (\d+) rttvar (\d+)",
        "rxtt": "rxt-timeout",
        "congestion": "congestion",
        "recovered": "recovered",
    }
    # Parallel data series handed to plot_data(); see its docstring.
    d = {
        "cwnd": [],
        "space": [],
        "flight": [],
        "ssthresh": [],
        "snd_wnd": [],
        "cc-space": [],
        "lost": [],
        "sacked": [],
        "rto": [],
        "srtt": [],
        "mrtt-us": [],
        "rttvar": [],
        "rxtTimeout": [],
        "congestion": [],
        "recovered": [],
    }
    if filter_sessions:
        filter_pattern = find_pattern(file_path, session_idx)
    stats_index = 0
    start_time = 0
    with open(file_path, 'r') as f:
        for line in f:
            # skip listener lines (server)
            if re.search(patterns["listener"], line) is not None:
                continue
            # keep only the selected session's lines
            if filter_sessions:
                match = re.search(filter_pattern, line)
                if match is None:
                    continue
                original_line = line
                line = match.group(1)
            match = re.search(patterns["time"], original_line)
            if match is None:
                print("something went wrong! no time!")
                continue
            time = compute_time(match.group(1), match.group(2), match.group(3))
            # Re-base all timestamps on the first sample.
            if start_time == 0:
                start_time = time
            time = time - start_time
            match = re.search(patterns["cc"], line)
            if match is not None:
                d["cwnd"].append(Point(time, int(match.group(1))))
                d["flight"].append(int(match.group(2)))
                d["space"].append(int(match.group(3)))
                d["ssthresh"].append(int(match.group(4)))
                d["snd_wnd"].append(int(match.group(5)))
                stats_index += 1
                continue
            match = re.search(patterns["cc-snd"], line)
            if match is not None:
                d["cc-space"].append(int(match.group(1)))
                d["sacked"].append(int(match.group(2)))
                d["lost"].append(int(match.group(3)))
            match = re.search(patterns["rtt"], line)
            if match is not None:
                d["rto"].append(int(match.group(1)))
                d["srtt"].append(int(match.group(2)))
                d["mrtt-us"].append(int(match.group(3)))
                d["rttvar"].append(int(match.group(4)))
            # Event markers are anchored to the last cwnd sample, so they
            # cannot be recorded before the first one arrives.
            if stats_index == 0:
                continue
            match = re.search(patterns["rxtt"], line)
            if match is not None:
                d["rxtTimeout"].append(
                    Point(time, d["cwnd"][stats_index - 1].y + 1e4))
                continue
            match = re.search(patterns["congestion"], line)
            if match is not None:
                d["congestion"].append(
                    Point(time, d["cwnd"][stats_index - 1].y - 1e4))
                continue
            match = re.search(patterns["recovered"], line)
            if match is not None:
                d["recovered"].append(Point(time, d["cwnd"][stats_index - 1].y))
                continue
    plot_data(d)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Plot tcp cc logs")
parser.add_argument('-f', action='store', dest='file', required=True,
help="elog file in txt format")
parser.add_argument('-s', action='store', dest='session_index', default=0,
help="session index for which to plot cc logs" )
results = parser.parse_args()
run(results.file, int(results.session_index))
| |
#!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan
Last updated: March 2015
File name: IRIS_DF_Demo.py
Organization: RISC Lab, Utah State University
Notes:
This file is meant for demonstrating Differential Flatness
as described in "Aggressive Maneuvers" by Jeff Ferrin. It is
set up to allow toggling between trajectories using the joystick.
======================================================'''
#================================#
# Libraries/modules Needed #
#================================#
import roslib; roslib.load_manifest('ardrone_tutorials')
roslib.load_manifest('risc_msgs')
import rospy
from math import *
import rospkg
import numpy as np
import scipy.linalg as la
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
from sensor_msgs.msg import Joy
from std_msgs.msg import Bool
#========================#
# Globals #
#========================#
PI = 3.141592653589793
Threshold = 1000
ThrustCap = .3  # thrust command magnitude is clamped to this fraction
states = Cortex()  # latest motion-capture states, refreshed by GetStates()
states.Obj = [States()]*1
euler_max = 0.349066 #in radians
max_yaw_rate = .3490659 #in radians/sec
max_alt_rate = 1000 # in mm/sec
rate = 45 # Hz
start_time = 0  # trajectory clock origin; reset when the mode changes
back = 0  # joystick latch for "previous trajectory" (-1 while held)
forward = 0  # joystick latch for "next trajectory" (+1 while held)
mode = 1 # mode of 4 listed under cases
old_mode = 0
ctrl_status = False  # True while the autonomous controller is engaged
cases = ['Origin','Slanted Figure Eight','Origin',\
        'Flat Figure Eight','Origin','Circle','Origin','Toroid Knot']
#==================#
#    Publishers    #
#==================#
pub_ctrl = rospy.Publisher('/controls', Controls, queue_size = 1)
pub_traj = rospy.Publisher('/trajectory', Trajectories, queue_size = 1)
#=======================================#
#    Estimated Optimal Gain Matrices    #
#=======================================#
# 4x7 gain matrices: rows produce the (x, y, z, psi) commands from the
# error state [x, y, z, xdot, ydot, zdot, psi] (see Basic_Controller).
kp = 1.2
kd = 2.6
K_way = np.matrix([[ 4.2, 0, 0, 5.8, 0, 0, 0],\
                   [ 0, 4.2, 0, 0, 5.8, 0, 0],\
                   [ 0, 0, 2, 0, 0, 3, 0],\
                   [ 0, 0, 0, 0, 0, 0, .3]])
K_slf8 = np.matrix([[ 3.2, 0, 0, 4.6, 0, 0, 0],\
                    [ 0, 3.2, 0, 0, 4.6, 0, 0],\
                    [ 0, 0, 2, 0, 0, 3, 0],\
                    [ 0, 0, 0, 0, 0, 0, .3]])
K_flf8 = np.matrix([[ 3.2, 0, 0, 4.2, 0, 0, 0],\
                    [ 0, 3.2, 0, 0, 4.2, 0, 0],\
                    [ 0, 0, 2, 0, 0, 3, 0],\
                    [ 0, 0, 0, 0, 0, 0, .3]])
K_crcl = np.matrix([[ kp, 0, 0, kd, 0, 0, 0],\
                    [ 0, kp, 0, 0, kd, 0, 0],\
                    [ 0, 0, 2, 0, 0, 3, 0],\
                    [ 0, 0, 0, 0, 0, 0, .3]])
K_toroid = np.matrix([[ kp, 0, 0, kd, 0, 0, 0],\
                      [ 0, kp, 0, 0, kd, 0, 0],\
                      [ 0, 0, 2, 0, 0, 3, 0],\
                      [ 0, 0, 0, 0, 0, 0, .3]])
#========================#
#    Integrator gains    #
#========================#
ki_x = .5
ki_y = .5
ki_z = .5
ki_xdot = 0
ki_ydot = 0
ki_zdot = 0
ki_psi = 0
K_Int = np.matrix([[ ki_x, 0, 0, ki_xdot, 0, 0, 0],\
                   [ 0, ki_y, 0, 0, ki_ydot, 0, 0],\
                   [ 0, 0, ki_z, 0, 0,ki_zdot, 0],\
                   [ 0, 0, 0, 0, 0, 0, ki_psi]])
Integrator = np.asmatrix(np.zeros((7,1)))  # running integral of the error state
#============================#
#    Integrator Threshold    #
#============================#
# element-wise anti-windup limits enforced by IntegratorCap()
x_int_thresh = 2
y_int_thresh = 2
z_int_thresh = 2.5
xdot_int_thresh = 1
ydot_int_thresh = 1
zdot_int_thresh = 2.5
psi_int_thresh = 0
Int_thresh = np.matrix([[x_int_thresh],[y_int_thresh],[z_int_thresh],\
                        [xdot_int_thresh],[ydot_int_thresh],[zdot_int_thresh],\
                        [psi_int_thresh]])
#=========================================#
# Bound Radians between + or - pi/2 #
#=========================================#
def pi2pi(angle):
    """Fold *angle* (radians) by one half-turn when |angle| > pi/2.

    Only a single +/-pi shift is applied, so inputs are fully normalized
    only when they lie within (-3*pi/2, 3*pi/2)."""
    if abs(angle) <= pi / 2:
        return angle
    return angle - pi if angle > 0 else angle + pi
#=====================#
# Integrator Cap #
#=====================#
def IntegratorCap(I):
    """Element-wise anti-windup clamp of the integrator state to the
    global Int_thresh limits.

    NOTE(review): elements exactly equal to their threshold fall through
    both masks and come back as 0 -- confirm that edge case is intended."""
    global Int_thresh
    # Terms within the threshold pass through unchanged ...
    within = np.multiply(I, abs(I) < Int_thresh)
    # ... while saturated terms are replaced by sign(I) * threshold.
    saturated = abs(I) > Int_thresh
    return within + np.multiply(np.sign(I), np.multiply(saturated, Int_thresh))
#==============#
# Get Joy #
#==============#
def GetJoy(joy):
    """Joystick callback: step through the trajectory list with the
    shoulder buttons (button 4 = previous, button 5 = next), wrapping the
    mode around the 1..8 range and restarting the trajectory clock on
    every change."""
    global start_time, mode, old_mode, forward, back
    # Debounce: while a button stays held and its latch is still set from
    # the previous callback, leave the mode untouched.
    if (joy.buttons[5] == 1 and forward == 1) or (joy.buttons[4] == 1 and back == -1):
        mode = mode
    else:
        back = -joy.buttons[4]      # latch: -1 while "previous" is pressed
        forward = joy.buttons[5]    # latch: +1 while "next" is pressed
        old_mode = mode
        start_time = rospy.get_time()  # restart the trajectory clock
        mode = mode + back + forward
        # wrap around the 8 entries of `cases`
        if mode > 8:
            mode = 1
        if mode < 1:
            mode = 8
#============================#
# Get Controller Status #
#============================#
def GetStatus(S):
    """std_msgs/Bool callback: latch whether autonomous control is active."""
    global ctrl_status
    ctrl_status = S.data
#========================#
# Get Cortex States #
#========================#
def GetStates(S):
    """Cortex callback: cache the latest motion-capture state message."""
    global states
    states = S
#==========================#
# Various Trajectories #
#==========================#
def Origin():
    """Hover at the origin waypoint (zero amplitudes, unit altitude) using
    the waypoint gain set."""
    global K_way
    traj = GetTrajectory(10, 0,0,0,1,0,0,0,mode)
    Basic_Controller(traj, K_way)
def Slanted_Figure_8():
    """Fly a figure-eight tilted out of the horizontal plane."""
    global K_slf8, cycles
    period = 12  # seconds per lap
    w1 = 2 * PI / period
    # amplitudes a (x), b (y), c (z); n is the nominal altitude;
    # y oscillates at w1 while x and z use w1/2 and w1 respectively
    traj = GetTrajectory(period, 1, 0.5, 0.5, 1, w1, w1 / 2, w1, mode)
    Basic_Controller(traj, K_slf8)
def Flat_Figure_8():
    """Fly a figure-eight confined to a constant-altitude plane."""
    global K_flf8, cycles
    period = 10  # seconds per lap
    w1 = 2 * PI / period
    # zero z-amplitude (c = 0) keeps the eight flat at altitude n = 1
    traj = GetTrajectory(period, 1, 0.5, 0.0, 1, w1, w1 / 2, w1, mode)
    Basic_Controller(traj, K_flf8)
def Circle():
    """Fly a horizontal circle of radius 0.8 m at unit altitude."""
    global K_crcl
    period = 10  # seconds per revolution
    w1 = 2 * PI / period
    # equal x/y amplitudes and equal frequencies trace a circle; c = 0
    # keeps the altitude constant
    traj = GetTrajectory(period, 0.8, 0.8, 0, 1, w1, w1, w1, mode)
    Basic_Controller(traj, K_crcl)
def Toroid():
    """Fly a (q, p) toroidal knot trajectory."""
    global K_toroid, mode
    period = 15  # seconds
    q = 3  # number of loops
    p = q - 1
    w1 = 2 * PI / period
    # a = major radius, b/c = radial and vertical modulation amplitudes,
    # n = nominal altitude
    traj = GetToroid(period, 0.65, 0.25, 0.25, 1, q, p, w1, mode)
    Basic_Controller(traj, K_toroid)
def Datahandler():
    """Dispatch the active trajectory for the current joystick mode and
    clear integral wind-up whenever the mode changes.

    Odd modes (1/3/5/7) hover at the origin; modes 2, 4, 6 and 8 fly the
    slanted figure-8, flat figure-8, circle and toroid knot respectively.
    """
    global mode, old_mode
    if mode == 1 or mode == 3 or mode == 5 or mode == 7:
        Origin()
        if old_mode != mode:
            # new trajectory selected: reset the integrator and log once
            global Integrator
            Integrator = np.asmatrix(np.zeros((7,1)))
            rospy.loginfo("Origin")
            old_mode = mode
    if mode == 2:
        Slanted_Figure_8()
        if old_mode != mode:
            global Integrator
            Integrator = np.asmatrix(np.zeros((7,1)))
            rospy.loginfo("Slanted Figure 8 Trajectory")
            old_mode = mode
    if mode == 4:
        Flat_Figure_8()
        if old_mode != mode:
            global Integrator
            Integrator = np.asmatrix(np.zeros((7,1)))
            rospy.loginfo("Flat Figure 8 Trajectory")
            old_mode = mode
    if mode == 6:
        mode = 6 # redefine mode to prevent control lockout (I don't know why this is necessary but it fixes it)
        Circle()
        if old_mode != mode:
            global Integrator
            Integrator = np.asmatrix(np.zeros((7,1)))
            rospy.loginfo("Circular Trajectory")
            old_mode = mode
    if mode == 8:
        mode = 8 # redefine mode to prevent control lockout (I don't know why this is necessary but it fixes it)
        Toroid()
        if old_mode != mode:
            global Integrator
            Integrator = np.asmatrix(np.zeros((7,1)))
            rospy.loginfo("Toroid Knot Trajectory")
            old_mode = mode
    if rospy.get_param('controller_status',False):
        # NOTE(review): this assigns a *local* start_time (no `global`
        # declaration), so it has no effect outside this function --
        # confirm whether `global start_time` was intended here.
        start_time = rospy.get_time()
#=================#
# Get Toroid #
#=================#
def GetToroid(period,a,b,c,n,q,p,w1,case):
    """Evaluate one sample of a (q, p) toroid-knot trajectory at the
    current time, publish it and return the message.

    :param period: nominal lap time in seconds (w1 = 2*pi/period)
    :param a: major radius of the toroid
    :param b: radial modulation amplitude
    :param c: vertical modulation amplitude
    :param n: nominal altitude offset
    :param q, p: knot winding numbers
    :param w1: base angular rate in rad/s
    :param case: 1-based index into `cases` used as the trajectory name
    :returns: the published Trajectories message
    """
    global start_time, cases, pub_traj
    time_now = rospy.get_time()
    t = time_now-start_time
    theta = w1*t
    WP = Trajectories()
    WP.Obj = [Trajectory()]*1
    #=======================#
    #    Polar Trajectory   #
    #=======================#
    # NOTE(review): the derivatives below look inconsistent with the chain
    # rule -- d/dt cos(q*theta/p) = -(q*w1/p)*sin(q*theta/p), but rdot and
    # zdot omit the w1 and 1/p factors, and rddot/zddot divide by p*p
    # instead of multiplying by (q*w1/p)**2. Confirm before trusting the
    # velocity/acceleration feedforward.
    r = a+b*cos(q*theta/p)
    z = n+c*sin(q*theta/p)
    rdot = -b*q*sin(q*theta/p)
    zdot = c*q*cos(q*theta/p)
    rddot = -b*q*q*cos(q*theta/p)/(p*p)
    zddot = -c*q*q*sin(q*theta/p)/(p*p)
    #=================#
    #    Trajectory   #
    #=================#
    traj = Trajectory()
    traj.name = cases[case-1]
    # Position (polar -> Cartesian)
    traj.x = r*cos(theta)
    traj.y = r*sin(theta)
    traj.z = z
    traj.psi = 0
    # Velocity
    # NOTE(review): the r*sin(theta)/r*cos(theta) terms are missing the
    # theta_dot = w1 factor -- see the note above.
    traj.xdot = rdot*cos(theta) - r*sin(theta)
    traj.ydot = rdot*sin(theta) + r*cos(theta)
    traj.zdot = zdot
    traj.psidot = 0
    # Acceleration
    traj.xddot = rddot*cos(theta)-2*rdot*sin(theta)-r*cos(theta)
    traj.yddot = rddot*sin(theta)+2*rdot*cos(theta)-r*sin(theta)
    traj.zddot = zddot
    traj.psiddot = 0
    WP.Obj = [traj]
    pub_traj.publish(WP)
    return WP
#=====================#
# Get Trajectory #
#=====================#
def GetTrajectory(period, a, b, c, n, w1, w2, w3, case):
    """Evaluate one sample of a Lissajous-style reference trajectory at
    the current time, publish it and return the message.

    x = a*cos(w2*t), y = b*sin(w1*t), z = n + c*sin(w3*t); the velocity
    and acceleration fields are the analytic time derivatives.

    :param case: 1-based index into `cases` used as the trajectory name
    :returns: the published Trajectories message
    """
    global start_time, cases, pub_traj
    t = rospy.get_time() - start_time
    WP = Trajectories()
    WP.Obj = [Trajectory()]*1
    traj = Trajectory()
    traj.name = cases[case - 1]
    # position
    traj.x = a * cos(w2 * t)
    traj.y = b * sin(w1 * t)
    traj.z = n + c * sin(w3 * t)
    traj.psi = 0
    # velocity (first derivatives)
    traj.xdot = -a * w2 * sin(w2 * t)
    traj.ydot = b * w1 * cos(w1 * t)
    traj.zdot = c * w3 * cos(w3 * t)
    traj.psidot = 0
    # acceleration (second derivatives)
    traj.xddot = -a * w2 * w2 * cos(w2 * t)
    traj.yddot = -b * w1 * w1 * sin(w1 * t)
    traj.zddot = -c * w3 * w3 * sin(w3 * t)
    traj.psiddot = 0
    WP.Obj = [traj]
    pub_traj.publish(WP)
    return WP
#========================#
# Basic Controller #
#========================#
def Basic_Controller(traj,K):
    """LQR-style trajectory tracker with integral action and a
    differential-flatness feedforward; publishes a Controls message with
    normalized roll/pitch/yaw-rate/thrust commands.

    :param traj: Trajectories message holding the reference in traj.Obj[0]
    :param K: 4x7 state-feedback gain matrix (rows -> x, y, z, psi)
    """
    global states,PI, euler_max, max_yaw_rate, max_alt_rate, pub_ctrl
    g = 9.80665
    m = 1.282 # IRIS Mass Estimate (not used since thrust is given as a percentage)
    Ctrl = Controls()
    Ctrl.Obj = [Control()]*1
    #===================================#
    #    Get State Trajectory Errors    #
    #===================================#
    # Only command when motion capture has a valid fix on the vehicle.
    if states.Obj[0].visible:
        # Error state: [x, y, z, xdot, ydot, zdot, psi], reference - actual
        # (cortex yaw arrives in degrees, hence the conversion).
        X = np.asmatrix(np.zeros((7,1)))
        X[0] = traj.Obj[0].x - states.Obj[0].x
        X[1] = traj.Obj[0].y - states.Obj[0].y
        X[2] = traj.Obj[0].z - states.Obj[0].z
        X[3] = traj.Obj[0].xdot - states.Obj[0].u
        X[4] = traj.Obj[0].ydot - states.Obj[0].v
        X[5] = traj.Obj[0].zdot - states.Obj[0].w
        X[6] = traj.Obj[0].psi - states.Obj[0].psi*np.pi/180
        #======================================#
        #    Only Integrate When Autonomous    #
        #======================================#
        global Integrator
        if ctrl_status:
            #=======================#
            #    Integrator Term    #
            #=======================#
            global K_Int
            # Euler integration of the error at the loop rate (Hz).
            Integrator = Integrator + np.divide(X,rate)
            #======================#
            #    Integrator Cap    #
            #======================#
            global Int_thresh
            Integrator = IntegratorCap(Integrator)
        elif not ctrl_status:
            # Manual flight: keep the integrator cleared.
            Integrator = np.asmatrix(np.zeros((7,1)))
        #====================================#
        #     Differential Flatness          #
        #    Input Acceleration Desired      #
        #         in Vehicle Frame           #
        #====================================#
        # LQR input
        utilde = -K*X - K_Int*Integrator
        # required input (reference accelerations = flatness feedforward)
        u_r = np.matrix([[traj.Obj[0].xddot],[traj.Obj[0].yddot],[traj.Obj[0].zddot],[traj.Obj[0].psiddot]])
        # NOTE(review): gravity is added as 9.81 here although g = 9.80665
        # above -- confirm which constant is intended.
        u = utilde+u_r+np.matrix([[0],[0],[9.81],[0]])
        #==================================#
        #    Rotate to Vehicle 1 Frame     #
        #==================================#
        psi = states.Obj[0].psi*np.pi/180 # in radians
        rotZ = np.matrix([[cos(psi), sin(psi), 0],[-sin(psi), cos(psi), 0],[0, 0, 1]])
        Cart = np.matrix([[1, 0, 0],[0, -1, 0],[0, 0, -1]])
        # u[:-1] is the translational part (x, y, z); psi-rate stays as-is.
        u[:-1] = Cart*rotZ*u[:-1]
        #===================================#
        #    Normalize given the Thrust     #
        #===================================#
        T = sqrt(u[0:3].T*u[0:3])
        z = np.divide(u[:-1],-T)  # desired body z-axis (unit vector)
        #==================#
        #    Set Controls  #
        #==================#
        ctrl = Control()
        ctrl.name = states.Obj[0].name
        # Normalize attitude commands by the platform limits so they lie
        # roughly in [-1, 1].
        ctrl.phi = asin(z[1,-1])/euler_max
        ctrl.theta = -asin(z[0,-1])/euler_max
        ctrl.psi = -u[3,-1]/max_yaw_rate
        thrust = 1-T/g
        # saturate the thrust command
        global ThrustCap
        if thrust > ThrustCap:
            thrust = ThrustCap
        if thrust < -ThrustCap:
            thrust = -ThrustCap
        ctrl.T = thrust
        Ctrl.Obj[0] = ctrl
        Ctrl.header = states.header
        pub_ctrl.publish(Ctrl)
#===================#
# Main #
#===================#
if __name__=='__main__':
    rospy.init_node('DF_Demo')
    #=======================#
    #    quad parameters    #
    #=======================#
    euler_max = float(rospy.get_param("euler_angle_max","0.78537")) #in radians
    max_yaw_rate = float(rospy.get_param("control_yaw","0.3490659")) #in radians/sec
    #=====================================#
    #    Set up Publish/Subscribe Loop    #
    #=====================================#
    # Register each subscriber ONCE, before the spin loop.  The original
    # re-created all three rospy.Subscriber objects on every iteration of
    # the 45 Hz loop, churning connections and accumulating callbacks.
    sub_cortex = rospy.Subscriber('/cortex_raw', Cortex, GetStates)
    sub_joy = rospy.Subscriber('/joy', Joy, GetJoy)
    sub_status = rospy.Subscriber('/controller_status', Bool, GetStatus)
    r = rospy.Rate(rate)
    while not rospy.is_shutdown():
        Datahandler()
        r.sleep()
| |
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_SNAPSHOTS_TAB_URL = reverse('horizon:project:volumes:snapshots_tab')
class VolumeSnapshotsViewTests(test.TestCase):
    """Mox-based view tests for the project volume-snapshot pages:
    create (GET/POST, forced), delete, detail and update."""

    @test.create_stubs({cinder: ('volume_get',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_snapshot_get(self):
        # GET of the create-snapshot form renders the expected template.
        volume = self.cinder_volumes.first()
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndReturn(volume)
        snapshot_used = len(self.cinder_volume_snapshots.list())
        usage_limit = {'maxTotalVolumeGigabytes': 250,
                       'gigabytesUsed': 20,
                       'snapshotsUsed': snapshot_used,
                       'maxTotalSnapshots': 6}
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:'
                      'volumes:create_snapshot', args=[volume.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/volumes/volumes/'
                                     'create_snapshot.html')

    @test.create_stubs({cinder: ('volume_get',
                                 'volume_snapshot_create',)})
    def test_create_snapshot_post(self):
        # POSTing the form creates the snapshot (force=False) and
        # redirects to the snapshots tab.
        volume = self.cinder_volumes.first()
        snapshot = self.cinder_volume_snapshots.first()
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndReturn(volume)
        cinder.volume_snapshot_create(IsA(http.HttpRequest),
                                      volume.id,
                                      snapshot.name,
                                      snapshot.description,
                                      force=False) \
            .AndReturn(snapshot)
        self.mox.ReplayAll()
        formData = {'method': 'CreateSnapshotForm',
                    'tenant_id': self.tenant.id,
                    'volume_id': volume.id,
                    'name': snapshot.name,
                    'description': snapshot.description}
        url = reverse('horizon:project:volumes:volumes:create_snapshot',
                      args=[volume.id])
        res = self.client.post(url, formData)
        self.assertRedirectsNoFollow(res, VOLUME_SNAPSHOTS_TAB_URL)

    @test.create_stubs({cinder: ('volume_get',
                                 'volume_snapshot_create',)})
    def test_force_create_snapshot(self):
        # Snapshotting an attached volume ('my_volume') must pass
        # force=True to the API.
        volume = self.cinder_volumes.get(name='my_volume')
        snapshot = self.cinder_volume_snapshots.first()
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndReturn(volume)
        cinder.volume_snapshot_create(IsA(http.HttpRequest),
                                      volume.id,
                                      snapshot.name,
                                      snapshot.description,
                                      force=True) \
            .AndReturn(snapshot)
        self.mox.ReplayAll()
        formData = {'method': 'CreateSnapshotForm',
                    'tenant_id': self.tenant.id,
                    'volume_id': volume.id,
                    'name': snapshot.name,
                    'description': snapshot.description}
        url = reverse('horizon:project:volumes:volumes:create_snapshot',
                      args=[volume.id])
        res = self.client.post(url, formData)
        self.assertRedirectsNoFollow(res, VOLUME_SNAPSHOTS_TAB_URL)

    @test.create_stubs({api.cinder: ('volume_snapshot_list',
                                     'volume_list',
                                     'volume_backup_supported',
                                     'volume_snapshot_delete')})
    def test_delete_volume_snapshot(self):
        # The table delete action schedules deletion and reports success;
        # the second list/volume_list pair covers the post-delete refresh.
        vol_snapshots = self.cinder_volume_snapshots.list()
        volumes = self.cinder_volumes.list()
        snapshot = self.cinder_volume_snapshots.first()
        api.cinder.volume_backup_supported(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(True)
        api.cinder.volume_snapshot_list(IsA(http.HttpRequest)). \
            AndReturn(vol_snapshots)
        api.cinder.volume_list(IsA(http.HttpRequest)). \
            AndReturn(volumes)
        api.cinder.volume_snapshot_delete(IsA(http.HttpRequest), snapshot.id)
        api.cinder.volume_snapshot_list(IsA(http.HttpRequest)). \
            AndReturn([])
        api.cinder.volume_list(IsA(http.HttpRequest)). \
            AndReturn(volumes)
        self.mox.ReplayAll()
        formData = {'action':
                    'volume_snapshots__delete__%s' % snapshot.id}
        res = self.client.post(VOLUME_SNAPSHOTS_TAB_URL, formData, follow=True)
        self.assertIn("Scheduled deletion of Volume Snapshot: test snapshot",
                      [m.message for m in res.context['messages']])

    @test.create_stubs({api.cinder: ('volume_snapshot_get', 'volume_get')})
    def test_volume_snapshot_detail_get(self):
        # The detail page shows the snapshot name, description, id and
        # status pulled from the API.
        volume = self.cinder_volumes.first()
        snapshot = self.cinder_volume_snapshots.first()
        api.cinder.volume_get(IsA(http.HttpRequest), volume.id). \
            AndReturn(volume)
        api.cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id). \
            AndReturn(snapshot)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:snapshots:detail',
                      args=[snapshot.id])
        res = self.client.get(url)
        self.assertContains(res,
                            "<h1>Volume Snapshot Details: %s</h1>" %
                            snapshot.name,
                            1, 200)
        self.assertContains(res, "<dd>test snapshot</dd>", 1, 200)
        self.assertContains(res, "<dd>%s</dd>" % snapshot.id, 1, 200)
        self.assertContains(res, "<dd>Available</dd>", 1, 200)

    @test.create_stubs({api.cinder: ('volume_snapshot_get',)})
    def test_volume_snapshot_detail_get_with_exception(self):
        # Test to verify redirect if get volume snapshot fails
        snapshot = self.cinder_volume_snapshots.first()
        api.cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id).\
            AndRaise(self.exceptions.cinder)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:snapshots:detail',
                      args=[snapshot.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.cinder: ('volume_snapshot_get', 'volume_get')})
    def test_volume_snapshot_detail_with_volume_get_exception(self):
        # Test to verify redirect if get volume fails
        volume = self.cinder_volumes.first()
        snapshot = self.cinder_volume_snapshots.first()
        api.cinder.volume_get(IsA(http.HttpRequest), volume.id). \
            AndRaise(self.exceptions.cinder)
        api.cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id). \
            AndReturn(snapshot)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:snapshots:detail',
                      args=[snapshot.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({cinder: ('volume_snapshot_update',
                                 'volume_snapshot_get')})
    def test_update_snapshot(self):
        # POSTing the update form calls volume_snapshot_update and
        # redirects to the index.
        snapshot = self.cinder_volume_snapshots.first()
        cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id) \
            .AndReturn(snapshot)
        cinder.volume_snapshot_update(IsA(http.HttpRequest),
                                      snapshot.id,
                                      snapshot.name,
                                      snapshot.description) \
            .AndReturn(snapshot)
        self.mox.ReplayAll()
        formData = {'method': 'UpdateSnapshotForm',
                    'name': snapshot.name,
                    'description': snapshot.description}
        url = reverse(('horizon:project:volumes:snapshots:update'),
                      args=[snapshot.id])
        res = self.client.post(url, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
| |
# Contains standalone functions to accompany the index implementation and make it
# more versatile
# NOTE: Autodoc hates it if this is a docstring
from stat import (
S_IFDIR,
S_IFLNK,
S_ISLNK,
S_IFDIR,
S_ISDIR,
S_IFMT,
S_IFREG,
)
S_IFGITLINK = S_IFLNK | S_IFDIR # a submodule
from cStringIO import StringIO
from git.util import IndexFileSHA1Writer
from git.exc import UnmergedEntriesError
from git.objects.fun import (
tree_to_stream,
traverse_tree_recursive,
traverse_trees_recursive
)
from typ import (
BaseIndexEntry,
IndexEntry,
CE_NAMEMASK,
CE_STAGESHIFT
)
CE_NAMEMASK_INV = ~CE_NAMEMASK
from util import (
pack,
unpack
)
from gitdb.base import IStream
from gitdb.typ import str_tree_type
__all__ = ('write_cache', 'read_cache', 'write_tree_from_cache', 'entry_key',
'stat_mode_to_index_mode', 'S_IFGITLINK')
def stat_mode_to_index_mode(mode):
    """Convert the given mode from a stat call to the corresponding index mode
    and return it

    :param mode: st_mode value as returned by os.stat/lstat"""
    if S_ISLNK(mode):  # symlinks
        return S_IFLNK
    if S_ISDIR(mode) or S_IFMT(mode) == S_IFGITLINK:  # submodules
        return S_IFGITLINK
    # blobs: base permissions 0o644 plus the owner-executable bit only.
    # 0oNNN literals work on python 2.6+ and are required on python 3;
    # the original 0644/0100 form is a SyntaxError there.
    return S_IFREG | 0o644 | (mode & 0o100)  # blobs with or without executable bit
def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1Writer):
    """Write the cache represented by entries to a stream

    :param entries: **sorted** list of entries
    :param stream: stream to wrap into the AdapterStreamCls - it is used for
        final output.
    :param ShaStreamCls: Type to use when writing to the stream. It produces a sha
        while writing to it, before the data is passed on to the wrapped stream
    :param extension_data: any kind of data to write as a trailer, it must begin
        a 4 byte identifier, followed by its size ( 4 bytes )"""
    # wrap the stream into a compatible writer that checksums everything written
    stream = ShaStreamCls(stream)

    # bind methods locally - this is a hot loop over possibly many entries
    tell = stream.tell
    write = stream.write

    # header: magic signature, version, number of entries
    version = 2
    write("DIRC")
    write(pack(">LL", version, len(entries)))

    # body: one on-disk record per entry
    # NOTE(review): entry appears to be an IndexEntry-style tuple; index
    # positions used: 0=mode, 1=binsha, 2=flags, 3=path, 4=ctime, 5=mtime,
    # 6=dev, 7=inode, 8=uid, 9=gid, 10=size - confirm against typ.IndexEntry
    for entry in entries:
        beginoffset = tell()
        write(entry[4])  # ctime
        write(entry[5])  # mtime
        path = entry[3]
        plen = len(path) & CE_NAMEMASK  # path length
        assert plen == len(path), "Path %s too long to fit into index" % entry[3]
        # store the path length in the low flag bits, clear possible previous values
        flags = plen | (entry[2] & CE_NAMEMASK_INV)
        write(pack(">LLLLLL20sH", entry[6], entry[7], entry[0],
                   entry[8], entry[9], entry[10], entry[1], flags))
        write(path)
        # records are padded with NULs to the next 8-byte boundary
        real_size = ((tell() - beginoffset + 8) & ~7)
        write("\0" * ((beginoffset + real_size) - tell()))
    # END for each entry

    # write previously cached extensions data verbatim, as a trailer
    if extension_data is not None:
        stream.write(extension_data)

    # finally append the sha over all content written so far
    stream.write_sha()
def read_header(stream):
    """Return tuple(version_long, num_entries) from the given stream

    :param stream: stream positioned at the start of an index file
    :raise AssertionError: if the stream does not start with the 'DIRC'
        index signature"""
    type_id = stream.read(4)
    if type_id != "DIRC":
        raise AssertionError("Invalid index file header: %r" % type_id)
    # two big-endian unsigned longs follow the signature
    version, num_entries = unpack(">LL", stream.read(4 * 2))

    # TODO: handle version 3: extended data, see read-cache.c
    assert version in (1, 2)
    return version, num_entries
def entry_key(*entry):
    """:return: Key suitable to be used for the index.entries dictionary
    :param entry: One instance of type BaseIndexEntry or the path and the stage"""
    if len(entry) != 1:
        # called as entry_key(path, stage)
        return tuple(entry)
    # called with a single entry-like object exposing .path and .stage
    item = entry[0]
    return (item.path, item.stage)
def read_cache(stream):
    """Read a cache file from the given stream

    :return: tuple(version, entries_dict, extension_data, content_sha)

        * version is the integer version number
        * entries dict is a dictionary which maps IndexEntry instances to a path
          at a stage
        * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes
        * content_sha is a 20 byte sha on all cache file contents"""
    version, num_entries = read_header(stream)
    count = 0
    entries = dict()

    # bind stream methods locally for the per-entry loop
    read = stream.read
    tell = stream.tell
    while count < num_entries:
        beginoffset = tell()
        # ctime/mtime are kept as raw 8-byte strings - they are only ever
        # written back verbatim, never interpreted here
        ctime = unpack(">8s", read(8))[0]
        mtime = unpack(">8s", read(8))[0]
        (dev, ino, mode, uid, gid, size, sha, flags) = \
            unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
        # the low flag bits carry the path length (see write_cache)
        path_size = flags & CE_NAMEMASK
        path = read(path_size)

        # records are NUL-padded to the next 8-byte boundary; skip the padding
        real_size = ((tell() - beginoffset + 8) & ~7)
        data = read((beginoffset + real_size) - tell())
        entry = IndexEntry((mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
        # entry_key would be the method to use, but we save the effort
        entries[(path, entry.stage)] = entry
        count += 1
    # END for each entry

    # the footer contains extension data and a sha on the content so far
    # Keep the extension footer,and verify we have a sha in the end
    # Extension data format is:
    #   4 bytes ID
    #   4 bytes length of chunk
    #   repeated 0 - N times
    extension_data = stream.read(~0)  # ~0 == -1, i.e. read to end of stream
    assert len(extension_data) > 19, "Index Footer was not at least a sha on content as it was only %i bytes in size" % len(extension_data)

    content_sha = extension_data[-20:]

    # truncate the sha in the end as we will dynamically create it anyway
    extension_data = extension_data[:-20]

    return (version, entries, extension_data, content_sha)
def write_tree_from_cache(entries, odb, sl, si=0):
    """Create a tree from the given sorted list of entries and put the respective
    trees into the given object database

    :param entries: **sorted** list of IndexEntries
    :param odb: object database to store the trees in
    :param sl: slice indicating the range we should process on the entries list
    :param si: start index at which we should start creating subtrees
    :return: tuple(binsha, list(tree_entry, ...)) a tuple of a sha and a list of
        tree entries being a tuple of hexsha, mode, name"""
    tree_items = list()
    tree_items_append = tree_items.append
    ci = sl.start
    end = sl.stop
    while ci < end:
        entry = entries[ci]
        if entry.stage != 0:
            # an unmerged (conflicted) entry cannot be written as a tree
            raise UnmergedEntriesError(entry)
        # END abort on unmerged
        ci += 1
        # a '/' past the current prefix means the entry lives in a subdirectory
        rbound = entry.path.find('/', si)
        if rbound == -1:
            # its not a tree - emit the blob/commit entry directly
            tree_items_append((entry.binsha, entry.mode, entry.path[si:]))
        else:
            # find common base range: all following entries sharing the same
            # directory prefix belong to the same subtree (input is sorted)
            base = entry.path[si:rbound]
            xi = ci
            while xi < end:
                oentry = entries[xi]
                orbound = oentry.path.find('/', si)
                if orbound == -1 or oentry.path[si:orbound] != base:
                    break
                # END abort on base mismatch
                xi += 1
            # END find common base

            # enter recursion
            # ci - 1 as we want to count our current item as well
            sha, tree_entry_list = write_tree_from_cache(entries, odb, slice(ci-1, xi), rbound+1)
            tree_items_append((sha, S_IFDIR, base))

            # skip ahead past the entries consumed by the subtree
            ci = xi
        # END handle bounds
    # END for each entry

    # finally create the tree, serializing the collected items and storing
    # the result in the object database
    sio = StringIO()
    tree_to_stream(tree_items, sio.write)
    sio.seek(0)

    istream = odb.store(IStream(str_tree_type, len(sio.getvalue()), sio))
    return (istream.binsha, tree_items)
def _tree_entry_to_baseindexentry(tree_entry, stage):
    """Build a BaseIndexEntry at the given merge stage from a
    (binsha, mode, name) tree entry tuple."""
    binsha, mode, name = tree_entry
    return BaseIndexEntry((mode, binsha, stage << CE_STAGESHIFT, name))
def aggressive_tree_merge(odb, tree_shas):
    """
    :return: list of BaseIndexEntries representing the aggressive merge of the given
        trees. All valid entries are on stage 0, whereas the conflicting ones are left
        on stage 1, 2 or 3, whereas stage 1 corresponds to the common ancestor tree,
        2 to our tree and 3 to 'their' tree.
    :param tree_shas: 1, 2 or 3 trees as identified by their binary 20 byte shas
        If 1 or two, the entries will effectively correspond to the last given tree
        If 3 are given, a 3 way merge is performed"""
    out = list()
    out_append = out.append

    # one and two way is the same for us, as we don't have to handle an existing
    # index, instead we simply take the entries of the last tree as-is
    if len(tree_shas) in (1,2):
        for entry in traverse_tree_recursive(odb, tree_shas[-1], ''):
            out_append(_tree_entry_to_baseindexentry(entry, 0))
        # END for each entry
        return out
    # END handle single tree

    if len(tree_shas) > 3:
        raise ValueError("Cannot handle %i trees at once" % len(tree_shas))

    # three trees: each traversal result is a (base, ours, theirs) triple of
    # tree entries (or None where the path is absent in that tree)
    for base, ours, theirs in traverse_trees_recursive(odb, tree_shas, ''):
        if base is not None:
            # base version exists
            if ours is not None:
                # ours exists
                if theirs is not None:
                    # it exists in all branches, if it was changed in both
                    # its a conflict, otherwise we take the changed version
                    # This should be the most common branch, so it comes first
                    # NOTE: entry[0] is the sha, entry[1] the mode - a change in
                    # either counts as a modification
                    if( base[0] != ours[0] and base[0] != theirs[0] and ours[0] != theirs[0] ) or \
                        ( base[1] != ours[1] and base[1] != theirs[1] and ours[1] != theirs[1] ):
                        # changed by both - record all three stages as conflict
                        out_append(_tree_entry_to_baseindexentry(base, 1))
                        out_append(_tree_entry_to_baseindexentry(ours, 2))
                        out_append(_tree_entry_to_baseindexentry(theirs, 3))
                    elif base[0] != ours[0] or base[1] != ours[1]:
                        # only we changed it
                        out_append(_tree_entry_to_baseindexentry(ours, 0))
                    else:
                        # either nobody changed it, or they did. In either
                        # case, use theirs
                        out_append(_tree_entry_to_baseindexentry(theirs, 0))
                    # END handle modification
                else:
                    # theirs deleted the path
                    if ours[0] != base[0] or ours[1] != base[1]:
                        # they deleted it, we changed it, conflict
                        out_append(_tree_entry_to_baseindexentry(base, 1))
                        out_append(_tree_entry_to_baseindexentry(ours, 2))
                    # else:
                    #   we didn't change it, deletion wins - ignore
                    #   pass
                    # END handle our change
                # END handle theirs
            else:
                # ours deleted the path
                if theirs is None:
                    # deleted in both, its fine - its out
                    pass
                else:
                    if theirs[0] != base[0] or theirs[1] != base[1]:
                        # deleted in ours, changed theirs, conflict
                        out_append(_tree_entry_to_baseindexentry(base, 1))
                        out_append(_tree_entry_to_baseindexentry(theirs, 3))
                    # END theirs changed
                    # else:
                    #   theirs didnt change, deletion wins
                    #   pass
                    # END handle theirs
            # END handle ours
        else:
            # base doesn't exist: the path was added on one or both sides
            # all three can't be None
            if ours is None:
                # added in their branch
                out_append(_tree_entry_to_baseindexentry(theirs, 0))
            elif theirs is None:
                # added in our branch
                out_append(_tree_entry_to_baseindexentry(ours, 0))
            else:
                # both have it, except for the base, see whether it changed
                if ours[0] != theirs[0] or ours[1] != theirs[1]:
                    # added differently on both sides - two-stage conflict
                    out_append(_tree_entry_to_baseindexentry(ours, 2))
                    out_append(_tree_entry_to_baseindexentry(theirs, 3))
                else:
                    # it was added the same in both
                    out_append(_tree_entry_to_baseindexentry(ours, 0))
                # END handle two items
            # END handle heads
        # END handle base exists
    # END for each entries tuple
    return out
| |
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from nova import block_device
from nova.compute import arch
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_block_device
import nova.tests.unit.image.fake
from nova.virt import block_device as driver_block_device
from nova.virt.libvirt import blockinfo
class LibvirtBlockInfoTest(test.NoDBTestCase):
    def setUp(self):
        """Prepare a fake image service and a baseline instance dict that the
        disk-mapping tests below copy and tweak."""
        super(LibvirtBlockInfoTest, self).setUp()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        # replace the real image service with the in-memory fake
        nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
        self.test_instance = {
            'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
            'memory_kb': '1024000',
            'basepath': '/some/path',
            'bridge_name': 'br100',
            'vcpus': 2,
            'project_id': 'fake',
            'bridge': 'br101',
            'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
            'root_gb': 10,
            'ephemeral_gb': 20,
            'instance_type_id': 2,  # m1.tiny
            'config_drive': None,
            'system_metadata': {},
        }
        flavor = objects.Flavor(memory_mb=128,
                                root_gb=0,
                                name='m1.micro',
                                ephemeral_gb=0,
                                vcpus=1,
                                swap=0,
                                rxtx_factor=1.0,
                                flavorid='1',
                                vcpu_weight=None,
                                id=2)
        self.test_instance['flavor'] = flavor
        self.test_instance['old_flavor'] = None
        self.test_instance['new_flavor'] = None
    def test_volume_in_mapping(self):
        """volume_in_mapping reports devices found in swap, ephemerals or the
        block device mapping, and False for everything else (incl. root)."""
        swap = {'device_name': '/dev/sdb',
                'swap_size': 1}
        ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4',
                       'device_name': '/dev/sdc1', 'size': 10},
                      {'disk_bus': 'ide', 'guest_format': None,
                       'device_name': '/dev/sdd', 'size': 10}]
        block_device_mapping = [{'mount_device': '/dev/sde',
                                 'device_path': 'fake_device'},
                                {'mount_device': '/dev/sdf',
                                 'device_path': 'fake_device'}]
        block_device_info = {
            'root_device_name': '/dev/sda',
            'swap': swap,
            'ephemerals': ephemerals,
            'block_device_mapping': block_device_mapping}

        def _assert_volume_in_mapping(device_name, true_or_false):
            self.assertEqual(
                true_or_false,
                block_device.volume_in_mapping(device_name,
                                               block_device_info))

        _assert_volume_in_mapping('sda', False)
        _assert_volume_in_mapping('sdb', True)
        _assert_volume_in_mapping('sdc1', True)
        _assert_volume_in_mapping('sdd', True)
        _assert_volume_in_mapping('sde', True)
        _assert_volume_in_mapping('sdf', True)
        _assert_volume_in_mapping('sdg', False)
        _assert_volume_in_mapping('sdh1', False)
    def test_find_disk_dev(self):
        """find_disk_dev_for_disk_bus picks the first (or last) free device
        name for the bus's naming scheme, skipping names already mapped."""
        mapping = {
            "disk.local": {
                'dev': 'sda',
                'bus': 'scsi',
                'type': 'disk',
            },
            "disk.swap": {
                'dev': 'sdc',
                'bus': 'scsi',
                'type': 'disk',
            },
        }

        # 'sda' is taken, so the next free scsi name is 'sdb'
        dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi')
        self.assertEqual('sdb', dev)

        # last_device asks for the final name in the scsi range
        dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi',
                                                   last_device=True)
        self.assertEqual('sdz', dev)

        dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio')
        self.assertEqual('vda', dev)

        dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'fdc')
        self.assertEqual('fda', dev)

    def test_get_next_disk_dev(self):
        """get_next_disk_info allocates successive device names as the
        mapping grows."""
        mapping = {}
        mapping['disk.local'] = blockinfo.get_next_disk_info(mapping,
                                                             'virtio')
        self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'},
                         mapping['disk.local'])

        mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping,
                                                            'virtio')
        self.assertEqual({'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'},
                         mapping['disk.swap'])

        # cdrom on ide with last_device=True takes the last ide name (hdd)
        mapping['disk.config'] = blockinfo.get_next_disk_info(mapping,
                                                              'ide',
                                                              'cdrom',
                                                              True)
        self.assertEqual({'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'},
                         mapping['disk.config'])

    def test_get_next_disk_dev_boot_index(self):
        """A negative boot_index is dropped; a non-negative one is kept as a
        string in the returned info dict."""
        info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=-1)
        self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'}, info)

        info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=2)
        self.assertEqual({'dev': 'vda', 'bus': 'virtio',
                          'type': 'disk', 'boot_index': '2'},
                         info)
    def test_get_disk_mapping_simple(self):
        # The simplest possible disk mapping setup, all defaults
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta)

        # 'root' aliases the boot disk entry ('disk'); ephemeral_gb > 0 in
        # the base instance, hence the extra 'disk.local'
        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'}
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_simple_rootdev(self):
        # A simple disk mapping setup, but with custom root device name
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        block_device_info = {
            'root_device_name': '/dev/sda'
        }

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta,
                                             block_device_info)

        # the sd* root name implies the scsi bus for the root disk
        expect = {
            'disk': {'bus': 'scsi', 'dev': 'sda',
                     'type': 'disk', 'boot_index': '1'},
            'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'root': {'bus': 'scsi', 'dev': 'sda',
                     'type': 'disk', 'boot_index': '1'}
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_rescue(self):
        # A simple disk mapping setup, but in rescue mode
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta,
                                             rescue=True)

        # the rescue image boots first (vda); the original disk moves to vdb
        expect = {
            'disk.rescue': {'bus': 'virtio', 'dev': 'vda',
                            'type': 'disk', 'boot_index': '1'},
            'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_lxc(self):
        # A simple disk mapping setup, but for lxc
        # lxc containers get no device names at all
        self.test_instance['ephemeral_gb'] = 0
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
                                             "lxc", "lxc",
                                             image_meta)
        expect = {
            'disk': {'bus': 'lxc', 'dev': None,
                     'type': 'disk', 'boot_index': '1'},
            'root': {'bus': 'lxc', 'dev': None,
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_simple_iso(self):
        # A simple disk mapping setup, but with a ISO for root device
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {'disk_format': 'iso'}

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta)

        # an iso root becomes a cdrom on the cdrom bus (ide)
        expect = {
            'disk': {'bus': 'ide', 'dev': 'hda',
                     'type': 'cdrom', 'boot_index': '1'},
            'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'root': {'bus': 'ide', 'dev': 'hda',
                     'type': 'cdrom', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_simple_swap(self):
        # A simple disk mapping setup, but with a swap device added
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.swap = 5
        image_meta = {}

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta)
        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)
    def test_get_disk_mapping_simple_configdrive(self):
        # A simple disk mapping setup, but with configdrive added
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        self.flags(force_config_drive=True)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta)

        # The last device is selected for this. on x86 is the last ide
        # device (hdd). Since power only support scsi, the last device
        # is sdz
        bus_ppc = ("scsi", "sdz")
        expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}

        bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
                                  ("ide", "hdd"))

        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'}
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_cdrom_configdrive(self):
        # A simple disk mapping setup, with configdrive added as cdrom
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        self.flags(force_config_drive=True)
        self.flags(config_drive_format='iso9660')

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta)

        bus_ppc = ("scsi", "sdz")
        expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}

        bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
                                  ("ide", "hdd"))

        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'}
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_disk_configdrive(self):
        # A simple disk mapping setup, with configdrive added as disk
        # vfat format forces a 'disk' type config drive on virtio at vdz
        self.flags(force_config_drive=True)
        self.flags(config_drive_format='vfat')

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta)
        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)
    def test_get_disk_mapping_ephemeral(self):
        # A disk mapping with ephemeral devices
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.swap = 5
        image_meta = {}

        block_device_info = {
            'ephemerals': [
                {'device_type': 'disk', 'guest_format': 'ext4',
                 'device_name': '/dev/vdb', 'size': 10},
                {'disk_bus': 'ide', 'guest_format': None,
                 'device_name': '/dev/vdc', 'size': 10},
                {'device_type': 'floppy',
                 'device_name': '/dev/vdd', 'size': 10},
            ]
        }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta,
                                             block_device_info)

        # each ephemeral keeps its requested bus/type/format; swap takes the
        # next free device after them
        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
            'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
                          'type': 'disk', 'format': 'ext4'},
            'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
            'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'floppy'},
            'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_custom_swap(self):
        # A disk mapping with a swap device at position vdb. This
        # should cause disk.local to be removed
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        block_device_info = {
            'swap': {'device_name': '/dev/vdb',
                     'swap_size': 10},
        }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta,
                                             block_device_info)
        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
            'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)
    def test_get_disk_mapping_blockdev_root(self):
        # A disk mapping with a blockdev replacing the default root
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        block_device_info = {
            'block_device_mapping': [
                {'connection_info': "fake",
                 'mount_device': "/dev/vda",
                 'boot_index': 0,
                 'device_type': 'disk',
                 'delete_on_termination': True},
            ]
        }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta,
                                             block_device_info)

        # the volume keyed by its mount device takes the place of 'disk'
        expect = {
            '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
                         'type': 'disk', 'boot_index': '1'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_blockdev_eph(self):
        # A disk mapping with a blockdev replacing the ephemeral device
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        block_device_info = {
            'block_device_mapping': [
                {'connection_info': "fake",
                 'mount_device': "/dev/vdb",
                 'boot_index': -1,
                 'delete_on_termination': True},
            ]
        }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta,
                                             block_device_info)
        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
            '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_blockdev_many(self):
        # A disk mapping with a blockdev replacing all devices
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        block_device_info = {
            'block_device_mapping': [
                {'connection_info': "fake",
                 'mount_device': "/dev/vda",
                 'boot_index': 0,
                 'disk_bus': 'scsi',
                 'delete_on_termination': True},
                {'connection_info': "fake",
                 'mount_device': "/dev/vdb",
                 'boot_index': -1,
                 'delete_on_termination': True},
                {'connection_info': "fake",
                 'mount_device': "/dev/vdc",
                 'boot_index': -1,
                 'device_type': 'cdrom',
                 'delete_on_termination': True},
            ]
        }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta,
                                             block_device_info)
        expect = {
            '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
                         'type': 'disk', 'boot_index': '1'},
            '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'cdrom'},
            'root': {'bus': 'scsi', 'dev': 'vda',
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)
    def test_get_disk_mapping_complex(self):
        # The strangest possible disk mapping setup
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        block_device_info = {
            'root_device_name': '/dev/vdf',
            'swap': {'device_name': '/dev/vdy',
                     'swap_size': 10},
            'ephemerals': [
                {'device_type': 'disk', 'guest_format': 'ext4',
                 'device_name': '/dev/vdb', 'size': 10},
                {'disk_bus': 'ide', 'guest_format': None,
                 'device_name': '/dev/vdc', 'size': 10},
            ],
            'block_device_mapping': [
                {'connection_info': "fake",
                 'mount_device': "/dev/vda",
                 'boot_index': 1,
                 'delete_on_termination': True},
            ]
        }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             image_meta,
                                             block_device_info)

        # root moves to vdf; the attached volume with boot_index 1 becomes
        # the second boot device ('2' - indices are shifted to be 1-based)
        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vdf',
                     'type': 'disk', 'boot_index': '1'},
            '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
                         'type': 'disk', 'boot_index': '2'},
            'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
                          'type': 'disk', 'format': 'ext4'},
            'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
            'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vdf',
                     'type': 'disk', 'boot_index': '1'},
        }
        self.assertEqual(expect, mapping)

    def test_get_disk_mapping_updates_original(self):
        """get_disk_mapping normalizes the passed-in block_device_info in
        place: bogus buses/types are replaced and missing device names
        assigned."""
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        block_device_info = {
            'root_device_name': '/dev/vda',
            'swap': {'device_name': '/dev/vdb',
                     'device_type': 'really_lame_type',
                     'swap_size': 10},
            'ephemerals': [{'disk_bus': 'no_such_bus',
                            'device_type': 'yeah_right',
                            'device_name': '/dev/vdc', 'size': 10}],
            'block_device_mapping': [
                {'connection_info': "fake",
                 'mount_device': None,
                 'device_type': 'lawnmower',
                 'delete_on_termination': True}]
        }
        expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
                         'device_type': 'disk', 'swap_size': 10}
        expected_ephemeral = {'disk_bus': 'virtio',
                              'device_type': 'disk',
                              'device_name': '/dev/vdc', 'size': 10}
        expected_bdm = {'connection_info': "fake",
                        'mount_device': '/dev/vdd',
                        'device_type': 'disk',
                        'disk_bus': 'virtio',
                        'delete_on_termination': True}

        blockinfo.get_disk_mapping("kvm", instance_ref,
                                   "virtio", "ide",
                                   image_meta,
                                   block_device_info)

        self.assertEqual(expected_swap, block_device_info['swap'])
        self.assertEqual(expected_ephemeral,
                         block_device_info['ephemerals'][0])
        self.assertEqual(expected_bdm,
                         block_device_info['block_device_mapping'][0])
    def test_get_disk_bus(self):
        """Default bus per (architecture, device type), image-property
        overrides, and rejection of unsupported buses."""
        # architecture-dependent defaults
        expected = (
            (arch.X86_64, 'disk', 'virtio'),
            (arch.X86_64, 'cdrom', 'ide'),
            (arch.X86_64, 'floppy', 'fdc'),
            (arch.PPC, 'disk', 'virtio'),
            (arch.PPC, 'cdrom', 'scsi'),
            (arch.PPC64, 'disk', 'virtio'),
            (arch.PPC64, 'cdrom', 'scsi'),
            (arch.S390, 'disk', 'virtio'),
            (arch.S390, 'cdrom', 'scsi'),
            (arch.S390X, 'disk', 'virtio'),
            (arch.S390X, 'cdrom', 'scsi')
        )
        image_meta = {}
        for guestarch, dev, res in expected:
            with mock.patch.object(blockinfo.libvirt_utils,
                                   'get_arch',
                                   return_value=guestarch):
                bus = blockinfo.get_disk_bus_for_device_type('kvm',
                                                             image_meta, dev)
                self.assertEqual(res, bus)

        # hw_disk_bus / hw_cdrom_bus image properties win over the defaults
        expected = (
            ('scsi', None, 'disk', 'scsi'),
            (None, 'scsi', 'cdrom', 'scsi'),
            ('usb', None, 'disk', 'usb')
        )
        for dbus, cbus, dev, res in expected:
            image_meta = {'properties': {'hw_disk_bus': dbus,
                                         'hw_cdrom_bus': cbus}}
            bus = blockinfo.get_disk_bus_for_device_type('kvm',
                                                         image_meta,
                                                         device_type=dev)
            self.assertEqual(res, bus)

        # a bus invalid for the hypervisor is rejected outright
        image_meta = {'properties': {'hw_disk_bus': 'xen'}}
        self.assertRaises(exception.UnsupportedHardware,
                          blockinfo.get_disk_bus_for_device_type,
                          'kvm',
                          image_meta)

    def test_success_get_disk_bus_for_disk_dev(self):
        """The device-name prefix determines the bus for each hypervisor."""
        expected = (
            ('ide', ("kvm", "hda")),
            ('scsi', ("kvm", "sdf")),
            ('virtio', ("kvm", "vds")),
            ('fdc', ("kvm", "fdc")),
            ('uml', ("kvm", "ubd")),
            ('xen', ("xen", "sdf")),
            ('xen', ("xen", "xvdb"))
        )
        for res, args in expected:
            self.assertEqual(res, blockinfo.get_disk_bus_for_disk_dev(*args))

    def test_fail_get_disk_bus_for_disk_dev(self):
        """An unrecognized device name prefix raises NovaException."""
        self.assertRaises(exception.NovaException,
                          blockinfo.get_disk_bus_for_disk_dev, 'inv', 'val')
    def test_get_config_drive_type_default(self):
        """With no config_drive_format set, the config drive is a cdrom."""
        config_drive_type = blockinfo.get_config_drive_type()
        self.assertEqual('cdrom', config_drive_type)

    def test_get_config_drive_type_cdrom(self):
        """iso9660 format maps to a cdrom config drive."""
        self.flags(config_drive_format='iso9660')
        config_drive_type = blockinfo.get_config_drive_type()
        self.assertEqual('cdrom', config_drive_type)

    def test_get_config_drive_type_disk(self):
        """vfat format maps to a disk config drive."""
        self.flags(config_drive_format='vfat')
        config_drive_type = blockinfo.get_config_drive_type()
        self.assertEqual('disk', config_drive_type)

    def test_get_config_drive_type_improper_value(self):
        """An unknown format raises ConfigDriveUnknownFormat."""
        self.flags(config_drive_format='test')
        self.assertRaises(exception.ConfigDriveUnknownFormat,
                          blockinfo.get_config_drive_type)
    def test_get_info_from_bdm(self):
        """get_info_from_bdm derives dev/type/bus (and boot_index) from a
        bdm dict, defaulting or correcting invalid values."""
        bdms = [{'device_name': '/dev/vds', 'device_type': 'disk',
                 'disk_bus': 'usb', 'swap_size': 4},
                {'device_type': 'disk', 'guest_format': 'ext4',
                 'device_name': '/dev/vdb', 'size': 2},
                {'disk_bus': 'ide', 'guest_format': None,
                 'device_name': '/dev/vdc', 'size': 3},
                {'connection_info': "fake",
                 'mount_device': "/dev/sdr",
                 'disk_bus': 'lame_bus',
                 'device_type': 'cdrom',
                 'boot_index': 0,
                 'delete_on_termination': True},
                {'connection_info': "fake",
                 'mount_device': "/dev/vdo",
                 'disk_bus': 'scsi',
                 'boot_index': 1,
                 'device_type': 'lame_type',
                 'delete_on_termination': True}]
        # NOTE(review): the loop variable below shadows this list; it works
        # because zip() consumes the list before the first rebind
        expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
                    {'dev': 'vdb', 'type': 'disk',
                     'bus': 'virtio', 'format': 'ext4'},
                    {'dev': 'vdc', 'type': 'disk', 'bus': 'ide'},
                    {'dev': 'sdr', 'type': 'cdrom',
                     'bus': 'scsi', 'boot_index': '1'},
                    {'dev': 'vdo', 'type': 'disk',
                     'bus': 'scsi', 'boot_index': '2'}]

        image_meta = {}
        for bdm, expected in zip(bdms, expected):
            self.assertEqual(expected,
                             blockinfo.get_info_from_bdm('kvm',
                                                         image_meta,
                                                         bdm))

        # Test that passed bus and type are considered
        bdm = {'device_name': '/dev/vda'}
        expected = {'dev': 'vda', 'type': 'disk', 'bus': 'ide'}
        self.assertEqual(
            expected, blockinfo.get_info_from_bdm('kvm',
                                                  image_meta,
                                                  bdm,
                                                  disk_bus='ide',
                                                  dev_type='disk'))

        # Test that lame bus values are defaulted properly
        bdm = {'disk_bus': 'lame_bus', 'device_type': 'cdrom'}
        with mock.patch.object(blockinfo,
                               'get_disk_bus_for_device_type',
                               return_value='ide') as get_bus:
            blockinfo.get_info_from_bdm('kvm',
                                        image_meta,
                                        bdm)
            get_bus.assert_called_once_with('kvm', image_meta, 'cdrom')

        # Test that missing device is defaulted as expected
        bdm = {'disk_bus': 'ide', 'device_type': 'cdrom'}
        expected = {'dev': 'vdd', 'type': 'cdrom', 'bus': 'ide'}
        mapping = {'root': {'dev': 'vda'}}
        with mock.patch.object(blockinfo,
                               'find_disk_dev_for_disk_bus',
                               return_value='vdd') as find_dev:
            got = blockinfo.get_info_from_bdm(
                'kvm',
                image_meta,
                bdm,
                mapping,
                assigned_devices=['vdb', 'vdc'])
            # assigned devices are folded into the mapping before lookup
            find_dev.assert_called_once_with(
                {'root': {'dev': 'vda'},
                 'vdb': {'dev': 'vdb'},
                 'vdc': {'dev': 'vdc'}}, 'ide')
            self.assertEqual(expected, got)
    def test_get_device_name(self):
        """get_device_name works for both BlockDeviceMapping objects and
        driver block devices, returning None when no name is set."""
        bdm_obj = objects.BlockDeviceMapping(self.context,
            **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 3, 'instance_uuid': 'fake-instance',
                 'device_name': '/dev/vda',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'volume_id': 'fake-volume-id-1',
                 'boot_index': 0}))
        self.assertEqual('/dev/vda', blockinfo.get_device_name(bdm_obj))

        driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
        self.assertEqual('/dev/vda', blockinfo.get_device_name(driver_bdm))

        bdm_obj.device_name = None
        self.assertIsNone(blockinfo.get_device_name(bdm_obj))

        driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
        self.assertIsNone(blockinfo.get_device_name(driver_bdm))

    @mock.patch('nova.virt.libvirt.blockinfo.find_disk_dev_for_disk_bus',
                return_value='vda')
    @mock.patch('nova.virt.libvirt.blockinfo.get_disk_bus_for_disk_dev',
                return_value='virtio')
    def test_get_root_info_no_bdm(self, mock_get_bus, mock_find_dev):
        """Without a root bdm, the device is found from the bus; with a
        root_device_name, the bus is derived from that name."""
        image_meta = {}
        blockinfo.get_root_info('kvm', image_meta, None, 'virtio', 'ide')
        mock_find_dev.assert_called_once_with({}, 'virtio')

        blockinfo.get_root_info('kvm', image_meta, None, 'virtio', 'ide',
                                root_device_name='/dev/vda')
        mock_get_bus.assert_called_once_with('kvm', '/dev/vda')

    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    def test_get_root_info_bdm(self, mock_get_info):
        """With a root bdm, get_root_info delegates to get_info_from_bdm,
        injecting the root_device_name when the bdm lacks a device."""
        image_meta = {}
        root_bdm = {'mount_device': '/dev/vda',
                    'disk_bus': 'scsi',
                    'device_type': 'disk'}
        # No root_device_name
        blockinfo.get_root_info('kvm', image_meta, root_bdm, 'virtio', 'ide')
        mock_get_info.assert_called_once_with('kvm', image_meta,
                                              root_bdm, {}, 'virtio')
        mock_get_info.reset_mock()
        # Both device names
        blockinfo.get_root_info('kvm', image_meta, root_bdm, 'virtio', 'ide',
                                root_device_name='sda')
        mock_get_info.assert_called_once_with('kvm', image_meta,
                                              root_bdm, {}, 'virtio')
        mock_get_info.reset_mock()
        # Missing device names
        del root_bdm['mount_device']
        blockinfo.get_root_info('kvm', image_meta, root_bdm, 'virtio', 'ide',
                                root_device_name='sda')
        mock_get_info.assert_called_once_with('kvm',
                                              image_meta,
                                              {'device_name': 'sda',
                                               'disk_bus': 'scsi',
                                               'device_type': 'disk'},
                                              {}, 'virtio')
def test_get_boot_order_simple(self):
disk_info = {
'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
}
expected_order = ['hd']
self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
    def test_get_boot_order_complex(self):
        """Boot order is sorted by boot_index across device types."""
        disk_info = {
            'disk_bus': 'virtio',
            'cdrom_bus': 'ide',
            'mapping': {
                'disk': {'bus': 'virtio', 'dev': 'vdf',
                         'type': 'disk', 'boot_index': '1'},
                '/dev/hda': {'bus': 'ide', 'dev': 'hda',
                             'type': 'cdrom', 'boot_index': '3'},
                '/dev/fda': {'bus': 'fdc', 'dev': 'fda',
                             'type': 'floppy', 'boot_index': '2'},
                # Entries without a boot_index must not appear in the order.
                'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
                              'type': 'disk', 'format': 'ext4'},
                'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
                'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
                'root': {'bus': 'virtio', 'dev': 'vdf',
                         'type': 'disk', 'boot_index': '1'},
            }
        }
        expected_order = ['hd', 'fd', 'cdrom']
        self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
    def test_get_boot_order_overlapping(self):
        """Multiple devices of one type collapse to a single boot entry."""
        disk_info = {
            'disk_bus': 'virtio',
            'cdrom_bus': 'ide',
            'mapping': {
                '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
                             'type': 'disk', 'boot_index': '1'},
                '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb',
                             'type': 'disk', 'boot_index': '2'},
                '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc',
                             'type': 'cdrom', 'boot_index': '3'},
                'root': {'bus': 'scsi', 'dev': 'vda',
                         'type': 'disk', 'boot_index': '1'},
            }
        }
        # Two disks ('hd') appear once; the cdrom keeps its later slot.
        expected_order = ['hd', 'cdrom']
        self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
class DefaultDeviceNamesTestCase(test.NoDBTestCase):
    """Tests for blockinfo.default_device_names.

    Verifies that missing device names on ephemeral, swap and volume
    block device mappings are filled in, while existing names are kept.
    """

    def setUp(self):
        super(DefaultDeviceNamesTestCase, self).setUp()
        self.context = context.get_admin_context()
        # Minimal instance fixture for the device-name defaulting calls.
        self.instance = objects.Instance(
            uuid='32dfcb37-5af1-552b-357c-be8c3aa38310',
            memory_kb='1024000',
            basepath='/some/path',
            bridge_name='br100',
            vcpus=2,
            project_id='fake',
            bridge='br101',
            image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            root_gb=10,
            ephemeral_gb=20,
            instance_type_id=2,
            config_drive=False,
            system_metadata={})
        self.root_device_name = '/dev/vda'
        self.virt_type = 'kvm'
        self.flavor = objects.Flavor(swap=4)
        # Patch out the flavor lookup and BDM persistence for every test;
        # stopped again in tearDown.
        self.patchers = []
        self.patchers.append(mock.patch.object(self.instance, 'get_flavor',
                                               return_value=self.flavor))
        self.patchers.append(mock.patch(
            'nova.objects.block_device.BlockDeviceMapping.save'))
        for patcher in self.patchers:
            patcher.start()

        # One local ephemeral disk (vdb).
        self.ephemerals = [objects.BlockDeviceMapping(
            self.context, **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 1, 'instance_uuid': 'fake-instance',
                 'device_name': '/dev/vdb',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'device_type': 'disk',
                 'disk_bus': 'virtio',
                 'delete_on_termination': True,
                 'guest_format': None,
                 'volume_size': 1,
                 'boot_index': -1}))]

        # One local swap disk (vdc).
        self.swap = [objects.BlockDeviceMapping(
            self.context, **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 2, 'instance_uuid': 'fake-instance',
                 'device_name': '/dev/vdc',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'device_type': 'disk',
                 'disk_bus': 'virtio',
                 'delete_on_termination': True,
                 'guest_format': 'swap',
                 'volume_size': 1,
                 'boot_index': -1}))]

        # Boot volume (vda) plus two data volumes (vdd, vde).
        self.block_device_mapping = [
            objects.BlockDeviceMapping(self.context,
                **fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 3, 'instance_uuid': 'fake-instance',
                     'device_name': '/dev/vda',
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_type': 'disk',
                     'disk_bus': 'virtio',
                     'volume_id': 'fake-volume-id-1',
                     'boot_index': 0})),
            objects.BlockDeviceMapping(self.context,
                **fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 4, 'instance_uuid': 'fake-instance',
                     'device_name': '/dev/vdd',
                     'source_type': 'snapshot',
                     'device_type': 'disk',
                     'disk_bus': 'virtio',
                     'destination_type': 'volume',
                     'snapshot_id': 'fake-snapshot-id-1',
                     'boot_index': -1})),
            objects.BlockDeviceMapping(self.context,
                **fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 5, 'instance_uuid': 'fake-instance',
                     'device_name': '/dev/vde',
                     'source_type': 'blank',
                     'device_type': 'disk',
                     'disk_bus': 'virtio',
                     'destination_type': 'volume',
                     'boot_index': -1}))]

    def tearDown(self):
        super(DefaultDeviceNamesTestCase, self).tearDown()
        for patcher in self.patchers:
            patcher.stop()

    def _test_default_device_names(self, eph, swap, bdm):
        """Invoke the code under test with the fixture instance/context."""
        image_meta = {}
        blockinfo.default_device_names(self.virt_type,
                                       self.context,
                                       self.instance,
                                       self.root_device_name,
                                       eph, swap, bdm,
                                       image_meta)

    def test_only_block_device_mapping(self):
        """Existing names are preserved; missing ones are defaulted."""
        # Test no-op
        original_bdm = copy.deepcopy(self.block_device_mapping)
        self._test_default_device_names([], [], self.block_device_mapping)
        for original, defaulted in zip(
                original_bdm, self.block_device_mapping):
            self.assertEqual(original.device_name, defaulted.device_name)

        # Assert it defaults the missing one as expected
        self.block_device_mapping[1]['device_name'] = None
        self.block_device_mapping[2]['device_name'] = None
        self._test_default_device_names([], [], self.block_device_mapping)
        self.assertEqual('/dev/vdd',
                         self.block_device_mapping[1]['device_name'])
        self.assertEqual('/dev/vde',
                         self.block_device_mapping[2]['device_name'])

    def test_with_ephemerals(self):
        """Ephemeral disks get names before the volume mappings."""
        # Test ephemeral gets assigned
        self.ephemerals[0]['device_name'] = None
        self._test_default_device_names(self.ephemerals, [],
                                        self.block_device_mapping)
        self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])

        self.block_device_mapping[1]['device_name'] = None
        self.block_device_mapping[2]['device_name'] = None
        self._test_default_device_names(self.ephemerals, [],
                                        self.block_device_mapping)
        self.assertEqual('/dev/vdd',
                         self.block_device_mapping[1]['device_name'])
        self.assertEqual('/dev/vde',
                         self.block_device_mapping[2]['device_name'])

    def test_with_swap(self):
        """Swap disks get names, alone and alongside volume mappings."""
        # Test swap only
        self.swap[0]['device_name'] = None
        self._test_default_device_names([], self.swap, [])
        self.assertEqual('/dev/vdc', self.swap[0]['device_name'])

        # Test swap and block_device_mapping
        self.swap[0]['device_name'] = None
        self.block_device_mapping[1]['device_name'] = None
        self.block_device_mapping[2]['device_name'] = None
        self._test_default_device_names([], self.swap,
                                        self.block_device_mapping)
        self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
        self.assertEqual('/dev/vdd',
                         self.block_device_mapping[1]['device_name'])
        self.assertEqual('/dev/vde',
                         self.block_device_mapping[2]['device_name'])

    def test_all_together(self):
        """Ephemeral, swap and volume names default in combination."""
        # Test swap missing
        self.swap[0]['device_name'] = None
        self._test_default_device_names(self.ephemerals,
                                        self.swap, self.block_device_mapping)
        self.assertEqual('/dev/vdc', self.swap[0]['device_name'])

        # Test swap and eph missing
        self.swap[0]['device_name'] = None
        self.ephemerals[0]['device_name'] = None
        self._test_default_device_names(self.ephemerals,
                                        self.swap, self.block_device_mapping)
        self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
        self.assertEqual('/dev/vdc', self.swap[0]['device_name'])

        # Test all missing
        self.swap[0]['device_name'] = None
        self.ephemerals[0]['device_name'] = None
        self.block_device_mapping[1]['device_name'] = None
        self.block_device_mapping[2]['device_name'] = None
        self._test_default_device_names(self.ephemerals,
                                        self.swap, self.block_device_mapping)
        self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
        self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
        self.assertEqual('/dev/vdd',
                         self.block_device_mapping[1]['device_name'])
        self.assertEqual('/dev/vde',
                         self.block_device_mapping[2]['device_name'])
| |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010 (ita)
"""
Various configuration tests.
"""
from waflib import Task
from waflib.Configure import conf
from waflib.TaskGen import feature, before_method, after_method
import sys
LIB_CODE = '''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllexport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void) { return 9; }
'''
MAIN_CODE = '''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllimport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void);
int main(int argc, char **argv) {
(void)argc; (void)argv;
return !(lib_func() == 9);
}
'''
@feature('link_lib_test')
@before_method('process_source')
def link_lib_test_fun(self):
	"""
	The configuration test :py:func:`waflib.Configure.run_build` declares a unique task generator,
	so we need to create other task generators from here to check if the linker is able to link libraries.
	"""
	def write_test_file(task):
		# Dump the C source attached to the generator into the target node.
		task.outputs[0].write(task.generator.code)
	rpath = []
	if getattr(self, 'add_rpath', False):
		rpath = [self.bld.path.get_bld().abspath()]
	mode = self.mode
	m = '%s %s' % (mode, mode)
	ex = self.test_exec and 'test_exec' or ''
	bld = self.bld
	# Generate the two sources, build a shared library from one and a
	# program using it from the other; linking the program is the test.
	bld(rule=write_test_file, target='test.' + mode, code=LIB_CODE)
	bld(rule=write_test_file, target='main.' + mode, code=MAIN_CODE)
	bld(features='%sshlib' % m, source='test.' + mode, target='test')
	bld(features='%sprogram %s' % (m, ex), source='main.' + mode, target='app', use='test', rpath=rpath)
@conf
def check_library(self, mode=None, test_exec=True):
	"""
	Check if libraries can be linked with the current linker. Uses :py:func:`waflib.Tools.c_tests.link_lib_test_fun`.

	:param mode: c or cxx or d
	:type mode: string
	"""
	if not mode:
		# Prefer the C++ compiler whenever one has been configured.
		mode = 'cxx' if self.env.CXX else 'c'
	self.check(
		compile_filename=[],
		features='link_lib_test',
		msg='Checking for libraries',
		mode=mode,
		test_exec=test_exec,
	)
########################################################################################
INLINE_CODE = '''
typedef int foo_t;
static %s foo_t static_foo () {return 0; }
%s foo_t foo () {
return 0;
}
'''
INLINE_VALUES = ['inline', '__inline__', '__inline']
@conf
def check_inline(self, **kw):
	"""
	Check for the right value for inline macro.
	Define INLINE_MACRO to 1 if the define is found.
	If the inline macro is not 'inline', add a define to the ``config.h`` (#define inline __inline__)

	:param define_name: define INLINE_MACRO by default to 1 if the macro is defined
	:type define_name: string
	:param features: by default *c* or *cxx* depending on the compiler present
	:type features: list of string
	"""
	self.start_msg('Checking for inline')
	# PEP 8: membership tests read "x not in y", not "not x in y".
	if 'define_name' not in kw:
		kw['define_name'] = 'INLINE_MACRO'
	if 'features' not in kw:
		kw['features'] = ['cxx'] if self.env.CXX else ['c']
	# Try each candidate keyword until one compiles.
	for x in INLINE_VALUES:
		kw['fragment'] = INLINE_CODE % (x, x)
		try:
			self.check(**kw)
		except self.errors.ConfigurationError:
			continue
		else:
			self.end_msg(x)
			if x != 'inline':
				# Alias plain "inline" to the working spelling in config.h.
				self.define('inline', x, quote=False)
			return x
	self.fatal('could not use inline functions')
########################################################################################
LARGE_FRAGMENT = '''#include <unistd.h>
int main(int argc, char **argv) {
(void)argc; (void)argv;
return !(sizeof(off_t) >= 8);
}
'''
@conf
def check_large_file(self, **kw):
	"""
	Check for large file support and define the macro HAVE_LARGEFILE
	The test is skipped on win32 systems (DEST_BINFMT == pe).

	:param define_name: define to set, by default *HAVE_LARGEFILE*
	:type define_name: string
	:param execute: execute the test (yes by default)
	:type execute: bool
	"""
	# PEP 8: membership tests read "x not in y", not "not x in y".
	if 'define_name' not in kw:
		kw['define_name'] = 'HAVE_LARGEFILE'
	if 'execute' not in kw:
		kw['execute'] = True
	if 'features' not in kw:
		if self.env.CXX:
			kw['features'] = ['cxx', 'cxxprogram']
		else:
			kw['features'] = ['c', 'cprogram']
	kw['fragment'] = LARGE_FRAGMENT
	kw['msg'] = 'Checking for large file support'
	ret = True
	try:
		# PE (win32) binaries are skipped; assume support there.
		if self.env.DEST_BINFMT != 'pe':
			ret = self.check(**kw)
	except self.errors.ConfigurationError:
		pass
	else:
		if ret:
			return True
	# Retry with the POSIX transitional flag before giving up.
	kw['msg'] = 'Checking for -D_FILE_OFFSET_BITS=64'
	kw['defines'] = ['_FILE_OFFSET_BITS=64']
	try:
		ret = self.check(**kw)
	except self.errors.ConfigurationError:
		pass
	else:
		self.define('_FILE_OFFSET_BITS', 64)
		return ret
	self.fatal('There is no support for large files')
########################################################################################
# The short-int arrays encode the marker strings 'BIGenDianSyS' and
# 'LiTTleEnDian'; whichever string ends up readable in the compiled
# object file reveals the target byte order (the ebcdic_* arrays cover
# EBCDIC character sets).  The object is scanned by grep_for_endianness.
ENDIAN_FRAGMENT = '''
short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
int use_ascii (int i) {
return ascii_mm[i] + ascii_ii[i];
}
short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
int use_ebcdic (int i) {
return ebcdic_mm[i] + ebcdic_ii[i];
}
extern int foo;
'''
class grep_for_endianness(Task.Task):
	"""Scan a compiled object file for the endianness marker strings."""
	color = 'PINK'
	def run(self):
		data = self.inputs[0].read(flags='rb').decode('iso8859-1')
		if 'LiTTleEnDian' in data:
			self.generator.tmp.append('little')
		elif 'BIGenDianSyS' in data:
			self.generator.tmp.append('big')
		else:
			# Neither marker found: report failure to the task framework.
			return -1
@feature('grep_for_endianness')
@after_method('process_source')
def grep_for_endianness_fun(self):
	"""
	Used by the endianness configuration test
	"""
	# Scan the first compiled object for the marker strings.
	self.create_task('grep_for_endianness', self.compiled_tasks[0].outputs[0])
@conf
def check_endianness(self):
	"""
	Execute a configuration test to determine the endianness
	"""
	# The grep task appends 'little' or 'big' to this list.
	tmp = []
	def check_msg(self):
		# NOTE(review): the 'self' parameter suggests this okmsg callback
		# is invoked as a method on the conf context -- confirm in waflib.
		return tmp[0]
	self.check(fragment=ENDIAN_FRAGMENT, features='c grep_for_endianness', msg="Checking for endianness", define='ENDIANNESS', tmp=tmp, okmsg=check_msg)
	return tmp[0]
| |
"""Support for monitoring an SABnzbd NZB client."""
from datetime import timedelta
import logging
from pysabnzbd import SabnzbdApi, SabnzbdApiException
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_SABNZBD
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SENSORS,
CONF_SSL,
DATA_GIGABYTES,
DATA_MEGABYTES,
DATA_RATE_MEGABYTES_PER_SECOND,
)
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.json import load_json, save_json
_LOGGER = logging.getLogger(__name__)

DOMAIN = "sabnzbd"
# NOTE(review): "sabznbd" is a historical typo kept as the hass.data key;
# renaming it would break existing consumers -- confirm before changing.
DATA_SABNZBD = "sabznbd"
# Configurator requests currently in flight, keyed by base URL.
_CONFIGURING = {}

ATTR_SPEED = "speed"
BASE_URL_FORMAT = "{}://{}:{}/"
CONFIG_FILE = "sabnzbd.conf"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "SABnzbd"
DEFAULT_PORT = 8080
DEFAULT_SPEED_LIMIT = "100"
DEFAULT_SSL = False

UPDATE_INTERVAL = timedelta(seconds=30)

SERVICE_PAUSE = "pause"
SERVICE_RESUME = "resume"
SERVICE_SET_SPEED = "set_speed"

SIGNAL_SABNZBD_UPDATED = "sabnzbd_updated"

# sensor key -> [friendly name, unit of measurement, SABnzbd API field]
SENSOR_TYPES = {
    "current_status": ["Status", None, "status"],
    "speed": ["Speed", DATA_RATE_MEGABYTES_PER_SECOND, "kbpersec"],
    "queue_size": ["Queue", DATA_MEGABYTES, "mb"],
    "queue_remaining": ["Left", DATA_MEGABYTES, "mbleft"],
    "disk_size": ["Disk", DATA_GIGABYTES, "diskspacetotal1"],
    "disk_free": ["Disk Free", DATA_GIGABYTES, "diskspace1"],
    "queue_count": ["Queue Count", None, "noofslots_total"],
    "day_size": ["Daily Total", DATA_GIGABYTES, "day_size"],
    "week_size": ["Weekly Total", DATA_GIGABYTES, "week_size"],
    "month_size": ["Monthly Total", DATA_GIGABYTES, "month_size"],
    "total_size": ["Total", DATA_GIGABYTES, "total_size"],
}

SPEED_LIMIT_SCHEMA = vol.Schema(
    {vol.Optional(ATTR_SPEED, default=DEFAULT_SPEED_LIMIT): cv.string}
)

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_API_KEY): cv.string,
                vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
                vol.Optional(CONF_PATH): cv.string,
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                vol.Optional(CONF_SENSORS): vol.All(
                    cv.ensure_list, [vol.In(SENSOR_TYPES)]
                ),
                vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_check_sabnzbd(sab_api):
    """Check if we can reach SABnzbd."""
    try:
        await sab_api.check_available()
    except SabnzbdApiException:
        _LOGGER.error("Connection to SABnzbd API failed")
        return False
    return True
async def async_configure_sabnzbd(
    hass, config, use_ssl, name=DEFAULT_NAME, api_key=None
):
    """Try to configure Sabnzbd and request api key if configuration fails."""
    host = config[CONF_HOST]
    port = config[CONF_PORT]
    web_root = config.get(CONF_PATH)
    uri_scheme = "https" if use_ssl else "http"
    base_url = BASE_URL_FORMAT.format(uri_scheme, host, port)
    if api_key is None:
        # Fall back to an API key saved by a previous configurator run.
        conf = await hass.async_add_job(load_json, hass.config.path(CONFIG_FILE))
        api_key = conf.get(base_url, {}).get(CONF_API_KEY, "")
    sab_api = SabnzbdApi(
        base_url, api_key, web_root=web_root, session=async_get_clientsession(hass)
    )
    if await async_check_sabnzbd(sab_api):
        async_setup_sabnzbd(hass, sab_api, config, name)
    else:
        # Unreachable or bad key: ask the user for the API key instead.
        async_request_configuration(hass, config, base_url, web_root)
async def async_setup(hass, config):
    """Set up the SABnzbd component."""

    async def sabnzbd_discovered(service, info):
        """Handle service discovery."""
        ssl = info.get("properties", {}).get("https", "0") == "1"
        await async_configure_sabnzbd(hass, info, ssl)

    discovery.async_listen(hass, SERVICE_SABNZBD, sabnzbd_discovered)

    conf = config.get(DOMAIN)
    if conf is not None:
        # YAML configuration is applied immediately, independent of discovery.
        use_ssl = conf[CONF_SSL]
        name = conf.get(CONF_NAME)
        api_key = conf.get(CONF_API_KEY)
        await async_configure_sabnzbd(hass, conf, use_ssl, name, api_key)
    return True
@callback
def async_setup_sabnzbd(hass, sab_api, config, name):
    """Set up SABnzbd sensors and services."""
    sab_api_data = SabnzbdApiData(sab_api, name, config.get(CONF_SENSORS, {}))

    if config.get(CONF_SENSORS):
        # Share the API wrapper with the sensor platform via hass.data.
        hass.data[DATA_SABNZBD] = sab_api_data
        hass.async_create_task(
            discovery.async_load_platform(hass, "sensor", DOMAIN, {}, config)
        )

    async def async_service_handler(service):
        """Handle service calls."""
        if service.service == SERVICE_PAUSE:
            await sab_api_data.async_pause_queue()
        elif service.service == SERVICE_RESUME:
            await sab_api_data.async_resume_queue()
        elif service.service == SERVICE_SET_SPEED:
            speed = service.data.get(ATTR_SPEED)
            await sab_api_data.async_set_queue_speed(speed)

    hass.services.async_register(
        DOMAIN, SERVICE_PAUSE, async_service_handler, schema=vol.Schema({})
    )
    hass.services.async_register(
        DOMAIN, SERVICE_RESUME, async_service_handler, schema=vol.Schema({})
    )
    hass.services.async_register(
        DOMAIN, SERVICE_SET_SPEED, async_service_handler, schema=SPEED_LIMIT_SCHEMA
    )

    async def async_update_sabnzbd(now):
        """Refresh SABnzbd queue data."""
        try:
            await sab_api.refresh_data()
            # Notify sensor entities that fresh data is available.
            async_dispatcher_send(hass, SIGNAL_SABNZBD_UPDATED, None)
        except SabnzbdApiException as err:
            _LOGGER.error(err)

    async_track_time_interval(hass, async_update_sabnzbd, UPDATE_INTERVAL)
@callback
def async_request_configuration(hass, config, host, web_root):
    """Request configuration steps from the user."""
    configurator = hass.components.configurator
    # If this host is already being configured, surface an error on the
    # existing card instead of opening a second request.
    if host in _CONFIGURING:
        configurator.async_notify_errors(
            _CONFIGURING[host], "Failed to register, please try again."
        )
        return

    async def async_configuration_callback(data):
        """Handle configuration changes."""
        api_key = data.get(CONF_API_KEY)
        sab_api = SabnzbdApi(
            host, api_key, web_root=web_root, session=async_get_clientsession(hass)
        )
        if not await async_check_sabnzbd(sab_api):
            return

        def success():
            """Signal successful setup."""
            # Persist the working API key for future startups.
            conf = load_json(hass.config.path(CONFIG_FILE))
            conf[host] = {CONF_API_KEY: api_key}
            save_json(hass.config.path(CONFIG_FILE), conf)
            req_config = _CONFIGURING.pop(host)
            configurator.request_done(req_config)

        hass.async_add_job(success)
        async_setup_sabnzbd(hass, sab_api, config, config.get(CONF_NAME, DEFAULT_NAME))

    _CONFIGURING[host] = configurator.async_request_config(
        DEFAULT_NAME,
        async_configuration_callback,
        description="Enter the API Key",
        submit_caption="Confirm",
        fields=[{"id": CONF_API_KEY, "name": "API Key", "type": ""}],
    )
class SabnzbdApiData:
    """Class for storing/refreshing sabnzbd api queue data."""

    def __init__(self, sab_api, name, sensors):
        """Initialize component."""
        self.sab_api = sab_api
        self.name = name
        self.sensors = sensors

    async def _guarded(self, api_call, *args):
        """Await an API call, logging errors and returning False on failure."""
        try:
            return await api_call(*args)
        except SabnzbdApiException as exc:
            _LOGGER.error(exc)
            return False

    async def async_pause_queue(self):
        """Pause Sabnzbd queue."""
        return await self._guarded(self.sab_api.pause_queue)

    async def async_resume_queue(self):
        """Resume Sabnzbd queue."""
        return await self._guarded(self.sab_api.resume_queue)

    async def async_set_queue_speed(self, limit):
        """Set speed limit for the Sabnzbd queue."""
        return await self._guarded(self.sab_api.set_speed_limit, limit)

    def get_queue_field(self, field):
        """Return the value for the given field from the Sabnzbd queue."""
        return self.sab_api.queue.get(field)
| |
from sympy import symbols, Symbol, sinh, nan, oo, zoo, pi, asinh, acosh, log, sqrt, \
coth, I, cot, E, tanh, tan, cosh, cos, S, sin, Rational, atanh, acoth, \
Integer, O, exp
from sympy.utilities.pytest import XFAIL
def test_sinh():
    """Automatic evaluation of sinh at special values and arguments."""
    x, y = symbols('x,y')
    k = Symbol('k', integer=True)

    # Extended values
    assert sinh(nan) == nan
    assert sinh(zoo) == nan
    assert sinh(oo) == oo
    assert sinh(-oo) == -oo
    assert sinh(0) == 0

    # Oddness: sinh(-x) == -sinh(x)
    assert sinh(1) == sinh(1)
    assert sinh(-1) == -sinh(1)
    assert sinh(x) == sinh(x)
    assert sinh(-x) == -sinh(x)
    assert sinh(pi) == sinh(pi)
    assert sinh(-pi) == -sinh(pi)
    assert sinh(2**1024 * E) == sinh(2**1024 * E)
    assert sinh(-2**1024 * E) == -sinh(2**1024 * E)

    # Purely imaginary arguments reduce via sinh(I*x) == I*sin(x)
    assert sinh(pi*I) == 0
    assert sinh(-pi*I) == 0
    assert sinh(2*pi*I) == 0
    assert sinh(-2*pi*I) == 0
    assert sinh(-3*10**73*pi*I) == 0
    assert sinh(7*10**103*pi*I) == 0
    assert sinh(pi*I/2) == I
    assert sinh(-pi*I/2) == -I
    assert sinh(5*pi*I/2) == I
    assert sinh(7*pi*I/2) == -I
    assert sinh(pi*I/3) == S.Half*sqrt(3)*I
    assert sinh(-2*pi*I/3) == -S.Half*sqrt(3)*I
    assert sinh(pi*I/4) == S.Half*sqrt(2)*I
    assert sinh(-pi*I/4) == -S.Half*sqrt(2)*I
    assert sinh(17*pi*I/4) == S.Half*sqrt(2)*I
    assert sinh(-3*pi*I/4) == -S.Half*sqrt(2)*I
    assert sinh(pi*I/6) == S.Half*I
    assert sinh(-pi*I/6) == -S.Half*I
    assert sinh(7*pi*I/6) == -S.Half*I
    assert sinh(-5*pi*I/6) == -S.Half*I
    assert sinh(pi*I/105) == sin(pi/105)*I
    assert sinh(-pi*I/105) == -sin(pi/105)*I

    # Generic complex and symbolic arguments
    assert sinh(2 + 3*I) == sinh(2 + 3*I)
    assert sinh(x*I) == sin(x)*I
    assert sinh(k*pi*I) == 0
    assert sinh(17*k*pi*I) == 0
    assert sinh(k*pi*I/2) == sin(k*pi/2)*I

    assert sinh(x).inverse() == asinh
def test_sinh_series():
    """Maclaurin expansion of sinh up to order x**10."""
    x = Symbol('x')
    expansion = x + x**3/6 + x**5/120 + x**7/5040 + x**9/362880 + O(x**10)
    assert sinh(x).series(x, 0, 10) == expansion
def test_cosh():
    """Automatic evaluation of cosh at special values and arguments."""
    x, y = symbols('x,y')
    k = Symbol('k', integer=True)

    # Extended values
    assert cosh(nan) == nan
    assert cosh(zoo) == nan
    assert cosh(oo) == oo
    assert cosh(-oo) == oo
    assert cosh(0) == 1

    # Evenness: cosh(-x) == cosh(x)
    assert cosh(1) == cosh(1)
    assert cosh(-1) == cosh(1)
    assert cosh(x) == cosh(x)
    assert cosh(-x) == cosh(x)
    assert cosh(pi*I) == cos(pi)
    assert cosh(-pi*I) == cos(pi)
    assert cosh(2**1024 * E) == cosh(2**1024 * E)
    assert cosh(-2**1024 * E) == cosh(2**1024 * E)

    # Zeros at odd multiples of pi*I/2.
    # (An exactly duplicated pair of the first two assertions was removed.)
    assert cosh(pi*I/2) == 0
    assert cosh(-pi*I/2) == 0
    assert cosh((-3*10**73+1)*pi*I/2) == 0
    assert cosh((7*10**103+1)*pi*I/2) == 0

    # Values at multiples of pi*I
    assert cosh(pi*I) == -1
    assert cosh(-pi*I) == -1
    assert cosh(5*pi*I) == -1
    assert cosh(8*pi*I) == 1

    # Exact values at rational multiples of pi*I
    assert cosh(pi*I/3) == S.Half
    assert cosh(-2*pi*I/3) == -S.Half
    assert cosh(pi*I/4) == S.Half*sqrt(2)
    assert cosh(-pi*I/4) == S.Half*sqrt(2)
    assert cosh(11*pi*I/4) == -S.Half*sqrt(2)
    assert cosh(-3*pi*I/4) == -S.Half*sqrt(2)
    assert cosh(pi*I/6) == S.Half*sqrt(3)
    assert cosh(-pi*I/6) == S.Half*sqrt(3)
    assert cosh(7*pi*I/6) == -S.Half*sqrt(3)
    assert cosh(-5*pi*I/6) == -S.Half*sqrt(3)
    assert cosh(pi*I/105) == cos(pi/105)
    assert cosh(-pi*I/105) == cos(pi/105)

    # Generic complex and symbolic arguments
    assert cosh(2 + 3*I) == cosh(2 + 3*I)
    assert cosh(x*I) == cos(x)
    assert cosh(k*pi*I) == cos(k*pi)
    assert cosh(17*k*pi*I) == cos(17*k*pi)
    assert cosh(k*pi) == cosh(k*pi)

    assert cosh(x).inverse() == acosh
def test_cosh_series():
    """Maclaurin expansion of cosh up to order x**10."""
    x = Symbol('x')
    expansion = 1 + x**2/2 + x**4/24 + x**6/720 + x**8/40320 + O(x**10)
    assert cosh(x).series(x, 0, 10) == expansion
def test_tanh():
    """Automatic evaluation of tanh at special values and arguments."""
    x, y = symbols('x,y')
    k = Symbol('k', integer=True)

    # Extended values
    assert tanh(nan) == nan
    assert tanh(zoo) == nan
    assert tanh(oo) == 1
    assert tanh(-oo) == -1
    assert tanh(0) == 0

    # Oddness: tanh(-x) == -tanh(x)
    assert tanh(1) == tanh(1)
    assert tanh(-1) == -tanh(1)
    assert tanh(x) == tanh(x)
    assert tanh(-x) == -tanh(x)
    assert tanh(pi) == tanh(pi)
    assert tanh(-pi) == -tanh(pi)
    assert tanh(2**1024 * E) == tanh(2**1024 * E)
    assert tanh(-2**1024 * E) == -tanh(2**1024 * E)

    # Purely imaginary arguments reduce via tanh(I*x) == I*tan(x)
    assert tanh(pi*I) == 0
    assert tanh(-pi*I) == 0
    assert tanh(2*pi*I) == 0
    assert tanh(-2*pi*I) == 0
    assert tanh(-3*10**73*pi*I) == 0
    assert tanh(7*10**103*pi*I) == 0
    # Poles of tan: these stay unevaluated
    assert tanh(pi*I/2) == tanh(pi*I/2)
    assert tanh(-pi*I/2) == -tanh(pi*I/2)
    assert tanh(5*pi*I/2) == tanh(5*pi*I/2)
    assert tanh(7*pi*I/2) == tanh(7*pi*I/2)
    assert tanh(pi*I/3) == sqrt(3)*I
    assert tanh(-2*pi*I/3) == sqrt(3)*I
    assert tanh(pi*I/4) == I
    assert tanh(-pi*I/4) == -I
    assert tanh(17*pi*I/4) == I
    assert tanh(-3*pi*I/4) == I
    assert tanh(pi*I/6) == I/sqrt(3)
    assert tanh(-pi*I/6) == -I/sqrt(3)
    assert tanh(7*pi*I/6) == I/sqrt(3)
    assert tanh(-5*pi*I/6) == I/sqrt(3)
    assert tanh(pi*I/105) == tan(pi/105)*I
    assert tanh(-pi*I/105) == -tan(pi/105)*I

    # Generic complex and symbolic arguments
    assert tanh(2 + 3*I) == tanh(2 + 3*I)
    assert tanh(x*I) == tan(x)*I
    assert tanh(k*pi*I) == 0
    assert tanh(17*k*pi*I) == 0
    assert tanh(k*pi*I/2) == tan(k*pi/2)*I

    assert tanh(x).inverse() == atanh
def test_tanh_series():
    """Maclaurin expansion of tanh up to order x**10."""
    x = Symbol('x')
    expansion = x - x**3/3 + 2*x**5/15 - 17*x**7/315 + 62*x**9/2835 + O(x**10)
    assert tanh(x).series(x, 0, 10) == expansion
def test_coth():
    """Automatic evaluation of coth at special values and arguments."""
    x, y = symbols('x,y')
    k = Symbol('k', integer=True)

    # Extended values
    assert coth(nan) == nan
    assert coth(zoo) == nan
    assert coth(oo) == 1
    assert coth(-oo) == -1
    assert coth(0) == coth(0)

    # Oddness: coth(-x) == -coth(x)
    assert coth(1) == coth(1)
    assert coth(-1) == -coth(1)
    assert coth(x) == coth(x)
    assert coth(-x) == -coth(x)
    assert coth(2**1024 * E) == coth(2**1024 * E)
    assert coth(-2**1024 * E) == -coth(2**1024 * E)

    # Purely imaginary arguments reduce via coth(I*x) == -I*cot(x).
    # (Exactly duplicated assertions for coth(pi*I), coth(-pi*I) and
    # coth(k*pi*I) were removed.)
    assert coth(pi*I) == -I*cot(pi)
    assert coth(-pi*I) == cot(pi)*I
    assert coth(2*pi*I) == -I*cot(2*pi)
    assert coth(-2*pi*I) == I*cot(2*pi)
    assert coth(-3*10**73*pi*I) == I*cot(3*10**73*pi)
    assert coth(7*10**103*pi*I) == -I*cot(7*10**103*pi)
    assert coth(pi*I/2) == 0
    assert coth(-pi*I/2) == 0
    assert coth(5*pi*I/2) == 0
    assert coth(7*pi*I/2) == 0
    assert coth(pi*I/3) == -I/sqrt(3)
    assert coth(-2*pi*I/3) == -I/sqrt(3)
    assert coth(pi*I/4) == -I
    assert coth(-pi*I/4) == I
    assert coth(17*pi*I/4) == -I
    assert coth(-3*pi*I/4) == -I
    assert coth(pi*I/6) == -sqrt(3)*I
    assert coth(-pi*I/6) == sqrt(3)*I
    assert coth(7*pi*I/6) == -sqrt(3)*I
    assert coth(-5*pi*I/6) == -sqrt(3)*I
    assert coth(pi*I/105) == -cot(pi/105)*I
    assert coth(-pi*I/105) == cot(pi/105)*I

    # Generic complex and symbolic arguments
    assert coth(2 + 3*I) == coth(2 + 3*I)
    assert coth(x*I) == -cot(x)*I
    assert coth(k*pi*I) == -cot(k*pi)*I
    assert coth(17*k*pi*I) == -cot(17*k*pi)*I

    assert coth(x).inverse() == acoth
def test_coth_series():
    """Laurent expansion of coth about 0 up to order x**8."""
    x = Symbol('x')
    expansion = 1/x + x/3 - x**3/45 + 2*x**5/945 - x**7/4725 + O(x**8)
    assert coth(x).series(x, 0, 8) == expansion
def test_asinh():
    """Automatic evaluation of asinh at special values."""
    x, y = symbols('x,y')
    # Oddness: asinh(-x) == -asinh(x)
    assert asinh(x) == asinh(x)
    assert asinh(-x) == -asinh(x)

    # Specific points
    assert asinh(nan) == nan
    assert asinh( 0) == 0
    assert asinh(+1) == log(sqrt(2)+1)
    assert asinh(-1) == log(sqrt(2)-1)
    assert asinh(I) == pi*I/2
    assert asinh(-I) == -pi*I/2
    assert asinh(I/2) == pi*I/6
    assert asinh(-I/2) == -pi*I/6

    # Infinities
    assert asinh(oo) == oo
    assert asinh(-oo) == -oo
    assert asinh(I*oo) == oo
    assert asinh(-I *oo) == -oo
    assert asinh(zoo) == zoo

    # Exact values at special imaginary arguments
    assert asinh(I *(sqrt(3) - 1)/(2**(S(3)/2))) == pi*I/12
    assert asinh(-I *(sqrt(3) - 1)/(2**(S(3)/2))) == -pi*I/12
    assert asinh(I*(sqrt(5)-1)/4) == pi*I/10
    assert asinh(-I*(sqrt(5)-1)/4) == -pi*I/10
    assert asinh(I*(sqrt(5)+1)/4) == 3*pi*I/10
    assert asinh(-I*(sqrt(5)+1)/4) == -3*pi*I/10
def test_asinh_series():
    """Series expansion and taylor_term recurrence for asinh."""
    x = Symbol('x')
    expansion = x - x**3/6 + 3*x**5/40 - 5*x**7/112 + O(x**8)
    assert asinh(x).series(x, 0, 8) == expansion
    fifth = asinh(x).taylor_term(5, x)
    assert fifth == 3*x**5/40
    # The recurrence computes term 7 from term 5.
    assert asinh(x).taylor_term(7, x, fifth, 0) == -5*x**7/112
def test_acosh():
    """Automatic evaluation of acosh at special values."""
    # TODO please write more tests -- see #652
    # From http://functions.wolfram.com/ElementaryFunctions/ArcCosh/03/01/
    # at specific points
    assert acosh(1) == 0
    assert acosh(-1) == pi*I
    assert acosh(0) == I*pi/2
    assert acosh(Rational(1,2)) == I*pi/3
    assert acosh(Rational(-1,2)) == 2*pi*I/3
    assert acosh(zoo) == oo

    assert acosh(I) == log(I*(1+sqrt(2)))
    assert acosh(-I) == log(-I*(1+sqrt(2)))

    # Exact values at special algebraic arguments
    assert acosh((sqrt(3)-1)/(2*sqrt(2))) == 5*pi*I/12
    assert acosh(-(sqrt(3)-1)/(2*sqrt(2))) == 7*pi*I/12
    assert acosh(sqrt(2)/2) == I*pi/4
    assert acosh(-sqrt(2)/2) == 3*I*pi/4
    assert acosh(sqrt(3)/2) == I*pi/6
    assert acosh(-sqrt(3)/2) == 5*I*pi/6
    assert acosh(sqrt(2+sqrt(2))/2) == I*pi/8
    assert acosh(-sqrt(2+sqrt(2))/2) == 7*I*pi/8
    assert acosh(sqrt(2-sqrt(2))/2) == 3*I*pi/8
    assert acosh(-sqrt(2-sqrt(2))/2) == 5*I*pi/8
    assert acosh((1+sqrt(3))/(2*sqrt(2))) == I*pi/12
    assert acosh(-(1+sqrt(3))/(2*sqrt(2))) == 11*I*pi/12
    assert acosh((sqrt(5)+1)/4) == I*pi/5
    assert acosh(-(sqrt(5)+1)/4) == 4*I*pi/5
def test_acosh_infinities():
    """acosh maps every directed infinity to +oo."""
    for arg in (oo, -oo, I*oo, -I*oo):
        assert acosh(arg) == oo
def test_acosh_series():
    """Series expansion and taylor_term recurrence for acosh."""
    x = Symbol('x')
    expansion = -I*x + pi*I/2 - I*x**3/6 - 3*I*x**5/40 - 5*I*x**7/112 + O(x**8)
    assert acosh(x).series(x, 0, 8) == expansion
    fifth = acosh(x).taylor_term(5, x)
    assert fifth == -3*I*x**5/40
    # The recurrence computes term 7 from term 5.
    assert acosh(x).taylor_term(7, x, fifth, 0) == -5*I*x**7/112
# TODO please write more tests -- see #652
def test_atanh():
    """Automatic evaluation of atanh at special values."""
    # TODO please write more tests -- see #652
    # From http://functions.wolfram.com/ElementaryFunctions/ArcTanh/03/01/
    # at specific points
    x = Symbol('x')

    #at specific points
    assert atanh(0) == 0
    assert atanh(I) == I*pi/4
    assert atanh(-I) == -I*pi/4
    assert atanh(1) == oo
    assert atanh(-1) == -oo

    # at infinites
    assert atanh(I*oo) == I*pi/2
    assert atanh(-I*oo) == -I*pi/2
    assert atanh(zoo) == nan

    #properties
    assert atanh(-x) == -atanh(x)

    # Exact values at special imaginary arguments
    assert atanh(I/sqrt(3)) == I*pi/6
    assert atanh(-I/sqrt(3)) == -I*pi/6
    assert atanh(I*sqrt(3)) == I*pi/3
    assert atanh(-I*sqrt(3)) == -I*pi/3
    assert atanh(I*(1+sqrt(2))) == 3*pi*I/8
    assert atanh(I*(sqrt(2)-1)) == pi*I/8
    assert atanh(I*(1-sqrt(2))) == -pi*I/8
    assert atanh(-I*(1+sqrt(2))) == -3*pi*I/8
    assert atanh(I*sqrt(5+2*sqrt(5))) == 2*I*pi/5
    assert atanh(-I*sqrt(5+2*sqrt(5))) == -2*I*pi/5
    assert atanh(I*(2-sqrt(3))) == pi*I/12
    assert atanh(I*(sqrt(3)-2)) == -pi*I/12

    # NOTE: also covered by test_atanh_infinities below
    assert atanh(oo) == -I*pi/2
def test_atanh_series():
    """Maclaurin expansion of atanh up to order x**10."""
    x = Symbol('x')
    expansion = x + x**3/3 + x**5/5 + x**7/7 + x**9/9 + O(x**10)
    assert atanh(x).series(x, 0, 10) == expansion
def test_atanh_infinities():
    """atanh at the real infinities (branch-cut limit values)."""
    for arg, value in ((oo, -I*pi/2), (-oo, I*pi/2)):
        assert atanh(arg) == value
# TODO please write more tests -- see #652
def test_acoth():
    """Automatic evaluation of acoth at special values."""
    # TODO please write more tests -- see #652
    # From http://functions.wolfram.com/ElementaryFunctions/ArcCoth/03/01/
    # at specific points
    x = Symbol('x')

    #at specific points
    assert acoth(0) == I*pi/2
    assert acoth(I) == -I*pi/4
    assert acoth(-I) == I*pi/4
    assert acoth(1) == oo
    assert acoth(-1) == -oo

    # at infinites
    assert acoth(oo) == 0
    assert acoth(-oo) == 0
    assert acoth(I*oo) == 0
    assert acoth(-I*oo) == 0
    assert acoth(zoo) == 0

    #properties
    assert acoth(-x) == -acoth(x)

    # Exact values at special imaginary arguments
    assert acoth(I/sqrt(3)) == -I*pi/3
    assert acoth(-I/sqrt(3)) == I*pi/3
    assert acoth(I*sqrt(3)) == -I*pi/6
    assert acoth(-I*sqrt(3)) == I*pi/6
    assert acoth(I*(1+sqrt(2))) == -pi*I/8
    assert acoth(-I*(sqrt(2)+1)) == pi*I/8
    assert acoth(I*(1-sqrt(2))) == 3*pi*I/8
    assert acoth(I*(sqrt(2)-1)) == -3*pi*I/8
    assert acoth(I*sqrt(5+2*sqrt(5))) == -I*pi/10
    assert acoth(-I*sqrt(5+2*sqrt(5))) == I*pi/10
    assert acoth(I*(2+sqrt(3))) == -pi*I/12
    assert acoth(-I*(2+sqrt(3))) == pi*I/12
    assert acoth(I*(2-sqrt(3))) == -5*pi*I/12
    assert acoth(I*(sqrt(3)-2)) == 5*pi*I/12
def test_acoth_series():
    """Series expansion of acoth about 0 up to order x**10."""
    x = Symbol('x')
    expansion = I*pi/2 + x + x**3/3 + x**5/5 + x**7/7 + x**9/9 + O(x**10)
    assert acoth(x).series(x, 0, 10) == expansion
def test_leading_term():
    """as_leading_term for hyperbolic functions and their inverses."""
    x = Symbol('x')
    for fn in (sinh, cosh, tanh, coth):
        assert fn(x).as_leading_term(x) == 1
    for fn in (asinh, acosh, atanh, acoth):
        assert fn(x).as_leading_term(x) == x
    # At non-zero or non-series points the expression is its own leading term.
    for fn in (sinh, cosh, tanh, coth, asinh, acosh, atanh, acoth):
        for point in (1/x, S.Half):
            expr = fn(point)
            assert expr.as_leading_term(x) == expr
def test_complex():
    """Conjugation and complex expansion of the hyperbolic functions."""
    a, b = symbols('a,b', real=True)
    z = a + b*I
    for func in (sinh, cosh, tanh, coth):
        assert func(z).conjugate() == func(a - b*I)
    for deep in (True, False):
        # common denominators of the tanh/coth expansions
        den_cos = cos(b)**2 + sinh(a)**2
        den_sin = sin(b)**2 + sinh(a)**2
        assert sinh(z).expand(complex=True, deep=deep) == \
            sinh(a)*cos(b) + I*cosh(a)*sin(b)
        assert cosh(z).expand(complex=True, deep=deep) == \
            cosh(a)*cos(b) + I*sinh(a)*sin(b)
        assert tanh(z).expand(complex=True, deep=deep) == \
            sinh(a)*cosh(a)/den_cos + I*sin(b)*cos(b)/den_cos
        assert coth(z).expand(complex=True, deep=deep) == \
            sinh(a)*cosh(a)/den_sin - I*sin(b)*cos(b)/den_sin
def test_complex_2899():
    """Complex expansion of a purely real argument is a no-op (issue 2899)."""
    a, b = symbols('a,b', real=True)
    for func in (sinh, cosh, tanh, coth):
        for deep in (True, False):
            assert func(a).expand(complex=True, deep=deep) == func(a)
def test_simplifications():
    """Automatic simplification of hyperbolic(inverse-hyperbolic(x))."""
    x = Symbol('x')

    # sinh of each inverse
    assert sinh(asinh(x)) == x
    assert sinh(acosh(x)) == sqrt(x - 1) * sqrt(x + 1)
    assert sinh(atanh(x)) == x/sqrt(1 - x**2)
    assert sinh(acoth(x)) == 1/(sqrt(x - 1) * sqrt(x + 1))

    # cosh of each inverse
    assert cosh(asinh(x)) == sqrt(1 + x**2)
    assert cosh(acosh(x)) == x
    assert cosh(atanh(x)) == 1/sqrt(1 - x**2)
    assert cosh(acoth(x)) == x/(sqrt(x - 1) * sqrt(x + 1))

    # tanh of each inverse
    assert tanh(asinh(x)) == x/sqrt(1 + x**2)
    assert tanh(acosh(x)) == sqrt(x - 1) * sqrt(x + 1) / x
    assert tanh(atanh(x)) == x
    assert tanh(acoth(x)) == 1/x

    # coth of each inverse
    assert coth(asinh(x)) == sqrt(1 + x**2)/x
    assert coth(acosh(x)) == x/(sqrt(x - 1) * sqrt(x + 1))
    assert coth(atanh(x)) == 1/x
    assert coth(acoth(x)) == x
def test_issue1037():
    """cosh(asinh(3/2)) evaluates to sqrt(13/4) (regression for issue 1037)."""
    three_halves = Integer(3)/2
    assert cosh(asinh(three_halves)) == sqrt(Integer(13)/4)
def test_sinh_rewrite():
    """sinh rewritten in terms of exp, cosh, tanh and coth."""
    x = Symbol('x')
    assert sinh(x).rewrite(exp) == (exp(x) - exp(-x))/2
    assert sinh(x).rewrite(cosh) == -I*cosh(x + I*pi/2)
    th = tanh(S.Half*x)
    assert sinh(x).rewrite(tanh) == 2*th/(1 - th**2)
    ch = coth(S.Half*x)
    assert sinh(x).rewrite(coth) == 2*ch/(ch**2 - 1)
def test_cosh_rewrite():
    """cosh rewritten in terms of exp, sinh, tanh and coth."""
    x = Symbol('x')
    assert cosh(x).rewrite(exp) == (exp(x) + exp(-x))/2
    assert cosh(x).rewrite(sinh) == -I*sinh(x + I*pi/2)
    th_sq = tanh(S.Half*x)**2
    assert cosh(x).rewrite(tanh) == (1 + th_sq)/(1 - th_sq)
    ch_sq = coth(S.Half*x)**2
    assert cosh(x).rewrite(coth) == (ch_sq + 1)/(ch_sq - 1)
def test_tanh_rewrite():
    """tanh rewritten in terms of exp, sinh, cosh and coth."""
    x = Symbol('x')
    assert tanh(x).rewrite(exp) == (exp(x) - exp(-x))/(exp(x) + exp(-x))
    assert tanh(x).rewrite(sinh) == I*sinh(x)/sinh(I*pi/2 - x)
    assert tanh(x).rewrite(cosh) == I*cosh(I*pi/2 - x)/cosh(x)
    assert tanh(x).rewrite(coth) == 1/coth(x)
def test_coth_rewrite():
    """coth rewritten in terms of exp, sinh, cosh and tanh."""
    x = Symbol('x')
    assert coth(x).rewrite(exp) == (exp(x) + exp(-x))/(exp(x) - exp(-x))
    assert coth(x).rewrite(sinh) == -I*sinh(I*pi/2 - x)/sinh(x)
    assert coth(x).rewrite(cosh) == -I*cosh(x)/cosh(I*pi/2 - x)
    assert coth(x).rewrite(tanh) == 1/tanh(x)
def test_derivs():
    """First derivatives of the hyperbolic functions and their inverses."""
    x = Symbol('x')
    # direct functions
    assert sinh(x).diff(x) == cosh(x)
    assert cosh(x).diff(x) == sinh(x)
    assert tanh(x).diff(x) == -tanh(x)**2 + 1
    assert coth(x).diff(x) == -sinh(x)**(-2)
    # inverse functions
    assert asinh(x).diff(x) == 1/sqrt(x**2 + 1)
    assert acosh(x).diff(x) == 1/sqrt(x**2 - 1)
    assert atanh(x).diff(x) == 1/(-x**2 + 1)
    assert acoth(x).diff(x) == 1/(-x**2 + 1)
| |
#!/usr/bin/python3
#
# Copyright (C) 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing the hypervisor.hv_kvm module"""
import threading
import tempfile
import unittest
import socket
import os
import struct
import re
from ganeti import serializer
from ganeti import constants
from ganeti import compat
from ganeti import objects
from ganeti import errors
from ganeti import utils
from ganeti import pathutils
from ganeti.hypervisor import hv_kvm
import ganeti.hypervisor.hv_kvm.netdev as netdev
import ganeti.hypervisor.hv_kvm.monitor as monitor
import ganeti.hypervisor.hv_kvm.validation as validation
import mock
import testutils
from testutils.config_mock import ConfigMock
class QmpStub(threading.Thread):
    """Stub for a QMP endpoint for a KVM instance.

    Runs a UNIX-socket server on its own thread, performs the initial QMP
    handshake (banner, qmp_capabilities, query-commands) and then replays a
    scripted list of raw responses, giving a QmpConnection under test a peer
    to talk to.
    """

    # Greeting a real QEMU monitor sends immediately after connect
    _QMP_BANNER_DATA = {
        "QMP": {
            "version": {
                "package": "",
                "qemu": {
                    "micro": 50,
                    "minor": 13,
                    "major": 0,
                },
                "capabilities": [],
            },
        }
    }
    # Reply to the qmp_capabilities handshake message
    _EMPTY_RESPONSE = {
        "return": [],
    }
    # Reply to query-commands: the commands this stub claims to support
    _SUPPORTED_COMMANDS = {
        "return": [
            {"name": "command"},
            {"name": "query-kvm"},
            {"name": "eject"},
            {"name": "query-status"},
            {"name": "query-name"},
        ]
    }

    def __init__(self, socket_filename, server_responses):
        """Creates a QMP stub

        @type socket_filename: string
        @param socket_filename: filename of the UNIX socket that will be created
                                by this class and used for the communication
        @type server_responses: list
        @param server_responses: list of responses that the server sends in
                                 response to whatever it receives
        """
        threading.Thread.__init__(self)
        self.socket_filename = socket_filename
        # Copy, so pop(0) in run() does not mutate the caller's list
        self.script = server_responses[:]
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.socket.bind(self.socket_filename)
        self.socket.listen(1)

    def run(self):
        # Hypothesis: the messages we receive contain only a complete QMP
        # message encoded in JSON.
        conn, addr = self.socket.accept()
        # Send the banner as the first thing
        conn.send(self.encode_string(self._QMP_BANNER_DATA))
        # Expect qmp_capabilities and return an empty response
        conn.recv(4096)
        conn.send(self.encode_string(self._EMPTY_RESPONSE))
        # Expect query-commands and return the list of supported commands
        conn.recv(4096)
        conn.send(self.encode_string(self._SUPPORTED_COMMANDS))
        while True:
            # We ignore the expected message, as the purpose of this object is
            # not to verify the correctness of the communication but to act as
            # a partner for the SUT (System Under Test, that is QmpConnection)
            msg = conn.recv(4096)
            if not msg:
                break
            if not self.script:
                break
            response = self.script.pop(0)
            if isinstance(response, str):
                # One logical message in a single send()
                conn.send(response.encode("utf-8"))
            elif isinstance(response, list):
                # One logical message deliberately split over several send()s
                for chunk in response:
                    conn.send(chunk.encode("utf-8"))
            else:
                raise errors.ProgrammerError("Unknown response type for %s" % response)
        conn.close()

    def encode_string(self, message):
        # Serialize to JSON and append the QMP end-of-message token
        return (serializer.DumpJson(message) +
                hv_kvm.QmpConnection._MESSAGE_END_TOKEN)

    def shutdown(self):
        self.socket.close()
class TestParameterCheck(testutils.GanetiTestCase):
    """Tests for the validation.check_* helpers.

    These are pure consistency checks between hypervisor parameters; each
    invalid combination must raise HypervisorError, each valid one must
    return a truthy value.
    """

    def testInvalidVncParameters(self):
        # x509 verification requested without a certificate
        invalid_data = {
            constants.HV_VNC_X509_VERIFY: True,
            constants.HV_VNC_X509: None
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_vnc_parameters, invalid_data)

    def testValidVncParameters(self):
        valid_data = {
            constants.HV_VNC_X509_VERIFY: True,
            constants.HV_VNC_X509: "mycert.pem"
        }
        self.assertTrue(validation.check_vnc_parameters(valid_data))

    def testInvalidSecurityModel(self):
        # "user" model requires a security domain...
        invalid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_USER,
            constants.HV_SECURITY_DOMAIN: None
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_security_model, invalid_data)
        # ...while "none" and "pool" must not specify one
        invalid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_NONE,
            constants.HV_SECURITY_DOMAIN: "secure_user"
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_security_model, invalid_data)
        invalid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_POOL,
            constants.HV_SECURITY_DOMAIN: "secure_user"
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_security_model, invalid_data)

    def testValidSecurityModel(self):
        valid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_USER,
            constants.HV_SECURITY_DOMAIN: "secure_user"
        }
        self.assertTrue(validation.check_security_model(valid_data))
        valid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_POOL,
            constants.HV_SECURITY_DOMAIN: None
        }
        self.assertTrue(validation.check_security_model(valid_data))
        valid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_NONE,
            constants.HV_SECURITY_DOMAIN: None
        }
        self.assertTrue(validation.check_security_model(valid_data))

    def testInvalidBootParameters(self):
        # Booting from CDROM requires an image path...
        invalid_data = {
            constants.HV_BOOT_ORDER: constants.HT_BO_CDROM,
            constants.HV_CDROM_IMAGE_PATH: None,
            constants.HV_KERNEL_PATH: "/some/path",
            constants.HV_ROOT_PATH: "/"
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_boot_parameters, invalid_data)
        # ...and a kernel path requires a root path
        invalid_data = {
            constants.HV_BOOT_ORDER: constants.HT_BO_CDROM,
            constants.HV_CDROM_IMAGE_PATH: "/cd.iso",
            constants.HV_KERNEL_PATH: "/some/path",
            constants.HV_ROOT_PATH: None
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_boot_parameters, invalid_data)

    def testValidBootParameters(self):
        valid_data = {
            constants.HV_BOOT_ORDER: constants.HT_BO_CDROM,
            constants.HV_CDROM_IMAGE_PATH: "/cd.iso",
            constants.HV_KERNEL_PATH: "/some/path",
            constants.HV_ROOT_PATH: "/"
        }
        self.assertTrue(validation.check_boot_parameters(valid_data))
        valid_data = {
            constants.HV_BOOT_ORDER: constants.HT_BO_DISK,
            constants.HV_CDROM_IMAGE_PATH: None,
            constants.HV_KERNEL_PATH: "/some/path",
            constants.HV_ROOT_PATH: "/"
        }
        self.assertTrue(validation.check_boot_parameters(valid_data))
        valid_data = {
            constants.HV_BOOT_ORDER: constants.HT_BO_DISK,
            constants.HV_CDROM_IMAGE_PATH: None,
            constants.HV_KERNEL_PATH: None,
            constants.HV_ROOT_PATH: None
        }
        self.assertTrue(validation.check_boot_parameters(valid_data))

    def testInvalidConsoleParameters(self):
        # Serial console needs a speed...
        invalid_data = {
            constants.HV_SERIAL_CONSOLE: True,
            constants.HV_SERIAL_SPEED: None,
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_console_parameters, invalid_data)
        # ...and the speed must be one of the supported baud rates
        invalid_data = {
            constants.HV_SERIAL_CONSOLE: True,
            constants.HV_SERIAL_SPEED: 1,
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_console_parameters, invalid_data)

    def testValidConsoleParameters(self):
        valid_data = {
            constants.HV_SERIAL_CONSOLE: False
        }
        self.assertTrue(validation.check_console_parameters(valid_data))
        # Every declared serial speed must be accepted
        for speed in constants.VALID_SERIAL_SPEEDS:
            valid_data = {
                constants.HV_SERIAL_CONSOLE: True,
                constants.HV_SERIAL_SPEED: speed
            }
            self.assertTrue(validation.check_console_parameters(valid_data),
                            "Testing serial console speed %d" % speed)

    def testInvalidSpiceParameters(self):
        # Bind address and IP version must agree (IPv4 addr vs. IPv6 version)
        invalid_data = {
            constants.HV_KVM_SPICE_BIND: "0.0.0.0",
            constants.HV_KVM_SPICE_IP_VERSION: constants.IP6_VERSION
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_spice_parameters, invalid_data)
        invalid_data = {
            constants.HV_KVM_SPICE_BIND: "::",
            constants.HV_KVM_SPICE_IP_VERSION: constants.IP4_VERSION
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_spice_parameters, invalid_data)
        # SPICE options set while SPICE itself (the bind address) is disabled
        invalid_data = {
            constants.HV_KVM_SPICE_BIND: None,
            constants.HV_KVM_SPICE_IP_VERSION: None,
            constants.HV_KVM_SPICE_PASSWORD_FILE: "password.txt",
            constants.HV_KVM_SPICE_LOSSLESS_IMG_COMPR: None,
            constants.HV_KVM_SPICE_JPEG_IMG_COMPR: None,
            constants.HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR: None,
            constants.HV_KVM_SPICE_STREAMING_VIDEO_DETECTION: None,
            constants.HV_KVM_SPICE_USE_TLS: True
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_spice_parameters, invalid_data)

    def testValidSpiceParameters(self):
        # All-off is consistent
        valid_data = {
            constants.HV_KVM_SPICE_BIND: None,
            constants.HV_KVM_SPICE_IP_VERSION: None,
            constants.HV_KVM_SPICE_PASSWORD_FILE: None,
            constants.HV_KVM_SPICE_LOSSLESS_IMG_COMPR: None,
            constants.HV_KVM_SPICE_JPEG_IMG_COMPR: None,
            constants.HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR: None,
            constants.HV_KVM_SPICE_STREAMING_VIDEO_DETECTION: None,
            constants.HV_KVM_SPICE_USE_TLS: None
        }
        self.assertTrue(validation.check_spice_parameters(valid_data))
        # Fully configured over IPv4
        valid_data = {
            constants.HV_KVM_SPICE_BIND: "0.0.0.0",
            constants.HV_KVM_SPICE_IP_VERSION: constants.IP4_VERSION,
            constants.HV_KVM_SPICE_PASSWORD_FILE: "password.txt",
            constants.HV_KVM_SPICE_LOSSLESS_IMG_COMPR: "glz",
            constants.HV_KVM_SPICE_JPEG_IMG_COMPR: "never",
            constants.HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR: "never",
            constants.HV_KVM_SPICE_STREAMING_VIDEO_DETECTION: "off",
            constants.HV_KVM_SPICE_USE_TLS: True
        }
        self.assertTrue(validation.check_spice_parameters(valid_data))
        # Fully configured over IPv6
        valid_data = {
            constants.HV_KVM_SPICE_BIND: "::",
            constants.HV_KVM_SPICE_IP_VERSION: constants.IP6_VERSION,
            constants.HV_KVM_SPICE_PASSWORD_FILE: "password.txt",
            constants.HV_KVM_SPICE_LOSSLESS_IMG_COMPR: "glz",
            constants.HV_KVM_SPICE_JPEG_IMG_COMPR: "never",
            constants.HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR: "never",
            constants.HV_KVM_SPICE_STREAMING_VIDEO_DETECTION: "off",
            constants.HV_KVM_SPICE_USE_TLS: True
        }
        self.assertTrue(validation.check_spice_parameters(valid_data))

    def testInvalidDiskCacheParameters(self):
        # aio=native is only consistent with cache=none
        invalid_data = {
            constants.HV_KVM_DISK_AIO: constants.HT_KVM_AIO_NATIVE,
            constants.HV_DISK_CACHE: constants.HT_CACHE_WBACK
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_disk_cache_parameters, invalid_data)
        invalid_data = {
            constants.HV_KVM_DISK_AIO: constants.HT_KVM_AIO_NATIVE,
            constants.HV_DISK_CACHE: constants.HT_CACHE_WTHROUGH
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_disk_cache_parameters, invalid_data)
        invalid_data = {
            constants.HV_KVM_DISK_AIO: constants.HT_KVM_AIO_NATIVE,
            constants.HV_DISK_CACHE: constants.HT_CACHE_DEFAULT
        }
        self.assertRaises(errors.HypervisorError,
                          validation.check_disk_cache_parameters, invalid_data)

    def testValidDiskCacheParameters(self):
        # aio=threads works with any cache mode
        valid_data = {
            constants.HV_KVM_DISK_AIO: constants.HT_KVM_AIO_THREADS,
            constants.HV_DISK_CACHE: constants.HT_CACHE_WBACK
        }
        self.assertTrue(validation.check_disk_cache_parameters(valid_data))
        valid_data = {
            constants.HV_KVM_DISK_AIO: constants.HT_KVM_AIO_THREADS,
            constants.HV_DISK_CACHE: constants.HT_CACHE_WTHROUGH
        }
        self.assertTrue(validation.check_disk_cache_parameters(valid_data))
        valid_data = {
            constants.HV_KVM_DISK_AIO: constants.HT_KVM_AIO_THREADS,
            constants.HV_DISK_CACHE: constants.HT_CACHE_DEFAULT
        }
        self.assertTrue(validation.check_disk_cache_parameters(valid_data))
        # aio=native requires cache=none
        valid_data = {
            constants.HV_KVM_DISK_AIO: constants.HT_KVM_AIO_NATIVE,
            constants.HV_DISK_CACHE: constants.HT_CACHE_NONE
        }
        self.assertTrue(validation.check_disk_cache_parameters(valid_data))
class TestParameterValidation(testutils.GanetiTestCase):
    """Tests for the validation.validate_* helpers.

    Unlike the check_* helpers, these validate parameters against the
    environment (resolvable addresses, existing users, KVM capabilities
    parsed from captured help/machine output).
    """

    def testInvalidVncParameters(self):
        # invalid IPv4 address
        invalid_data = {
            constants.HV_VNC_BIND_ADDRESS: "192.0.2.5.5",
        }
        self.assertRaises(errors.HypervisorError,
                          validation.validate_vnc_parameters, invalid_data)
        # invalid network interface
        invalid_data = {
            constants.HV_VNC_BIND_ADDRESS: "doesnotexist0",
        }
        self.assertRaises(errors.HypervisorError,
                          validation.validate_vnc_parameters, invalid_data)

    def testValidVncParameters(self):
        # Both a literal address and an interface name are acceptable
        valid_data = {
            constants.HV_VNC_BIND_ADDRESS: "127.0.0.1"
        }
        self.assertTrue(validation.validate_vnc_parameters(valid_data))
        valid_data = {
            constants.HV_VNC_BIND_ADDRESS: "lo"
        }
        self.assertTrue(validation.validate_vnc_parameters(valid_data))

    def testInvalidSecurityModelParameters(self):
        # The security domain must be an existing local user
        invalid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_USER,
            constants.HV_SECURITY_DOMAIN: "really-non-existing-user"
        }
        self.assertRaises(errors.HypervisorError,
                          validation.validate_security_model, invalid_data)

    def testValidSecurityModelParameters(self):
        valid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_NONE
        }
        self.assertTrue(validation.validate_security_model(valid_data))
        valid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_POOL
        }
        self.assertTrue(validation.validate_security_model(valid_data))
        valid_data = {
            constants.HV_SECURITY_MODEL: constants.HT_SM_USER,
            constants.HV_SECURITY_DOMAIN: "root"
        }
        self.assertTrue(validation.validate_security_model(valid_data))

    def testInvalidMachineVersion(self):
        kvm_machine_output = testutils.ReadTestData("kvm_6.0.0_machine.txt")
        invalid_data = {
            constants.HV_KVM_MACHINE_VERSION: "some-invalid-machine-type"
        }
        self.assertRaises(errors.HypervisorError,
                          validation.validate_machine_version, invalid_data,
                          kvm_machine_output)

    def testValidMachineVersion(self):
        kvm_machine_output = testutils.ReadTestData("kvm_6.0.0_machine.txt")
        valid_data = {
            constants.HV_KVM_MACHINE_VERSION: "pc-i440fx-6.0"
        }
        self.assertTrue(validation.validate_machine_version(valid_data,
                                                            kvm_machine_output))

    def testInvalidSpiceParameters(self):
        kvm_help_too_old = testutils.ReadTestData("kvm_0.9.1_help.txt")
        kvm_help_working = testutils.ReadTestData("kvm_1.1.2_help.txt")
        # SPICE and VNC are mutually exclusive
        invalid_data = {
            constants.HV_KVM_SPICE_BIND: "0.0.0.0",
            constants.HV_VNC_BIND_ADDRESS: "0.0.0.0"
        }
        self.assertRaises(errors.HypervisorError,
                          validation.validate_spice_parameters, invalid_data,
                          kvm_help_working)
        # This KVM version predates SPICE support
        invalid_data = {
            constants.HV_KVM_SPICE_BIND: "0.0.0.0",
            constants.HV_VNC_BIND_ADDRESS: None
        }
        self.assertRaises(errors.HypervisorError,
                          validation.validate_spice_parameters, invalid_data,
                          kvm_help_too_old)
        # The bind target must be a valid address or interface
        invalid_data = {
            constants.HV_KVM_SPICE_BIND: "invalid-interface0",
            constants.HV_VNC_BIND_ADDRESS: None
        }
        self.assertRaises(errors.HypervisorError,
                          validation.validate_spice_parameters, invalid_data,
                          kvm_help_working)

    def testValidSpiceParameters(self):
        kvm_help_working = testutils.ReadTestData("kvm_1.1.2_help.txt")
        # IPv4 address, IPv6 address and interface name are all acceptable
        valid_data = {
            constants.HV_KVM_SPICE_BIND: "0.0.0.0",
            constants.HV_VNC_BIND_ADDRESS: None
        }
        self.assertTrue(validation.validate_spice_parameters(valid_data,
                                                             kvm_help_working))
        valid_data = {
            constants.HV_KVM_SPICE_BIND: "::",
            constants.HV_VNC_BIND_ADDRESS: None
        }
        self.assertTrue(validation.validate_spice_parameters(valid_data,
                                                             kvm_help_working))
        valid_data = {
            constants.HV_KVM_SPICE_BIND: "lo",
            constants.HV_VNC_BIND_ADDRESS: None
        }
        self.assertTrue(validation.validate_spice_parameters(valid_data,
                                                             kvm_help_working))
class TestDiskParameters(testutils.GanetiTestCase):
    """Tests for KVMHypervisor._GenerateDiskAioCacheParameters."""

    def testGenerateDiskAioCacheParameters(self):
        # Each case maps (aio mode, cache mode, disk template) to the expected
        # ",aio=...[,cache=...]" suffix appended to the KVM -drive argument.
        test_cases = {
            "aio_threaded_safe_storage_default_cache": {
                "disk_aio": constants.HT_KVM_AIO_THREADS,
                "disk_cache": constants.HT_CACHE_DEFAULT,
                "dev_type": constants.DT_DRBD8,
                "expected_string": ",aio=threads"
            },
            # unsafe (shared) storage forces cache=none even for default cache
            "aio_threaded_unsafe_storage_default_cache": {
                "disk_aio": constants.HT_KVM_AIO_THREADS,
                "disk_cache": constants.HT_CACHE_DEFAULT,
                "dev_type": constants.DT_SHARED_FILE,
                "expected_string": ",aio=threads,cache=none"
            },
            "aio_threaded_safe_storage_writeback_cache": {
                "disk_aio": constants.HT_KVM_AIO_THREADS,
                "disk_cache": constants.HT_CACHE_WBACK,
                "dev_type": constants.DT_RBD,
                "expected_string": ",aio=threads,cache=writeback"
            },
            # aio=native always pins cache=none
            "aio_native_safe_storage_none_cache": {
                "disk_aio": constants.HT_KVM_AIO_NATIVE,
                "disk_cache": constants.HT_CACHE_NONE,
                "dev_type": constants.DT_DRBD8,
                "expected_string": ",aio=native,cache=none"
            },
            "aio_native_safe_storage_writethrough_cache": {
                "disk_aio": constants.HT_KVM_AIO_NATIVE,
                "disk_cache": constants.HT_CACHE_WTHROUGH,
                "dev_type": constants.DT_DRBD8,
                "expected_string": ",aio=native,cache=none"
            },
            "aio_native_unsafe_storage_writethrough_cache": {
                "disk_aio": constants.HT_KVM_AIO_NATIVE,
                "disk_cache": constants.HT_CACHE_WTHROUGH,
                "dev_type": constants.DT_GLUSTER,
                "expected_string": ",aio=native,cache=none"
            },
            # unset aio falls back to threads
            "aio_unset_safe_storage_none_cache": {
                "disk_aio": None,
                "disk_cache": constants.HT_CACHE_NONE,
                "dev_type": constants.DT_DRBD8,
                "expected_string": ",aio=threads,cache=none"
            }
        }
        for name, data in test_cases.items():
            self.assertEqual(hv_kvm.KVMHypervisor._GenerateDiskAioCacheParameters(
                data["disk_aio"], data["disk_cache"], data["dev_type"]),
                data["expected_string"], name)
class TestQmpMessage(testutils.GanetiTestCase):
    """Unit tests for the QmpMessage container type."""

    def testSerialization(self):
        """A QmpMessage round-trips through its one-line JSON wire form."""
        payload = {
            "execute": "command",
            "arguments": ["a", "b", "c"],
        }
        qmp_msg = hv_kvm.QmpMessage(payload)
        for key, value in payload.items():
            self.assertEqual(qmp_msg[key], value)
        wire = qmp_msg.to_bytes()
        self.assertEqual(len(wire.splitlines()), 1,
                         msg="Got multi-line message")
        decoded = hv_kvm.QmpMessage.BuildFromJsonString(wire)
        self.assertEqual(decoded, qmp_msg)
        self.assertEqual(len(decoded), len(payload))

    def testDelete(self):
        """Deleting a key shrinks the message by exactly one entry."""
        victim = "execute"
        qmp_msg = hv_kvm.QmpMessage({
            victim: "command",
            "arguments": ["a", "b", "c"],
        })
        size_before = len(qmp_msg)
        del qmp_msg[victim]
        self.assertEqual(size_before - 1, len(qmp_msg))
class TestQmp(testutils.GanetiTestCase):
    """End-to-end tests of QmpConnection against the QmpStub server."""

    # Commands the client issues, in order
    REQUESTS = [
        {"execute": "query-kvm", "arguments": []},
        {"execute": "eject", "arguments": {"device": "ide1-cd0"}},
        {"execute": "query-status", "arguments": []},
        {"execute": "query-name", "arguments": []},
    ]

    # Raw wire data the stub replays, matched 1:1 with REQUESTS; the variety
    # exercises message reassembly in the client
    SERVER_RESPONSES = [
        # One message, one send()
        '{"return": {"enabled": true, "present": true}}\r\n',
        # Message sent using multiple send()
        ['{"retur', 'n": {}}\r\n'],
        # Multiple messages sent using one send()
        '{"return": [{"name": "quit"}, {"name": "eject"}]}\r\n'
        '{"return": {"running": true, "singlestep": false}}\r\n',
    ]

    # Decoded values Execute() should return for each request
    EXPECTED_RESPONSES = [
        {"enabled": True, "present": True},
        {},
        [{"name": "quit"}, {"name": "eject"}],
        {"running": True, "singlestep": False},
    ]

    def testQmp(self):
        # Set up the stub; the socket path must not exist before bind()
        socket_file = tempfile.NamedTemporaryFile()
        os.remove(socket_file.name)
        qmp_stub = QmpStub(socket_file.name, self.SERVER_RESPONSES)
        qmp_stub.start()
        # Set up the QMP connection
        qmp_connection = hv_kvm.QmpConnection(socket_file.name)
        try:
            qmp_connection.connect()
            # Format the script
            for request, expected_response in zip(self.REQUESTS,
                                                  self.EXPECTED_RESPONSES):
                response = qmp_connection.Execute(request["execute"],
                                                  request["arguments"])
                self.assertEqual(response, expected_response)
                msg = hv_kvm.QmpMessage({"return": expected_response})
                self.assertEqual(len(str(msg).splitlines()), 1,
                                 msg="Got multi-line message")
            # A command missing from the advertised list must be rejected
            self.assertRaises(monitor.QmpCommandNotSupported,
                              qmp_connection.Execute,
                              "unsupported-command")
        finally:
            qmp_stub.shutdown()
            qmp_connection.close()

    def testQmpContextManager(self):
        # Set up the stub
        socket_file = tempfile.NamedTemporaryFile()
        os.remove(socket_file.name)
        qmp_stub = QmpStub(socket_file.name, self.SERVER_RESPONSES)
        qmp_stub.start()
        try:
            # Test the context manager functionality
            with hv_kvm.QmpConnection(socket_file.name) as qmp:
                for request, expected_response in zip(self.REQUESTS,
                                                      self.EXPECTED_RESPONSES):
                    response = qmp.Execute(request["execute"], request["arguments"])
                    self.assertEqual(response, expected_response)
        finally:
            qmp_stub.shutdown()
class TestConsole(unittest.TestCase):
    """Console type selection in KVMHypervisor.GetInstanceConsole."""

    def MakeConsole(self, instance, node, group, hvparams):
        """Build a console object for the given parameters and validate it."""
        cons = hv_kvm.KVMHypervisor.GetInstanceConsole(instance, node, group,
                                                       hvparams, {})
        self.assertEqual(cons.Validate(), None)
        return cons

    def testSerial(self):
        # Serial console enabled -> SSH console wrapping socat
        instance = objects.Instance(name="kvm.example.com",
                                    primary_node="node6017-uuid")
        node = objects.Node(name="node6017", uuid="node6017-uuid",
                            ndparams={})
        group = objects.NodeGroup(name="group6134", ndparams={})
        hvparams = {
            constants.HV_SERIAL_CONSOLE: True,
            constants.HV_VNC_BIND_ADDRESS: None,
            constants.HV_KVM_SPICE_BIND: None,
        }
        cons = self.MakeConsole(instance, node, group, hvparams)
        self.assertEqual(cons.kind, constants.CONS_SSH)
        self.assertEqual(cons.host, node.name)
        self.assertEqual(cons.command[0], pathutils.KVM_CONSOLE_WRAPPER)
        self.assertEqual(cons.command[1], constants.SOCAT_PATH)

    def testVnc(self):
        # VNC bind address set -> VNC console on the instance's network port
        instance = objects.Instance(name="kvm.example.com",
                                    primary_node="node7235-uuid",
                                    network_port=constants.VNC_BASE_PORT + 10)
        node = objects.Node(name="node7235", uuid="node7235-uuid",
                            ndparams={})
        group = objects.NodeGroup(name="group3632", ndparams={})
        hvparams = {
            constants.HV_SERIAL_CONSOLE: False,
            constants.HV_VNC_BIND_ADDRESS: "192.0.2.1",
            constants.HV_KVM_SPICE_BIND: None,
        }
        cons = self.MakeConsole(instance, node, group, hvparams)
        self.assertEqual(cons.kind, constants.CONS_VNC)
        self.assertEqual(cons.host, "192.0.2.1")
        self.assertEqual(cons.port, constants.VNC_BASE_PORT + 10)
        # Display number is the offset from the VNC base port
        self.assertEqual(cons.display, 10)

    def testSpice(self):
        # SPICE bind address set -> SPICE console
        instance = objects.Instance(name="kvm.example.com",
                                    primary_node="node7235",
                                    network_port=11000)
        node = objects.Node(name="node7235", uuid="node7235-uuid",
                            ndparams={})
        group = objects.NodeGroup(name="group0132", ndparams={})
        hvparams = {
            constants.HV_SERIAL_CONSOLE: False,
            constants.HV_VNC_BIND_ADDRESS: None,
            constants.HV_KVM_SPICE_BIND: "192.0.2.1",
        }
        cons = self.MakeConsole(instance, node, group, hvparams)
        self.assertEqual(cons.kind, constants.CONS_SPICE)
        self.assertEqual(cons.host, "192.0.2.1")
        self.assertEqual(cons.port, 11000)

    def testNoConsole(self):
        # Nothing enabled -> placeholder message console
        instance = objects.Instance(name="kvm.example.com",
                                    primary_node="node24325",
                                    network_port=0)
        node = objects.Node(name="node24325", uuid="node24325-uuid",
                            ndparams={})
        group = objects.NodeGroup(name="group9184", ndparams={})
        hvparams = {
            constants.HV_SERIAL_CONSOLE: False,
            constants.HV_VNC_BIND_ADDRESS: None,
            constants.HV_KVM_SPICE_BIND: None,
        }
        cons = self.MakeConsole(instance, node, group, hvparams)
        self.assertEqual(cons.kind, constants.CONS_MESSAGE)
class TestVersionChecking(testutils.GanetiTestCase):
    """Tests for KVMHypervisor._ParseKVMVersion on captured --help output."""

    @staticmethod
    def ParseTestData(name):
        """Parse the version out of the named captured KVM help text.

        Returns the (version string, major, minor, micro) tuple produced by
        _ParseKVMVersion.
        """
        # Renamed from "help" to avoid shadowing the builtin
        help_text = testutils.ReadTestData(name)
        return hv_kvm.KVMHypervisor._ParseKVMVersion(help_text)

    def testParseVersion112(self):
        self.assertEqual(
            self.ParseTestData("kvm_1.1.2_help.txt"), ("1.1.2", 1, 1, 2))

    def testParseVersion10(self):
        self.assertEqual(self.ParseTestData("kvm_1.0_help.txt"), ("1.0", 1, 0, 0))

    def testParseVersion01590(self):
        self.assertEqual(
            self.ParseTestData("kvm_0.15.90_help.txt"), ("0.15.90", 0, 15, 90))

    def testParseVersion0125(self):
        self.assertEqual(
            self.ParseTestData("kvm_0.12.5_help.txt"), ("0.12.5", 0, 12, 5))

    def testParseVersion091(self):
        self.assertEqual(
            self.ParseTestData("kvm_0.9.1_help.txt"), ("0.9.1", 0, 9, 1))
class TestSpiceParameterList(unittest.TestCase):
    """Sanity checks on SPICE hvparam defaults and the derived parameter set."""

    def setUp(self):
        self.defaults = constants.HVC_DEFAULTS[constants.HT_KVM]

    def testAudioCompressionDefaultOn(self):
        self.assertTrue(self.defaults[constants.HV_KVM_SPICE_AUDIO_COMPR])

    def testVdAgentDefaultOn(self):
        self.assertTrue(self.defaults[constants.HV_KVM_SPICE_USE_VDAGENT])

    def testTlsCiphersDefaultOn(self):
        self.assertTrue(self.defaults[constants.HV_KVM_SPICE_TLS_CIPHERS])

    def testBindDefaultOff(self):
        self.assertFalse(self.defaults[constants.HV_KVM_SPICE_BIND])

    def testAdditionalParams(self):
        """_SPICE_ADDITIONAL_PARAMS is every HV_KVM_SPICE_* minus the fixed four."""
        all_spice = compat.UniqueFrozenset(
            getattr(constants, attr)
            for attr in dir(constants)
            if attr.startswith("HV_KVM_SPICE_"))
        fixed_params = {
            constants.HV_KVM_SPICE_BIND,
            constants.HV_KVM_SPICE_TLS_CIPHERS,
            constants.HV_KVM_SPICE_USE_VDAGENT,
            constants.HV_KVM_SPICE_AUDIO_COMPR,
        }
        self.assertEqual(hv_kvm.validation._SPICE_ADDITIONAL_PARAMS,
                         all_spice - fixed_params)
class TestHelpRegexps(testutils.GanetiTestCase):
    """Check _BOOT_RE

    It has to match -drive.*boot=on|off except if there is another dash-option
    at the beginning of the line.
    """

    @staticmethod
    def SearchTestData(name):
        """Return the _BOOT_RE search result (match or None) for a help file."""
        boot_re = hv_kvm.KVMHypervisor._BOOT_RE
        # Renamed from "help" to avoid shadowing the builtin
        help_text = testutils.ReadTestData(name)
        return boot_re.search(help_text)

    def testBootRe112(self):
        self.assertFalse(self.SearchTestData("kvm_1.1.2_help.txt"))

    def testBootRe10(self):
        self.assertFalse(self.SearchTestData("kvm_1.0_help.txt"))

    def testBootRe01590(self):
        self.assertFalse(self.SearchTestData("kvm_0.15.90_help.txt"))

    def testBootRe0125(self):
        self.assertTrue(self.SearchTestData("kvm_0.12.5_help.txt"))

    def testBootRe091(self):
        self.assertTrue(self.SearchTestData("kvm_0.9.1_help.txt"))

    def testBootRe091_fake(self):
        self.assertFalse(self.SearchTestData("kvm_0.9.1_help_boot_test.txt"))
class TestGetTunFeatures(unittest.TestCase):
    """Tests for netdev._GetTunFeatures (TUNGETFEATURES probing)."""

    def testWrongIoctl(self):
        scratch = tempfile.NamedTemporaryFile()
        # A file does not have the right ioctls, so this must always fail
        outcome = netdev._GetTunFeatures(scratch.fileno())
        self.assertTrue(outcome is None)

    def _FakeIoctl(self, features, fd, request, buf):
        """ioctl stand-in that returns a fixed feature bitmask."""
        self.assertEqual(request, netdev.TUNGETFEATURES)
        (reqno, ) = struct.unpack("I", buf)
        self.assertEqual(reqno, 0)
        return struct.pack("I", features)

    def test(self):
        scratch = tempfile.NamedTemporaryFile()
        fd = scratch.fileno()
        for features in (0, netdev.IFF_VNET_HDR):
            fake_ioctl = compat.partial(self._FakeIoctl, features)
            self.assertEqual(netdev._GetTunFeatures(fd, _ioctl=fake_ioctl),
                             features)
class TestProbeTapVnetHdr(unittest.TestCase):
    """Tests for netdev._ProbeTapVnetHdr (IFF_VNET_HDR detection)."""

    def _FakeTunFeatures(self, expected_fd, flags, fd):
        """Feature-probe stand-in: verifies the fd and returns fixed flags."""
        self.assertEqual(fd, expected_fd)
        return flags

    def test(self):
        scratch = tempfile.NamedTemporaryFile()
        fd = scratch.fileno()
        for flags in (0, netdev.IFF_VNET_HDR):
            fake_probe = compat.partial(self._FakeTunFeatures, fd, flags)
            probed = netdev._ProbeTapVnetHdr(fd, _features_fn=fake_probe)
            if flags == 0:
                self.assertFalse(probed)
            else:
                self.assertTrue(probed)

    def testUnsupported(self):
        scratch = tempfile.NamedTemporaryFile()
        fd = scratch.fileno()
        # A probe that cannot determine features reports "not supported"
        self.assertFalse(netdev._ProbeTapVnetHdr(fd, _features_fn=lambda _: None))
class TestGenerateDeviceKVMId(unittest.TestCase):
    """Tests for hv_kvm._GenerateDeviceKVMId."""

    def test(self):
        # The generated id starts with "<target>-<uuid prefix>"
        nic = objects.NIC()
        nic.uuid = "003fc157-66a8-4e6d-8b7e-ec4f69751396"
        target = constants.HOTPLUG_TARGET_NIC
        generated = hv_kvm._GenerateDeviceKVMId(target, nic)
        self.assertTrue(re.match("nic-003fc157-66a8-4e6d", generated))
class TestGenerateDeviceHVInfo(testutils.GanetiTestCase):
    """Tests for hv_kvm._GenerateDeviceHVInfo (bus/slot placement)."""

    def testPCI(self):
        """Test the placement of the first PCI device during startup."""
        self.MockOut(mock.patch('ganeti.utils.EnsureDirs'))
        hypervisor = hv_kvm.KVMHypervisor()
        dev_type = constants.HOTPLUG_TARGET_NIC
        kvm_devid = "nic-9e7c85f6-b6e5-4243"
        hv_dev_type = constants.HT_NIC_PARAVIRTUAL
        bus_slots = hypervisor._GetBusSlots()
        hvinfo = hv_kvm._GenerateDeviceHVInfo(dev_type,
                                              kvm_devid,
                                              hv_dev_type,
                                              bus_slots)
        # NOTE: The PCI slot is zero-based, i.e. 13th slot has addr hex(12)
        expected_hvinfo = {
            "driver": "virtio-net-pci",
            "id": kvm_devid,
            "bus": "pci.0",
            "addr": hex(constants.QEMU_DEFAULT_PCI_RESERVATIONS),
        }
        # assertEqual shows a dict diff on failure, unlike assertTrue(a == b)
        self.assertEqual(hvinfo, expected_hvinfo)

    def testSCSI(self):
        """Test the placement of the first SCSI device during startup."""
        self.MockOut(mock.patch('ganeti.utils.EnsureDirs'))
        hypervisor = hv_kvm.KVMHypervisor()
        dev_type = constants.HOTPLUG_TARGET_DISK
        kvm_devid = "disk-932df160-7a22-4067"
        hv_dev_type = constants.HT_DISK_SCSI_BLOCK
        bus_slots = hypervisor._GetBusSlots()
        hvinfo = hv_kvm._GenerateDeviceHVInfo(dev_type,
                                              kvm_devid,
                                              hv_dev_type,
                                              bus_slots)
        expected_hvinfo = {
            "driver": "scsi-block",
            "id": kvm_devid,
            "bus": "scsi.0",
            "channel": 0,
            "scsi-id": 0,
            "lun": 0,
        }
        self.assertEqual(hvinfo, expected_hvinfo)
class TestGetRuntimeInfo(unittest.TestCase):
    """Tests for looking up devices in a deserialized KVM runtime."""

    @classmethod
    def _GetRuntime(cls):
        """Load and deserialize the canned runtime fixture."""
        data = testutils.ReadTestData("kvm_runtime.json")
        return hv_kvm._AnalyzeSerializedRuntime(data)

    def _fail(self, target, device, runtime):
        """A device with an unknown uuid must raise HotplugError."""
        device.uuid = "aaaaaaaa-66a8-4e6d-8b7e-ec4f69751396"
        self.assertRaises(errors.HotplugError,
                          hv_kvm._GetExistingDeviceInfo,
                          target, device, runtime)

    def testNIC(self):
        device = objects.NIC()
        target = constants.HOTPLUG_TARGET_NIC
        runtime = self._GetRuntime()
        self._fail(target, device, runtime)
        device.uuid = "003fc157-66a8-4e6d-8b7e-ec4f69751396"
        devinfo = hv_kvm._GetExistingDeviceInfo(target, device, runtime)
        # assertEqual reports both values on failure, unlike assertTrue(a == b)
        self.assertEqual(devinfo.hvinfo["addr"], "0x8")

    def testDisk(self):
        device = objects.Disk()
        target = constants.HOTPLUG_TARGET_DISK
        runtime = self._GetRuntime()
        self._fail(target, device, runtime)
        device.uuid = "9f5c5bd4-6f60-480b-acdc-9bb1a4b7df79"
        (devinfo, _, __) = hv_kvm._GetExistingDeviceInfo(target, device, runtime)
        self.assertEqual(devinfo.hvinfo["addr"], "0xa")
class PostfixMatcher(object):
    """Equality helper: compares equal to any string ending in the suffix."""

    def __init__(self, string):
        # Suffix that candidate strings must end with
        self.string = string

    def __eq__(self, other):
        # Matches any string whose tail is our suffix
        return other.endswith(self.string)

    def __repr__(self):
        return "<Postfix {}>".format(self.string)
class TestKvmRuntime(testutils.GanetiTestCase):
    """The _ExecuteKvmRuntime is at the core of all KVM operations."""

    def setUp(self):
        super(TestKvmRuntime, self).setUp()
        kvm_class = 'ganeti.hypervisor.hv_kvm.KVMHypervisor'
        # Stub every external touch point: QMP, shell commands, filesystem
        # helpers and parameter validation.  Named mocks land in self.mocks.
        self.MockOut('qmp', mock.patch('ganeti.hypervisor.hv_kvm.QmpConnection'))
        self.MockOut('run_cmd', mock.patch('ganeti.utils.RunCmd'))
        self.MockOut('ensure_dirs', mock.patch('ganeti.utils.EnsureDirs'))
        self.MockOut('write_file', mock.patch('ganeti.utils.WriteFile'))
        self.MockOut(mock.patch(kvm_class + '.ValidateParameters'))
        self.MockOut(mock.patch('ganeti.hypervisor.hv_kvm.OpenTap',
                                return_value=('test_nic', [], [])))
        self.MockOut(mock.patch(kvm_class + '._ConfigureNIC'))
        # Pretend the instance is not running yet (pid file, pid -1, alive False).
        self.MockOut('pid_alive', mock.patch(kvm_class + '._InstancePidAlive',
                                             return_value=('file', -1, False)))
        self.MockOut(mock.patch(kvm_class + '._ExecuteCpuAffinity'))
        self.MockOut(mock.patch(kvm_class + '._CallMonitorCommand'))
        self.cfg = ConfigMock()
        params = constants.HVC_DEFAULTS[constants.HT_KVM].copy()
        beparams = constants.BEC_DEFAULTS.copy()
        self.instance = self.cfg.AddNewInstance(name='name.example.com',
                                                hypervisor='kvm',
                                                hvparams=params,
                                                beparams=beparams)

    def testDirectoriesCreated(self):
        # Merely instantiating the hypervisor must ensure all runtime
        # directories exist with mode 0775.
        hypervisor = hv_kvm.KVMHypervisor()
        self.mocks['ensure_dirs'].assert_called_with([
            (PostfixMatcher('/run/ganeti/kvm-hypervisor'), 0o775),
            (PostfixMatcher('/run/ganeti/kvm-hypervisor/pid'), 0o775),
            (PostfixMatcher('/run/ganeti/kvm-hypervisor/uid'), 0o775),
            (PostfixMatcher('/run/ganeti/kvm-hypervisor/ctrl'), 0o775),
            (PostfixMatcher('/run/ganeti/kvm-hypervisor/conf'), 0o775),
            (PostfixMatcher('/run/ganeti/kvm-hypervisor/nic'), 0o775),
            (PostfixMatcher('/run/ganeti/kvm-hypervisor/chroot'), 0o775),
            (PostfixMatcher('/run/ganeti/kvm-hypervisor/chroot-quarantine'), 0o775)
        ])

    def testStartInstance(self):
        hypervisor = hv_kvm.KVMHypervisor()

        def RunCmd(cmd, **kwargs):
            # Fake the KVM binary's responses to the probes StartInstance
            # performs; any unexpected invocation is a programming error.
            if '--help' in cmd:
                return mock.Mock(
                    failed=False, output=testutils.ReadTestData("kvm_current_help.txt"))
            if '-S' in cmd:
                # The actual launch: flip the pid-alive stub so the code
                # believes the instance came up.
                self.mocks['pid_alive'].return_value = ('file', -1, True)
                return mock.Mock(failed=False)
            elif '-machine' in cmd:
                return mock.Mock(failed=False, output='')
            elif '-device' in cmd:
                return mock.Mock(failed=False, output='name "virtio-blk-pci"')
            else:
                raise errors.ProgrammerError('Unexpected command: %s' % cmd)

        self.mocks['run_cmd'].side_effect = RunCmd
        hypervisor.StartInstance(self.instance, [], False)
if __name__ == "__main__":
    # Delegate to Ganeti's unittest wrapper (sets up logging etc.).
    testutils.GanetiTestProgram()
| |
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
# Random symmetric positive-semidefinite matrix (V @ V.T), available for
# metrics that require one.
V = np.random.random((3, 3))
V = np.dot(V, V.T)

# Dimensionality used by the query tests below.
DIMENSION = 3

# Metrics to exercise, mapped to the extra kwargs their constructor needs.
METRICS = {'euclidean': {},
           'manhattan': {},
           'chebyshev': {},
           'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-NN: full pairwise distance matrix, then per-row argsort.

    Returns (dist, ind) with the k nearest points of X for each row of Y.
    """
    dist_matrix = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(dist_matrix, axis=1)[:, :k]
    rows = np.arange(Y.shape[0])[:, None]
    dist = dist_matrix[rows, ind]
    return dist, ind
def test_kd_tree_query():
    """Nose-style generator test: KDTree.query must agree with brute force."""
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))

    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
                                breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)

        # don't check indices here: if there are any duplicate distances,
        # the indices may not match.  Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)

    # Exercise every metric with several k and all four traversal
    # combinations (dual vs. single tree, breadth vs. depth first).
    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
    """query_radius must return exactly the points within r of the origin."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    # Sweep radii spanning the observed distance range and compare against
    # a direct mask on the precomputed distances.
    for r in np.linspace(rad[0], rad[-1], 100):
        ind = kdt.query_radius([query_pt], r + eps)[0]
        i = np.where(rad <= r + eps)[0]
        # Sort both sides: query_radius gives no ordering guarantee.
        ind.sort()
        i.sort()
        assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
    """Distances returned by query_radius(return_distance=True) must be exact."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)

        # Single query point -> unwrap the one-element result arrays.
        ind = ind[0]
        dist = dist[0]

        # Recompute Euclidean distances for the returned indices directly.
        d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))

        assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
    """Direct O(n*m) reference implementation of the kernel density sum.

    For every row of Y, accumulates the (normalized) kernel contribution
    of every point in X at bandwidth h.
    """
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)

    # Each kernel yields a per-pair contribution; the normalized density is
    # always norm * contrib.sum(-1), so compute the contribution first.
    if kernel == 'gaussian':
        contrib = np.exp(-0.5 * (d * d) / (h * h))
    elif kernel == 'tophat':
        contrib = (d < h)
    elif kernel == 'epanechnikov':
        contrib = (1.0 - (d * d) / (h * h)) * (d < h)
    elif kernel == 'exponential':
        contrib = np.exp(-d / h)
    elif kernel == 'linear':
        contrib = (1 - d / h) * (d < h)
    elif kernel == 'cosine':
        contrib = np.cos(0.5 * np.pi * d / h) * (d < h)
    else:
        raise ValueError('kernel not recognized')
    return norm * contrib.sum(-1)
def test_kd_tree_kde(n_samples=100, n_features=3):
    """Tree-based kernel density estimates must match the slow reference."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    kdt = KDTree(X, leaf_size=10)

    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)

            def check_results(kernel, h, atol, rtol, breadth_first):
                dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
                                          kernel=kernel,
                                          breadth_first=breadth_first)
                # Floor rtol at 1e-7: the tree result is approximate within
                # the requested tolerances, never exact.
                assert_allclose(dens, dens_true, atol=atol,
                                rtol=max(rtol, 1e-7))

            # Sweep tolerance settings and both traversal orders.
            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first)
def test_gaussian_kde(n_samples=1000):
    # Compare gaussian KDE results to scipy.stats.gaussian_kde
    from scipy.stats import gaussian_kde
    np.random.seed(0)

    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)

    for h in [0.01, 0.1, 1]:
        kdt = KDTree(x_in[:, None])
        try:
            # scipy's bw_method is relative to the sample std; divide so
            # both implementations use the same absolute bandwidth h.
            gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            raise SkipTest("Old scipy, does not accept explicit bandwidth.")

        # KDTree returns an unnormalized sum; divide by n_samples to get
        # a density comparable to scipy's.
        dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
        dens_gkde = gkde.evaluate(x_out)

        assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
    """two_point_correlation must match a brute-force pair count."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    r = np.linspace(0, 1, 10)
    kdt = KDTree(X, leaf_size=10)

    # Reference: count all (Y, X) pairs within each radius directly.
    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    counts_true = [(D <= ri).sum() for ri in r]

    def check_two_point(r, dualtree):
        counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, counts_true)

    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_kd_tree_pickle():
    """A pickled-and-restored KDTree must answer queries identically."""
    import pickle
    np.random.seed(0)
    X = np.random.random((10, 3))
    kdt1 = KDTree(X, leaf_size=1)
    # KDTree.query returns (distances, indices); the original code had the
    # names swapped (ind1, dist1), which made the assertions misleading.
    dist1, ind1 = kdt1.query(X)

    def check_pickle_protocol(protocol):
        s = pickle.dumps(kdt1, protocol=protocol)
        kdt2 = pickle.loads(s)
        dist2, ind2 = kdt2.query(X)
        assert_array_almost_equal(dist1, dist2)
        assert_array_almost_equal(ind1, ind2)

    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    """Pushing 2k random entries per row must keep the k smallest, sorted."""
    heap = NeighborsHeap(n_pts, n_nbrs)

    for row in range(n_pts):
        # NOTE(review): no seed here, so the data differs per run; the
        # assertions hold for any input, so the test stays deterministic
        # in outcome.
        d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
        i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
        for d, i in zip(d_in, i_in):
            heap.push(row, d, i)

        # Reference: sort the inputs and keep the first n_nbrs.
        ind = np.argsort(d_in)
        d_in = d_in[ind]
        i_in = i_in[ind]

        d_heap, i_heap = heap.get_arrays(sort=True)

        assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
        assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    """nodeheap_sort must agree with numpy's argsort on random values."""
    vals = np.random.random(n_nodes).astype(DTYPE)

    i1 = np.argsort(vals)
    vals2, i2 = nodeheap_sort(vals)

    assert_array_almost_equal(i1, i2)
    assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    """simultaneous_sort must sort each row of dist and permute ind in step."""
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)

    dist2 = dist.copy()
    ind2 = ind.copy()

    # simultaneous sort rows using function
    simultaneous_sort(dist, ind)

    # simultaneous sort rows using numpy
    i = np.argsort(dist2, axis=1)
    row_ind = np.arange(n_rows)[:, None]
    dist2 = dist2[row_ind, i]
    ind2 = ind2[row_ind, i]

    assert_array_almost_equal(dist, dist2)
    assert_array_almost_equal(ind, ind2)
| |
#!/usr/bin/env python
# - coding: utf-8 -
# Copyright (C) 2017 Toms Baugis <toms.baugis@gmail.com>
import math
import random
from gi.repository import Gtk as gtk
from gi.repository import GObject as gobject
from lib import graphics
from lib.pytweener import Easing
class Piece(graphics.Sprite):
    """One board token; the shape index selects geometry and fill colour."""

    def __init__(self, col, row, shape=None):
        graphics.Sprite.__init__(self)
        self.interactive = True
        self.col = col
        self.row = row
        self.shape = shape
        self.selected = False
        self.reacting = False
        self.special = None
        self.recently_moved = False
        self.connect("on-render", self.on_render)

    def on_render(self, sprite):
        self._draw_shape()
        # Outline: thick when selected; red while reacting, grey otherwise.
        if self.selected:
            self.graphics.set_line_style(width=5)
        if self.reacting:
            self.graphics.set_line_style(width=5)
            self.graphics.stroke("#f00")
        else:
            self.graphics.stroke("#333")
        # "explode" specials get an extra red ring.
        if self.special == "explode":
            self.graphics.circle(0, 0, 20)
            self.graphics.set_line_style(width=5)
            self.graphics.stroke("#f00")

    def _draw_shape(self):
        """Emit the fill geometry for this piece's shape index."""
        g = self.graphics
        if self.shape in (0, 5, 6):
            # Axis-aligned squares, differing only in colour.
            g.rectangle(-15, -15, 30, 30)
            g.fill_preserve({0: "#ff0000", 5: "#00ffff", 6: "#ffff00"}[self.shape])
        elif self.shape == 1:
            g.circle(0, 0, 15)
            g.fill_preserve("#00ff00")
        elif self.shape == 2:
            g.rotate(math.radians(180))
            g.triangle(-15, -15, 30, 30)
            g.fill_preserve("#0000ff")
        elif self.shape in (3, 4):
            # Diamonds: squares rotated 45 degrees.
            g.rotate(math.radians(45))
            g.rectangle(-15, -15, 30, 30)
            g.fill_preserve("#ff00ff" if self.shape == 3 else "#ffffff")
class Board(graphics.Sprite):
    """Match-3 game board.

    An 8x8 grid of Piece sprites (the loops below all use range(8)).
    The board owns the game logic: selecting and swapping adjacent pieces
    on click, scanning rows and columns for runs of 3+ equal shapes,
    destroying matches and refilling columns from the top.  Columns are
    treated as pivoted rows, so a single scan routine serves both
    orientations.  The board also drives each piece's on-screen position.
    """

    def __init__(self):
        # Pieces are plain children in self.sprites; their logical position
        # lives in piece.col / piece.row.
        graphics.Sprite.__init__(self)
        self.reset_board()
        self.move_pieces()
        self.interactive = True
        # self.dummy is a no-op attribute animated purely to obtain an
        # on_complete callback after a fixed delay.
        self.dummy = 1
        self.frame = 0
        self.cursor_item = None

    def on_click(self, piece, evt):
        """Handle a click on *piece*: select, deselect, or attempt a swap."""
        if not piece:
            return

        # The currently selected piece, if any.
        selected = [sel for sel in self.sprites if sel.selected]
        selected = selected[0] if selected else None

        # Manhattan distance to the selection; 10 is a sentinel meaning
        # "nothing selected" (never adjacent).
        proximity = 10
        if selected:
            proximity = abs(selected.col - piece.col) + abs(selected.row - piece.row)

        # Clicking the selection again deselects it.
        if piece == selected:
            piece.selected = False

        if proximity == 1 and piece.shape != selected.shape:
            # swap
            piece.col, selected.col = selected.col, piece.col
            piece.row, selected.row = selected.row, piece.row
            piece.recently_moved, selected.recently_moved = True, True

            reactions = self.get_reactions()
            if not reactions:
                # undo if the move doesn't do anything
                piece.col, selected.col = selected.col, piece.col
                piece.row, selected.row = selected.row, piece.row
            else:
                piece.selected, selected.selected = False, False

            # Animate the (possibly reverted) swap, then re-check the board.
            self.move_pieces(0.3)
            self.animate(duration=0.3, dummy=1, on_complete=self.check_board)
        else:
            # Non-adjacent (or same-shape) click: move the selection here.
            self.select_item(piece)
            if selected:
                selected.selected = False

    def check_board(self, board=None):
        # this process can repeat several times via cleanup and fill
        reactions = self.get_reactions()
        if reactions:
            for group in reactions:
                # The "special" upgrade goes to the piece the player just
                # moved, falling back to an arbitrary group member.
                special_piece = [piece for piece in group if piece.recently_moved]
                special_piece = special_piece[0] if special_piece else group[1]
                if len(group) == 4:
                    # XXX in bejeweled the special one is the one you moved
                    # or the merger point
                    special_piece.special = "explode"
                elif len(group) == 5:
                    group[1].special = "electricity"

                # Fade out everything in the group except upgraded pieces.
                for piece in group:
                    if not piece.special:
                        piece.reacting = True
                        piece.animate(opacity=0, duration=0.4)
            # After the fade-out finishes, drop/refill and re-check.
            self.animate(duration=0.4, dummy=1, on_complete=self.cleanup_and_fill)

        for piece in self.sprites:
            piece.recently_moved = False

    def cleanup_and_fill(self, board):
        """Remove reacted pieces, drop survivors down, refill from the top."""
        for col in range(8):
            # Walk the column bottom-up, compacting survivors toward row 7.
            cur_idx = 7
            for piece in reversed(self._get_line(col, horiz=False)):
                if not piece.reacting:
                    if piece.row != cur_idx:
                        piece.row = cur_idx
                        piece.recently_moved = True
                    cur_idx -= 1
                else:
                    self.remove_child(piece)

            # Spawn replacements above the board so they fall into place.
            missing = 8 - len(self._get_line(col, horiz=False))
            for i in range(missing):
                piece = Piece(col, i, random.randint(0, 6))
                piece.x = 50 + col * 50
                # Negative starting y: one cell above the top per slot.
                piece.y = 50 + (i - missing) * 50
                self.add_child(piece)
                piece.connect("on-click", self.on_click)

        self.move_pieces()
        self.check_board()

    def reset_board(self):
        """renders a new board that doesn't have any explody conditions

        it does so by filling the buckets 1-by-1 horizontally and then
        performs scan on the whole board.
        add piece -> check for explosions. if explosions, re-roll the shape
        until the board has no match.
        """
        for col in range(8):
            for row in range(8):
                piece = Piece(col, row, random.randint(0, 6))
                self.add_child(piece)
                piece.connect("on-click", self.on_click)
                # Re-roll this piece's shape until it creates no match.
                while self.get_reactions():
                    piece.shape = random.randint(0, 6)

    def move_pieces(self, duration=0.4):
        """Animate every piece toward its (col, row) grid position."""
        for piece in self.sprites:
            piece.animate(x=50 + piece.col * 50, y = 50 + piece.row * 50, duration=duration, easing=Easing.Sine.ease_out)

    def get_reactions(self):
        """runs through the board and returns list of pieces that have to be
        destroyed.
        columns are actually rows pivoted - a plain list, so we don't need
        special cases, just a getter func
        """
        reactions = []

        def check_sequence(sequence):
            # A run of 3+ equal shapes is a reaction.
            if len(sequence) >= 3:
                reactions.append(sequence)

        # row=True scans rows, row=False scans columns (via _get_line).
        for row in [True, False]:
            for i in range(8):
                sequence = []
                for piece in self._get_line(i, row):
                    if not sequence or sequence[-1].shape != piece.shape:
                        # Shape changed: flush the finished run, start anew.
                        check_sequence(sequence)
                        sequence = [piece]
                    else:
                        sequence.append(piece)
                # Flush the trailing run of the line.
                check_sequence(sequence)

        return reactions

    def _get_line(self, line_no, horiz=True):
        """Return row *line_no* (horiz) or column *line_no*, sorted in order."""
        line_items = []
        for piece in self.sprites:
            if (not horiz and piece.col == line_no) or (horiz and piece.row == line_no):
                line_items.append(piece)
        return sorted(line_items, key=lambda rec:rec.col if horiz else rec.row)

    def select_item(self, item):
        """Make *item* the sole selection (clicking it again toggles off)."""
        selected = [sel for sel in self.sprites if sel.selected]
        selected = selected[0] if selected else None
        if selected:
            selected.selected = False

        item.selected = item != selected
        self.cursor_item = item if item.selected else None
class Scene(graphics.Scene):
    """Root scene that hosts the single game board."""

    def __init__(self):
        graphics.Scene.__init__(self)
        board = Board()
        self.board = board
        self.add_child(board)
class BasicWindow:
    """Top-level GTK window wrapping the game scene."""

    def __init__(self):
        win = gtk.Window()
        win.set_default_size(450, 450)
        # Quit the main loop when the window is closed.
        win.connect("delete_event", lambda *args: gtk.main_quit())
        win.add(Scene())
        win.show_all()
if __name__ == '__main__':
    window = BasicWindow()
    import signal
    # Restore default SIGINT handling so Ctrl+C actually kills the loop.
    signal.signal(signal.SIGINT, signal.SIG_DFL) # gtk3 screws up ctrl+c
    gtk.main()
| |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import datetime
import pytest
import uuid
from msrest.serialization import UTC
from azure.eventgrid._messaging_shared import _get_json_content
from azure.eventgrid import EventGridEvent
class MockQueueMessage(object):
    """Minimal stand-in for an azure.storage.queue QueueMessage."""

    def __init__(self, content=None):
        self.content = content
        self.id = uuid.uuid4()
        self.pop_receipt = None
        self.next_visible_on = None
        self.dequeue_count = 1
        # Freshly inserted, expiring 100 days out.
        self.inserted_on = datetime.datetime.now()
        self.expires_on = datetime.datetime.now() + datetime.timedelta(days=100)
class MockServiceBusReceivedMessage(object):
    """Minimal stand-in for an azure.servicebus ServiceBusReceivedMessage.

    Every metadata field is a fixed canned value; extra keyword arguments
    are accepted (and ignored) so call sites can mirror the real
    constructor's signature.
    """

    def __init__(self, body=None, **kwargs):
        self.body = body
        self.application_properties = None
        self.session_id = None
        self.message_id = '3f6c5441-5be5-4f33-80c3-3ffeb6a090ce'
        self.content_type = 'application/cloudevents+json; charset=utf-8'
        self.correlation_id = None
        self.to = None
        self.reply_to = None
        self.reply_to_session_id = None
        self.subject = None
        self.time_to_live = datetime.timedelta(days=14)
        self.partition_key = None
        self.scheduled_enqueue_time_utc = None
        # BUG FIX: the original line ended with a stray trailing comma
        # ("= None,"), which made auto_renew_error the tuple (None,)
        # instead of None.
        self.auto_renew_error = None
        self.dead_letter_error_description = None
        self.dead_letter_reason = None
        self.dead_letter_source = None
        self.delivery_count = 13
        self.enqueued_sequence_number = 0
        self.enqueued_time_utc = datetime.datetime(2021, 7, 22, 22, 27, 41, 236000)
        self.expires_at_utc = datetime.datetime(2021, 8, 5, 22, 27, 41, 236000)
        self.sequence_number = 11219
        self.lock_token = '233146e3-d5a6-45eb-826f-691d82fb8b13'
class MockEventhubData(object):
    """Minimal stand-in for an azure.eventhub EventData object."""

    def __init__(self, body=None):
        # A body is mandatory, mirroring the real EventData contract.
        if body is None:
            # Internal usage only for transforming AmqpAnnotatedMessage to
            # outgoing EventData
            self._last_enqueued_event_properties = {}
            self._sys_properties = None
            raise ValueError("EventData cannot be None.")
        self._last_enqueued_event_properties = {}
        self._sys_properties = None
        self.body = body
        self._raw_amqp_message = "some amqp data"
        self.message_id = None
        self.content_type = None
        self.correlation_id = None
class MockBody(object):
    """Iterator mock for a Service Bus message body.

    Yields the supplied data, or a canned EventGridEvent JSON string when
    none was given.  Note it never raises StopIteration -- the messaging
    helpers only consume the first chunk.
    """

    def __init__(self, data=None):
        self.data = data

    def __iter__(self):
        return self

    def __next__(self):
        if self.data:
            return self.data
        return """{"id":"f208feff-099b-4bda-a341-4afd0fa02fef","subject":"https://egsample.dev/sampleevent","data":"ServiceBus","event_type":"Azure.Sdk.Sample","event_time":"2021-07-22T22:27:38.960209Z","data_version":"1.0"}"""

    next = __next__  # Python 2 iterator protocol alias
class MockEhBody(object):
    """Iterator mock for an Event Hubs message body.

    Yields the supplied data, or a canned single-event JSON list (as
    bytes) when none was given; never raises StopIteration.
    """

    def __init__(self, data=None):
        self.data = data

    def __iter__(self):
        return self

    def __next__(self):
        if self.data:
            return self.data
        return b'[{"id":"f208feff-099b-4bda-a341-4afd0fa02fef","subject":"https://egsample.dev/sampleevent","data":"Eventhub","event_type":"Azure.Sdk.Sample","event_time":"2021-07-22T22:27:38.960209Z","data_version":"1.0"}]'

    next = __next__  # Python 2 iterator protocol alias
def test_get_bytes_storage_queue():
    """_get_json_content should parse the JSON body of a storage-queue message."""
    cloud_storage_dict = """{
    "id":"a0517898-9fa4-4e70-b4a3-afda1dd68672",
    "subject":"/subscriptions/{subscription-id}/resourceGroups/{resource-group}/providers/Microsoft.Storage/storageAccounts/{storage-account}",
    "data":{
        "api":"PutBlockList",
        "client_request_id":"6d79dbfb-0e37-4fc4-981f-442c9ca65760",
        "request_id":"831e1650-001e-001b-66ab-eeb76e000000",
        "e_tag":"0x8D4BCC2E4835CD0",
        "content_type":"application/octet-stream",
        "content_length":524288,
        "blob_type":"BlockBlob",
        "url":"https://oc2d2817345i60006.blob.core.windows.net/oc2d2817345i200097container/oc2d2817345i20002296blob",
        "sequencer":"00000000000004420000000000028963",
        "storage_diagnostics":{"batchId":"b68529f3-68cd-4744-baa4-3c0498ec19f0"}
    },
    "event_type":"Microsoft.Storage.BlobCreated",
    "event_time":"2021-02-18T20:18:10.581147898Z",
    "data_version":"1.0"
    }"""
    obj = MockQueueMessage(content=cloud_storage_dict)

    # Renamed from 'dict' -- the original shadowed the builtin.
    content = _get_json_content(obj)
    assert content.get('data') == {
        "api":"PutBlockList",
        "client_request_id":"6d79dbfb-0e37-4fc4-981f-442c9ca65760",
        "request_id":"831e1650-001e-001b-66ab-eeb76e000000",
        "e_tag":"0x8D4BCC2E4835CD0",
        "content_type":"application/octet-stream",
        "content_length":524288,
        "blob_type":"BlockBlob",
        "url":"https://oc2d2817345i60006.blob.core.windows.net/oc2d2817345i200097container/oc2d2817345i20002296blob",
        "sequencer":"00000000000004420000000000028963",
        "storage_diagnostics":{"batchId":"b68529f3-68cd-4744-baa4-3c0498ec19f0"}
    }
    assert content.get('data_version') == "1.0"
def test_get_bytes_storage_queue_wrong_content():
    """A queue message whose content is not JSON must raise ValueError."""
    message = MockQueueMessage(content=u'This is a random string which must fail')
    with pytest.raises(ValueError, match="Failed to load JSON content from the object."):
        _get_json_content(message)
def test_get_bytes_servicebus():
    """_get_json_content should decode the default Service Bus mock body."""
    obj = MockServiceBusReceivedMessage(
        body=MockBody(),
        message_id='3f6c5441-5be5-4f33-80c3-3ffeb6a090ce',
        content_type='application/cloudevents+json; charset=utf-8',
        time_to_live=datetime.timedelta(days=14),
        delivery_count=13,
        enqueued_sequence_number=0,
        enqueued_time_utc=datetime.datetime(2021, 7, 22, 22, 27, 41, 236000),
        expires_at_utc=datetime.datetime(2021, 8, 5, 22, 27, 41, 236000),
        sequence_number=11219,
        lock_token='233146e3-d5a6-45eb-826f-691d82fb8b13'
    )
    # Renamed from 'dict' -- the original shadowed the builtin.
    content = _get_json_content(obj)
    assert content.get('data') == "ServiceBus"
    assert content.get('data_version') == '1.0'
def test_get_bytes_servicebus_wrong_content():
    """A non-JSON Service Bus body must raise ValueError."""
    obj = MockServiceBusReceivedMessage(
        body=MockBody(data='random'),
        message_id='3f6c5441-5be5-4f33-80c3-3ffeb6a090ce',
        content_type='application/json; charset=utf-8',
        time_to_live=datetime.timedelta(days=14),
        delivery_count=13,
        enqueued_sequence_number=0,
        enqueued_time_utc=datetime.datetime(2021, 7, 22, 22, 27, 41, 236000),
        expires_at_utc=datetime.datetime(2021, 8, 5, 22, 27, 41, 236000),
        sequence_number=11219,
        lock_token='233146e3-d5a6-45eb-826f-691d82fb8b13'
    )
    with pytest.raises(ValueError, match="Failed to load JSON content from the object."):
        # Dropped the dead 'dict = ...' assignment that shadowed the builtin.
        _get_json_content(obj)
def test_get_bytes_eventhubs():
    """_get_json_content should decode the default Event Hubs mock body."""
    obj = MockEventhubData(
        body=MockEhBody()
    )
    # Renamed from 'dict' -- the original shadowed the builtin.
    content = _get_json_content(obj)
    assert content.get('data') == 'Eventhub'
    assert content.get('data_version') == '1.0'
def test_get_bytes_eventhubs_wrong_content():
    """A non-JSON Event Hubs body must raise ValueError."""
    obj = MockEventhubData(
        body=MockEhBody(data='random string')
    )
    with pytest.raises(ValueError, match="Failed to load JSON content from the object."):
        # Dropped the dead 'dict = ...' assignment that shadowed the builtin.
        _get_json_content(obj)
def test_get_bytes_random_obj():
    """A plain JSON string should round-trip through _get_json_content."""
    raw_json = '{"id": "de0fd76c-4ef4-4dfb-ab3a-8f24a307e033", "subject": "https://egtest.dev/cloudcustomevent", "data": {"team": "event grid squad"}, "event_type": "Azure.Sdk.Sample", "event_time": "2020-08-07T02:06:08.11969Z", "data_version": "1.0"}'
    expected = {
        "id":"de0fd76c-4ef4-4dfb-ab3a-8f24a307e033",
        "subject":"https://egtest.dev/cloudcustomevent",
        "data":{"team": "event grid squad"},
        "event_type":"Azure.Sdk.Sample",
        "event_time":"2020-08-07T02:06:08.11969Z",
        "data_version":"1.0",
    }
    assert _get_json_content(raw_json) == expected
def test_from_json_sb():
    """EventGridEvent.from_json should decode a Service Bus message body."""
    obj = MockServiceBusReceivedMessage(
        body=MockBody(),
        message_id='3f6c5441-5be5-4f33-80c3-3ffeb6a090ce',
        content_type='application/cloudevents+json; charset=utf-8',
        time_to_live=datetime.timedelta(days=14),
        delivery_count=13,
        enqueued_sequence_number=0,
        enqueued_time_utc=datetime.datetime(2021, 7, 22, 22, 27, 41, 236000),
        expires_at_utc=datetime.datetime(2021, 8, 5, 22, 27, 41, 236000),
        sequence_number=11219,
        lock_token='233146e3-d5a6-45eb-826f-691d82fb8b13'
    )
    event = EventGridEvent.from_json(obj)
    # Values come from MockBody's default canned payload.
    assert event.id == "f208feff-099b-4bda-a341-4afd0fa02fef"
    assert event.data == "ServiceBus"
def test_from_json_eh():
    """EventGridEvent.from_json should decode an Event Hubs data payload."""
    event = EventGridEvent.from_json(MockEventhubData(body=MockEhBody()))
    # Values come from MockEhBody's default canned payload.
    assert event.id == "f208feff-099b-4bda-a341-4afd0fa02fef"
    assert event.data == "Eventhub"
def test_from_json_storage():
    """EventGridEvent.from_json should decode a storage-queue message body."""
    eg_storage_dict = """{
    "id":"a0517898-9fa4-4e70-b4a3-afda1dd68672",
    "subject":"/subscriptions/{subscription-id}/resourceGroups/{resource-group}/providers/Microsoft.Storage/storageAccounts/{storage-account}",
    "data":{
        "api":"PutBlockList",
        "client_request_id":"6d79dbfb-0e37-4fc4-981f-442c9ca65760",
        "request_id":"831e1650-001e-001b-66ab-eeb76e000000",
        "e_tag":"0x8D4BCC2E4835CD0",
        "content_type":"application/octet-stream",
        "content_length":524288,
        "blob_type":"BlockBlob",
        "url":"https://oc2d2817345i60006.blob.core.windows.net/oc2d2817345i200097container/oc2d2817345i20002296blob",
        "sequencer":"00000000000004420000000000028963",
        "storage_diagnostics":{"batchId":"b68529f3-68cd-4744-baa4-3c0498ec19f0"}
    },
    "event_type":"Microsoft.Storage.BlobCreated",
    "event_time":"2021-02-18T20:18:10.581147898Z",
    "data_version":"1.0"
    }"""
    obj = MockQueueMessage(content=eg_storage_dict)

    event = EventGridEvent.from_json(obj)

    # The nested "data" object must come through structurally intact.
    assert event.data == {
        "api":"PutBlockList",
        "client_request_id":"6d79dbfb-0e37-4fc4-981f-442c9ca65760",
        "request_id":"831e1650-001e-001b-66ab-eeb76e000000",
        "e_tag":"0x8D4BCC2E4835CD0",
        "content_type":"application/octet-stream",
        "content_length":524288,
        "blob_type":"BlockBlob",
        "url":"https://oc2d2817345i60006.blob.core.windows.net/oc2d2817345i200097container/oc2d2817345i20002296blob",
        "sequencer":"00000000000004420000000000028963",
        "storage_diagnostics":{"batchId":"b68529f3-68cd-4744-baa4-3c0498ec19f0"}
    }
def test_from_json():
    """from_json should accept a plain JSON string and parse event_time as UTC."""
    payload = '{"id": "de0fd76c-4ef4-4dfb-ab3a-8f24a307e033", "subject": "https://egtest.dev/cloudcustomevent", "data": {"team": "event grid squad"}, "event_type": "Azure.Sdk.Sample", "event_time": "2020-08-07T02:06:08.11969Z", "data_version": "1.0"}'
    event = EventGridEvent.from_json(payload)
    assert event.data == {"team": "event grid squad"}
    # Timestamps should come back as timezone-aware UTC datetimes.
    assert event.event_time == datetime.datetime(2020, 8, 7, 2, 6, 8, 119690, UTC())
| |
# encoding: utf-8
"""
Test data for relationship-related unit tests.
"""
from __future__ import absolute_import
from docx.opc.constants import RELATIONSHIP_TYPE as RT
from docx.opc.rel import Relationships
from docx.opc.constants import NAMESPACE as NS
from docx.opc.oxml import parse_xml
class BaseBuilder(object):
    """Shared plumbing for the XML test-data builders in this module."""

    @property
    def element(self):
        """Parse this builder's XML and return the resulting element."""
        return parse_xml(self.xml)

    def with_indent(self, indent):
        """Prefix the generated XML with *indent* spaces; returns self."""
        self._indent = indent
        return self
class RelationshipsBuilder(object):
    """Accumulates relationships and builds a test Relationships collection."""

    # Partname templates per relationship type; %d is the part number.
    partname_tmpls = {
        RT.SLIDE_MASTER: '/ppt/slideMasters/slideMaster%d.xml',
        RT.SLIDE: '/ppt/slides/slide%d.xml',
    }

    def __init__(self):
        self.relationships = []
        self.next_rel_num = 1
        self.next_partnums = {}

    @property
    def next_rId(self):
        """Return the next sequential rId string ('rId1', 'rId2', ...)."""
        num = self.next_rel_num
        self.next_rel_num = num + 1
        return 'rId%d' % num

    def _next_partnum(self, reltype):
        """Return the next 1-based part number for *reltype*."""
        partnum = self.next_partnums.get(reltype, 1)
        self.next_partnums[reltype] = partnum + 1
        return partnum

    def _next_tuple_partname(self, reltype):
        """Return the next partname for *reltype* from its template."""
        return self.partname_tmpls[reltype] % self._next_partnum(reltype)

    def build(self):
        """Return a Relationships collection holding all added rels."""
        rels = Relationships()
        for rel in self.relationships:
            rels.add_rel(rel)
        return rels
class CT_DefaultBuilder(BaseBuilder):
    """
    Test data builder for the CT_Default (<Default>) element found in
    `[Content_Types].xml`.
    """

    def __init__(self):
        # Sensible defaults; each with_* call overrides one of them.
        self._content_type = 'application/xml'
        self._extension = 'xml'
        self._indent = 0
        self._namespace = ' xmlns="%s"' % NS.OPC_CONTENT_TYPES

    def with_content_type(self, content_type):
        """Use *content_type* for the ContentType attribute; returns self."""
        self._content_type = content_type
        return self

    def with_extension(self, extension):
        """Use *extension* for the Extension attribute; returns self."""
        self._extension = extension
        return self

    def without_namespace(self):
        """Omit the 'xmlns=' attribute from the generated XML."""
        self._namespace = ''
        return self

    @property
    def xml(self):
        """The <Default/> element XML, newline-terminated."""
        return '%s<Default%s Extension="%s" ContentType="%s"/>\n' % (
            ' ' * self._indent, self._namespace, self._extension,
            self._content_type)
class CT_OverrideBuilder(BaseBuilder):
    """
    Test data builder for the CT_Override (<Override>) element found in
    `[Content_Types].xml`.
    """

    def __init__(self):
        # Sensible defaults; each with_* call overrides one of them.
        self._content_type = 'app/vnd.type'
        self._indent = 0
        self._namespace = ' xmlns="%s"' % NS.OPC_CONTENT_TYPES
        self._partname = '/part/name.xml'

    def with_content_type(self, content_type):
        """Use *content_type* for the ContentType attribute; returns self."""
        self._content_type = content_type
        return self

    def with_partname(self, partname):
        """Use *partname* for the PartName attribute; returns self."""
        self._partname = partname
        return self

    def without_namespace(self):
        """Omit the 'xmlns=' attribute from the generated XML."""
        self._namespace = ''
        return self

    @property
    def xml(self):
        """The <Override/> element XML, newline-terminated."""
        return '%s<Override%s PartName="%s" ContentType="%s"/>\n' % (
            ' ' * self._indent, self._namespace, self._partname,
            self._content_type)
class CT_RelationshipBuilder(BaseBuilder):
    """
    Test data builder for CT_Relationship (Relationship) XML element that
    appears in .rels files
    """

    def __init__(self):
        """Establish instance variables with default values"""
        self._rId = 'rId9'
        self._reltype = 'ReLtYpE'
        self._target = 'docProps/core.xml'
        self._target_mode = None
        self._indent = 0
        self._namespace = ' xmlns="%s"' % NS.OPC_RELATIONSHIPS

    def with_rId(self, rId):
        """Set Id attribute to *rId*"""
        self._rId = rId
        return self

    def with_reltype(self, reltype):
        """Set Type attribute to *reltype*"""
        self._reltype = reltype
        return self

    def with_target(self, target):
        """Set Target attribute to *target*"""
        self._target = target
        return self

    def with_target_mode(self, target_mode):
        """Set TargetMode attribute to *target_mode*"""
        # 'Internal' is the spec default, so it is expressed by omitting
        # the attribute entirely (stored as None).
        self._target_mode = None if target_mode == 'Internal' else target_mode
        return self

    def without_namespace(self):
        """Don't include an 'xmlns=' attribute"""
        self._namespace = ''
        return self

    @property
    def target_mode(self):
        # Rendered attribute fragment; empty string when mode is default.
        if self._target_mode is None:
            return ''
        return ' TargetMode="%s"' % self._target_mode

    @property
    def xml(self):
        """Return Relationship element"""
        tmpl = '%s<Relationship%s Id="%s" Type="%s" Target="%s"%s/>\n'
        indent = ' ' * self._indent
        return tmpl % (indent, self._namespace, self._rId, self._reltype,
                       self._target, self.target_mode)
class CT_RelationshipsBuilder(BaseBuilder):
    """
    Test data builder for the CT_Relationships (<Relationships>) root
    element of .rels files.
    """

    def __init__(self):
        # Fixed fixture set: (rId, reltype, target, target_mode) tuples.
        self._rels = (
            ('rId1', 'http://reltype1', 'docProps/core.xml', 'Internal'),
            ('rId2', 'http://linktype', 'http://some/link', 'External'),
            ('rId3', 'http://reltype2', '../slides/slide1.xml', 'Internal'),
        )

    @property
    def xml(self):
        """XML for the root element with each child indented two spaces."""
        children = ''.join(
            a_Relationship().with_rId(rId)
                            .with_reltype(reltype)
                            .with_target(target)
                            .with_target_mode(target_mode)
                            .with_indent(2)
                            .without_namespace()
                            .xml
            for rId, reltype, target, target_mode in self._rels
        )
        return ('<Relationships xmlns="%s">\n' % NS.OPC_RELATIONSHIPS
                + children
                + '</Relationships>\n')
class CT_TypesBuilder(BaseBuilder):
    """
    Test data builder for the CT_Types (<Types>) root element of
    [Content_Types].xml files.
    """

    def __init__(self):
        # Fixture data: (extension, content_type) defaults and
        # (partname, content_type) overrides.
        self._defaults = (
            ('xml', 'application/xml'),
            ('jpeg', 'image/jpeg'),
        )
        self._empty = False
        self._overrides = (
            ('/docProps/core.xml', 'app/vnd.type1'),
            ('/ppt/presentation.xml', 'app/vnd.type2'),
            ('/docProps/thumbnail.jpeg', 'image/jpeg'),
        )

    def empty(self):
        """Generate a self-closing <Types/> element with no children."""
        self._empty = True
        return self

    @property
    def xml(self):
        """XML for the root element, children indented two spaces."""
        if self._empty:
            return '<Types xmlns="%s"/>\n' % NS.OPC_CONTENT_TYPES

        defaults = ''.join(
            a_Default().with_extension(extension)
                       .with_content_type(content_type)
                       .with_indent(2)
                       .without_namespace()
                       .xml
            for extension, content_type in self._defaults
        )
        overrides = ''.join(
            an_Override().with_partname(partname)
                         .with_content_type(content_type)
                         .with_indent(2)
                         .without_namespace()
                         .xml
            for partname, content_type in self._overrides
        )
        return ('<Types xmlns="%s">\n' % NS.OPC_CONTENT_TYPES
                + defaults
                + overrides
                + '</Types>\n')
# Factory functions giving the builders readable, article-style names in
# test code (e.g. a_Relationship().with_rId(...)).

def a_Default():
    """Return a fresh CT_DefaultBuilder."""
    return CT_DefaultBuilder()


def a_Relationship():
    """Return a fresh CT_RelationshipBuilder."""
    return CT_RelationshipBuilder()


def a_Relationships():
    """Return a fresh CT_RelationshipsBuilder."""
    return CT_RelationshipsBuilder()


def a_Types():
    """Return a fresh CT_TypesBuilder."""
    return CT_TypesBuilder()


def an_Override():
    """Return a fresh CT_OverrideBuilder."""
    return CT_OverrideBuilder()
| |
"""Scryfall object models."""
from dataclasses import dataclass
import datetime as dt
from decimal import Decimal
from enum import Enum
from typing import ClassVar
from typing import Dict
from typing import NewType
from typing import Optional
from typing import Sequence
from uuid import UUID
# URI values are plain ``str`` at runtime; the NewType only provides a
# distinct name for static type checking.
URI = NewType("URI", str)
class ScryObject:
    """Base object class for scryfall response objects."""

    # Value of the "object" discriminator field in the API's JSON payload;
    # concrete subclasses override this (e.g. "card", "set", "list").
    object: ClassVar[str] = "object"
class ScryColor(str, Enum):
    """Enum for https://scryfall.com/docs/api/colors#color-arrays"""

    # Single-letter color codes from Magic's WUBRG color wheel.
    WHITE = "W"
    BLUE = "U"
    BLACK = "B"
    RED = "R"
    GREEN = "G"
    COLORLESS = "C"
class ScrySetType(str, Enum):
    """Enum for https://scryfall.com/docs/api/sets#set-types"""

    # Values are the strings carried in a set object's set_type field.
    CORE = "core"
    EXPANSION = "expansion"
    MASTERS = "masters"
    MASTERPIECE = "masterpiece"
    FROM_THE_VAULT = "from_the_vault"
    SPELLBOOK = "spellbook"
    PREMIUM_DECK = "premium_deck"
    DUEL_DECK = "duel_deck"
    DRAFT_INNOVATION = "draft_innovation"
    TREASURE_CHEST = "treasure_chest"
    COMMANDER = "commander"
    PLANECHASE = "planechase"
    ARCHENEMY = "archenemy"
    VANGUARD = "vanguard"
    FUNNY = "funny"
    STARTER = "starter"
    BOX = "box"
    PROMO = "promo"
    TOKEN = "token"
    MEMORABILIA = "memorabilia"
class ScryCardLayout(str, Enum):
    """Enum for https://scryfall.com/docs/api/layouts#layout"""

    # Values are the strings carried in a card object's layout field.
    NORMAL = "normal"
    SPLIT = "split"
    FLIP = "flip"
    TRANSFORM = "transform"
    MODAL_DFC = "modal_dfc"
    MELD = "meld"
    LEVELER = "leveler"
    SAGA = "saga"
    ADVENTURE = "adventure"
    PLANAR = "planar"
    SCHEME = "scheme"
    VANGUARD = "vanguard"
    TOKEN = "token"
    DOUBLE_FACED_TOKEN = "double_faced_token"
    EMBLEM = "emblem"
    AUGMENT = "augment"
    HOST = "host"
    ART_SERIES = "art_series"
    DOUBLE_SIDED = "double_sided"
class ScryCardFrame(str, Enum):
    """Enum for https://scryfall.com/docs/api/layouts#frames"""

    # Member names carry a "Y" prefix because Python identifiers may not
    # begin with a digit; the values are the bare year strings.
    Y1993 = "1993"
    Y1997 = "1997"
    Y2003 = "2003"
    Y2015 = "2015"
    FUTURE = "future"
class ScryFrameEffect(str, Enum):
    """Enum for https://scryfall.com/docs/api/layouts#frame-effects"""

    # NONE represents the absence of a frame effect (empty string value).
    NONE = ""
    LEGENDARY = "legendary"
    MIRACLE = "miracle"
    NYXBORN = "nyxborn"
    NYXTOUCHED = "nyxtouched"
    DRAFT = "draft"
    DEVOID = "devoid"
    TOMBSTONE = "tombstone"
    COLORSHIFTED = "colorshifted"
    INVERTED = "inverted"
    SUNMOONDFC = "sunmoondfc"
    COMPASSLANDDFC = "compasslanddfc"
    ORIGINPWDFC = "originpwdfc"
    MOONELDRAZIDFC = "mooneldrazidfc"
    MOONREVERSEMOONDFC = "moonreversemoondfc"
    WAXINGANDWANINGMOONDFC = "waxingandwaningmoondfc"
    SHOWCASE = "showcase"
    EXTENDEDART = "extendedart"
    COMPANION = "companion"
    FULLART = "fullart"
class ScryBorderColor(str, Enum):
    """Enum for card border_color"""

    # Possible values of a card object's border_color field.
    BLACK = "black"
    BORDERLESS = "borderless"
    GOLD = "gold"
    SILVER = "silver"
    WHITE = "white"
class ScryGame(str, Enum):
    """Enum for card games"""

    # Possible entries of a card object's games sequence.
    PAPER = "paper"
    ARENA = "arena"
    MTGO = "mtgo"
    SEGA = "sega"
    ASTRAL = "astral"
class ScryRarity(str, Enum):
    """Enum for card rarity"""

    # Possible values of a card object's rarity field.
    COMMON = "common"
    UNCOMMON = "uncommon"
    RARE = "rare"
    MYTHIC = "mythic"
class ScryFormat(str, Enum):
    """Enum for card legalities keys"""

    # Keys of a card object's legalities mapping (see ScryCard.legalities).
    BRAWL = "brawl"
    COMMANDER = "commander"
    DUEL = "duel"
    FRONTIER = "frontier"
    FUTURE = "future"
    LEGACY = "legacy"
    MODERN = "modern"
    OLDSCHOOL = "oldschool"
    PAUPER = "pauper"
    PENNY = "penny"
    STANDARD = "standard"
    VINTAGE = "vintage"
    HISTORIC = "historic"
    PIONEER = "pioneer"
class ScryLegality(str, Enum):
    """Enum for card legalities values"""

    # Values of a card object's legalities mapping (see ScryCard.legalities).
    LEGAL = "legal"
    NOT_LEGAL = "not_legal"
    RESTRICTED = "restricted"
    BANNED = "banned"
@dataclass(frozen=True)
class ScryObjectList(ScryObject):
    """Model for https://scryfall.com/docs/api/lists"""

    object: ClassVar[str] = "list"
    # Field names mirror the JSON keys documented at the URL above.
    # ``data`` holds one page of results; ``next_page``/``has_more``
    # support pagination.
    data: Sequence[ScryObject]
    has_more: bool
    next_page: Optional[URI]
    total_cards: Optional[int]
    warnings: Optional[Sequence[str]]
@dataclass(frozen=True)
class ScrySet(ScryObject):
    """Model for https://scryfall.com/docs/api/sets"""

    object: ClassVar[str] = "set"
    # Field names mirror the JSON keys documented at the URL above.
    id: UUID
    code: str
    mtgo_code: Optional[str]
    arena_code: Optional[str]
    tcgplayer_id: Optional[int]
    name: str
    set_type: ScrySetType
    released_at: Optional[dt.date]
    block_code: Optional[str]
    block: Optional[str]
    parent_set_code: Optional[str]
    card_count: int
    digital: bool
    foil_only: bool
    nonfoil_only: Optional[bool]
    icon_svg_uri: URI
    search_uri: URI
    scryfall_uri: URI
    uri: URI
@dataclass(frozen=True)
class ScryRelatedCard(ScryObject):
    """Model for https://scryfall.com/docs/api/cards#related-card-objects"""

    object: ClassVar[str] = "related_card"
    # Field names mirror the JSON keys documented at the URL above.
    id: UUID
    component: str
    name: str
    type_line: str
    uri: URI
@dataclass(frozen=True)
class ScryCardFace(ScryObject):
    """Model for https://scryfall.com/docs/api/cards#card-face-objects"""

    object: ClassVar[str] = "card_face"
    # Field names mirror the JSON keys documented at the URL above.
    artist: Optional[str]
    artist_id: Optional[UUID]
    color_indicator: Optional[Sequence[ScryColor]]
    colors: Optional[Sequence[ScryColor]]
    flavor_text: Optional[str]
    illustration_id: Optional[UUID]
    image_uris: Optional[Dict[str, URI]]
    loyalty: Optional[str]
    mana_cost: str
    name: str
    oracle_text: Optional[str]
    power: Optional[str]
    printed_name: Optional[str]
    printed_text: Optional[str]
    printed_type_line: Optional[str]
    toughness: Optional[str]
    type_line: Optional[str]
    watermark: Optional[str]
@dataclass(frozen=True)
class CardPreviewBlock:
    """Model for card preview block (the ``preview`` field of a card)."""

    source: str
    # TODO: should be annotated URI; URI is a NewType of str, so the change
    # would be annotation-only — verify downstream deserializers first.
    source_uri: str
    previewed_at: dt.date
@dataclass(frozen=True)
class ScryCard(ScryObject):
    """Model for https://scryfall.com/docs/api/cards"""

    object: ClassVar[str] = "card"
    # Field names mirror the JSON keys documented at the URL above,
    # grouped the same way the API docs group them.
    # Core Card Fields
    arena_id: Optional[int]
    id: UUID
    lang: str
    mtgo_id: Optional[int]
    mtgo_foil_id: Optional[int]
    multiverse_ids: Optional[Sequence[int]]
    tcgplayer_id: Optional[int]
    oracle_id: UUID
    prints_search_uri: URI
    rulings_uri: URI
    scryfall_uri: URI
    uri: URI
    # Gameplay Fields
    all_parts: Optional[Sequence[ScryRelatedCard]]
    card_faces: Optional[Sequence[ScryCardFace]]
    cmc: Decimal
    colors: Optional[Sequence[ScryColor]]
    color_identity: Sequence[ScryColor]
    color_indicator: Optional[Sequence[ScryColor]]
    edhrec_rank: Optional[int]
    foil: bool
    hand_modifier: Optional[str]
    keywords: Sequence[str]
    layout: ScryCardLayout
    legalities: Dict[ScryFormat, ScryLegality]
    life_modifier: Optional[str]
    loyalty: Optional[str]
    mana_cost: Optional[str]
    name: str
    nonfoil: bool
    oracle_text: Optional[str]
    oversized: bool
    power: Optional[str]
    produced_mana: Optional[Sequence[ScryColor]]
    reserved: bool
    toughness: Optional[str]
    type_line: Optional[str]
    # Print Fields
    artist: Optional[str]
    artist_ids: Optional[Sequence[UUID]]
    booster: bool
    border_color: ScryBorderColor
    card_back_id: UUID
    collector_number: str
    content_warning: Optional[bool]
    digital: bool
    flavor_name: Optional[str]
    flavor_text: Optional[str]
    frame_effect: Optional[ScryFrameEffect]
    frame_effects: Optional[Sequence[ScryFrameEffect]]
    frame: ScryCardFrame
    full_art: bool
    games: Sequence[ScryGame]
    highres_image: bool
    illustration_id: Optional[UUID]
    image_uris: Optional[Dict[str, URI]]
    prices: Optional[Dict[str, Optional[Decimal]]]  # TODO: enum keys
    printed_name: Optional[str]
    printed_text: Optional[str]
    printed_type_line: Optional[str]
    promo: bool
    promo_types: Optional[Sequence[str]]
    purchase_uris: Optional[Dict[str, URI]]
    rarity: ScryRarity
    related_uris: Optional[Dict[str, URI]]
    released_at: dt.date
    reprint: bool
    scryfall_set_uri: URI
    set_name: str
    set_search_uri: URI
    set_type: str
    set_uri: URI
    # "set" shadows the builtin name; kept to match the API's JSON key.
    set: str
    story_spotlight: bool
    textless: bool
    variation: bool
    variation_of: Optional[UUID]
    watermark: Optional[str]
    preview: Optional[CardPreviewBlock]
@dataclass(frozen=True)
class ScryBulkData(ScryObject):
    """Model for https://scryfall.com/docs/api/bulk-data"""

    object: ClassVar[str] = "bulk_data"
    # Field names mirror the JSON keys documented at the URL above.
    id: UUID
    uri: URI
    type: str
    name: str
    description: str
    download_uri: URI
    updated_at: dt.datetime
    compressed_size: int
    content_type: str
    content_encoding: str
| |
import datetime
import ujson
import re
import mock
from email.utils import parseaddr
from django.conf import settings
from django.http import HttpResponse
from django.conf import settings
from mock import patch
from typing import Any, Dict, List, Union, Mapping
from zerver.lib.actions import (
do_change_is_admin,
do_change_realm_subdomain,
do_set_realm_property,
do_deactivate_realm,
do_deactivate_stream,
do_create_realm,
do_scrub_realm,
create_stream_if_needed,
do_change_plan_type,
do_send_realm_reactivation_email
)
from confirmation.models import create_confirmation_link, Confirmation
from zerver.lib.send_email import send_future_email
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import tornado_redirected_to_list
from zerver.lib.test_runner import slow
from zerver.models import get_realm, Realm, UserProfile, ScheduledEmail, get_stream, \
CustomProfileField, Message, UserMessage, Attachment, get_user_profile_by_email
class RealmTest(ZulipTestCase):
    def assert_user_profile_cache_gets_new_name(self, user_profile: UserProfile,
                                                new_realm_name: str) -> None:
        """Assert the (possibly cached) user profile sees the new realm name."""
        self.assertEqual(user_profile.realm.name, new_realm_name)

    def test_do_set_realm_name_caching(self) -> None:
        """The main complicated thing about setting realm names is fighting the
        cache, and we start by populating the cache for Hamlet, and we end
        by checking the cache to ensure that the new value is there."""
        self.example_user('hamlet')
        realm = get_realm('zulip')
        new_name = u'Zed You Elle Eye Pea'
        do_set_realm_property(realm, 'name', new_name)
        self.assertEqual(get_realm(realm.string_id).name, new_name)
        self.assert_user_profile_cache_gets_new_name(self.example_user('hamlet'), new_name)

    def test_update_realm_name_events(self) -> None:
        """Renaming the realm should broadcast a realm/update event."""
        realm = get_realm('zulip')
        new_name = u'Puliz'
        events = [] # type: List[Mapping[str, Any]]
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, 'name', new_name)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='name',
            value=new_name,
        ))

    def test_update_realm_description_events(self) -> None:
        """Changing the description directly should broadcast a realm/update event."""
        realm = get_realm('zulip')
        new_description = u'zulip dev group'
        events = [] # type: List[Mapping[str, Any]]
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, 'description', new_description)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='description',
            value=new_description,
        ))

    def test_update_realm_description(self) -> None:
        """Changing the description via the API persists it and fires an event."""
        email = self.example_email("iago")
        self.login(email)
        realm = get_realm('zulip')
        new_description = u'zulip dev group'
        data = dict(description=ujson.dumps(new_description))
        events = [] # type: List[Mapping[str, Any]]
        with tornado_redirected_to_list(events):
            result = self.client_patch('/json/realm', data)
            self.assert_json_success(result)
            realm = get_realm('zulip')
            self.assertEqual(realm.description, new_description)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='description',
            value=new_description,
        ))

    def test_realm_description_length(self) -> None:
        """Overlong descriptions are rejected and not persisted."""
        new_description = u'A' * 1001
        data = dict(description=ujson.dumps(new_description))
        # create an admin user
        email = self.example_email("iago")
        self.login(email)
        result = self.client_patch('/json/realm', data)
        self.assert_json_error(result, 'Organization description is too long.')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.description, new_description)

    def test_realm_name_length(self) -> None:
        """Names longer than MAX_REALM_NAME_LENGTH are rejected and not persisted."""
        new_name = u'A' * (Realm.MAX_REALM_NAME_LENGTH + 1)
        data = dict(name=ujson.dumps(new_name))
        # create an admin user
        email = self.example_email("iago")
        self.login(email)
        result = self.client_patch('/json/realm', data)
        self.assert_json_error(result, 'Organization name is too long.')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.name, new_name)

    def test_admin_restrictions_for_changing_realm_name(self) -> None:
        """Non-admins may not rename the organization."""
        new_name = 'Mice will play while the cat is away'
        user_profile = self.example_user('othello')
        email = user_profile.email
        self.login(email)
        do_change_is_admin(user_profile, False)
        req = dict(name=ujson.dumps(new_name))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Must be an organization administrator')

    def test_unauthorized_name_change(self) -> None:
        """With name_changes_disabled, a user's full-name change fails silently,
        but a realm admin can still change their own name."""
        data = {'full_name': 'Sir Hamlet'}
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        self.login(email)
        do_set_realm_property(user_profile.realm, 'name_changes_disabled', True)
        url = '/json/settings'
        result = self.client_patch(url, data)
        self.assertEqual(result.status_code, 200)
        # Since the setting fails silently, no message is returned
        self.assert_in_response("", result)
        # Realm admins can change their name even setting is disabled.
        data = {'full_name': 'New Iago'}
        self.login(self.example_email("iago"))
        url = '/json/settings'
        result = self.client_patch(url, data)
        self.assert_in_success_response(['"full_name":"New Iago"'], result)

    def test_do_deactivate_realm_clears_user_realm_cache(self) -> None:
        """The main complicated thing about deactivating realm names is
        updating the cache, and we start by populating the cache for
        Hamlet, and we end by checking the cache to ensure that his
        realm appears to be deactivated. You can make this test fail
        by disabling cache.flush_realm()."""
        self.example_user('hamlet')
        realm = get_realm('zulip')
        do_deactivate_realm(realm)
        user = self.example_user('hamlet')
        self.assertTrue(user.realm.deactivated)

    def test_do_change_realm_subdomain_clears_user_realm_cache(self) -> None:
        """The main complicated thing about changing realm subdomains is
        updating the cache, and we start by populating the cache for
        Hamlet, and we end by checking the cache to ensure that his
        realm is found under the new subdomain. You can make this test
        fail by disabling cache.flush_realm()."""
        user = get_user_profile_by_email('hamlet@zulip.com')
        realm = get_realm('zulip')
        do_change_realm_subdomain(realm, "newzulip")
        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user.realm.string_id, "newzulip")
        # This doesn't use a cache right now, but may later.
        self.assertIsNone(get_realm("zulip"))

    def test_do_deactivate_realm_clears_scheduled_jobs(self) -> None:
        """Deactivating a realm deletes its pending scheduled emails."""
        user = self.example_user('hamlet')
        send_future_email('zerver/emails/followup_day1', user.realm,
                          to_user_ids=[user.id], delay=datetime.timedelta(hours=1))
        self.assertEqual(ScheduledEmail.objects.count(), 1)
        do_deactivate_realm(user.realm)
        self.assertEqual(ScheduledEmail.objects.count(), 0)

    def test_do_deactivate_realm_on_deactived_realm(self) -> None:
        """Ensure early exit is working in realm deactivation"""
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
        # Deactivating a second time must be a no-op, not an error.
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)

    def test_realm_reactivation_link(self) -> None:
        """Visiting a valid reactivation confirmation link reactivates the realm."""
        realm = get_realm('zulip')
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
        confirmation_url = create_confirmation_link(realm, realm.host, Confirmation.REALM_REACTIVATION)
        response = self.client_get(confirmation_url)
        self.assert_in_success_response(['Your organization has been successfully reactivated'], response)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)

    def test_do_send_realm_reactivation_email(self) -> None:
        """The reactivation email comes from a tokenized noreply address and
        contains a working confirmation link."""
        realm = get_realm('zulip')
        do_send_realm_reactivation_email(realm)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)
        from_email = outbox[0].from_email
        tokenized_no_reply_email = parseaddr(from_email)[1]
        self.assertIn("Zulip Account Security", from_email)
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
        self.assertIn('Reactivate your Zulip organization', outbox[0].subject)
        self.assertIn('To reactivate organization, please click here:', outbox[0].body)
        admins = realm.get_admin_users()
        confirmation_url = self.get_confirmation_url_from_outbox(admins[0].email)
        response = self.client_get(confirmation_url)
        self.assert_in_success_response(['Your organization has been successfully reactivated'], response)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)

    def test_realm_reactivation_with_random_link(self) -> None:
        """An unknown reactivation token shows the expired/invalid page."""
        random_link = "/reactivate/5e89081eb13984e0f3b130bf7a4121d153f1614b"
        response = self.client_get(random_link)
        self.assert_in_success_response(['The organization reactivation link has expired or is not valid.'], response)

    def test_change_notifications_stream(self) -> None:
        """notifications_stream can be disabled (-1), set, and rejects bad ids."""
        # We need an admin user.
        email = 'iago@zulip.com'
        self.login(email)
        disabled_notif_stream_id = -1
        req = dict(notifications_stream_id = ujson.dumps(disabled_notif_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.notifications_stream, None)
        new_notif_stream_id = 4
        req = dict(notifications_stream_id = ujson.dumps(new_notif_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.notifications_stream.id, new_notif_stream_id)
        invalid_notif_stream_id = 1234
        req = dict(notifications_stream_id = ujson.dumps(invalid_notif_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid stream id')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id)

    def test_get_default_notifications_stream(self) -> None:
        """get_notifications_stream() returns None once the stream is deactivated."""
        realm = get_realm("zulip")
        verona = get_stream("verona", realm)
        realm.notifications_stream_id = verona.id
        realm.save(update_fields=["notifications_stream"])
        notifications_stream = realm.get_notifications_stream()
        self.assertEqual(notifications_stream.id, verona.id)
        do_deactivate_stream(notifications_stream)
        self.assertIsNone(realm.get_notifications_stream())

    def test_change_signup_notifications_stream(self) -> None:
        """signup_notifications_stream can be disabled (-1), set, and rejects bad ids."""
        # We need an admin user.
        email = 'iago@zulip.com'
        self.login(email)
        disabled_signup_notifications_stream_id = -1
        req = dict(signup_notifications_stream_id = ujson.dumps(disabled_signup_notifications_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.signup_notifications_stream, None)
        new_signup_notifications_stream_id = 4
        req = dict(signup_notifications_stream_id = ujson.dumps(new_signup_notifications_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.signup_notifications_stream.id, new_signup_notifications_stream_id)
        invalid_signup_notifications_stream_id = 1234
        req = dict(signup_notifications_stream_id = ujson.dumps(invalid_signup_notifications_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid stream id')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.signup_notifications_stream.id, invalid_signup_notifications_stream_id)

    def test_get_default_signup_notifications_stream(self) -> None:
        """get_signup_notifications_stream() returns None once the stream is deactivated."""
        realm = get_realm("zulip")
        verona = get_stream("verona", realm)
        realm.signup_notifications_stream = verona
        realm.save(update_fields=["signup_notifications_stream"])
        signup_notifications_stream = realm.get_signup_notifications_stream()
        self.assertEqual(signup_notifications_stream, verona)
        do_deactivate_stream(signup_notifications_stream)
        self.assertIsNone(realm.get_signup_notifications_stream())

    def test_change_realm_default_language(self) -> None:
        """Valid default_language values persist; invalid ones are rejected."""
        new_lang = "de"
        realm = get_realm('zulip')
        self.assertNotEqual(realm.default_language, new_lang)
        # we need an admin user.
        email = self.example_email("iago")
        self.login(email)
        req = dict(default_language=ujson.dumps(new_lang))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.default_language, new_lang)
        # Test to make sure that when invalid languages are passed
        # as the default realm language, correct validation error is
        # raised and the invalid language is not saved in db
        invalid_lang = "invalid_lang"
        req = dict(default_language=ujson.dumps(invalid_lang))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
        realm = get_realm('zulip')
        self.assertNotEqual(realm.default_language, invalid_lang)

    def test_deactivate_realm_by_admin(self) -> None:
        """Admins may deactivate the realm through the API."""
        email = self.example_email('iago')
        self.login(email)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        result = self.client_post('/json/realm/deactivate')
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertTrue(realm.deactivated)

    def test_deactivate_realm_by_non_admin(self) -> None:
        """Non-admins may not deactivate the realm through the API."""
        email = self.example_email('hamlet')
        self.login(email)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        result = self.client_post('/json/realm/deactivate')
        self.assert_json_error(result, "Must be an organization administrator")
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)

    def test_change_bot_creation_policy(self) -> None:
        """bot_creation_policy accepts known values and rejects unknown ones."""
        # We need an admin user.
        email = 'iago@zulip.com'
        self.login(email)
        req = dict(bot_creation_policy = ujson.dumps(Realm.BOT_CREATION_LIMIT_GENERIC_BOTS))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        invalid_add_bot_permission = 4
        req = dict(bot_creation_policy = ujson.dumps(invalid_add_bot_permission))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid bot creation policy')

    def test_change_email_address_visibility(self) -> None:
        """email_address_visibility accepts known values and rejects unknown ones."""
        # We need an admin user.
        email = 'iago@zulip.com'
        self.login(email)
        invalid_value = 4
        req = dict(email_address_visibility = ujson.dumps(invalid_value))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid email address visibility policy')
        realm = get_realm("zulip")
        self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
        req = dict(email_address_visibility = ujson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)

    def test_change_video_chat_provider(self) -> None:
        """Switching to Google Hangouts requires a valid domain; Jitsi does not."""
        self.assertEqual(get_realm('zulip').video_chat_provider, "Jitsi")
        email = self.example_email("iago")
        self.login(email)
        req = {"video_chat_provider": ujson.dumps("Google Hangouts")}
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Invalid domain: Domain can't be empty.")
        req = {
            "video_chat_provider": ujson.dumps("Google Hangouts"),
            "google_hangouts_domain": ujson.dumps("invaliddomain"),
        }
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Invalid domain: Domain must have at least one dot (.)")
        req = {
            "video_chat_provider": ujson.dumps("Google Hangouts"),
            "google_hangouts_domain": ujson.dumps("zulip.com"),
        }
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        self.assertEqual(get_realm('zulip').video_chat_provider, "Google Hangouts")
        req = {"video_chat_provider": ujson.dumps("Jitsi")}
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        self.assertEqual(get_realm('zulip').video_chat_provider, "Jitsi")

    def test_initial_plan_type(self) -> None:
        """New realms start LIMITED when billing is enabled, SELF_HOSTED otherwise."""
        with self.settings(BILLING_ENABLED=True):
            self.assertEqual(do_create_realm('hosted', 'hosted').plan_type, Realm.LIMITED)
            self.assertEqual(get_realm("hosted").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
            self.assertEqual(get_realm("hosted").message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
        with self.settings(BILLING_ENABLED=False):
            self.assertEqual(do_create_realm('onpremise', 'onpremise').plan_type, Realm.SELF_HOSTED)
            self.assertEqual(get_realm('onpremise').max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
            self.assertEqual(get_realm('onpremise').message_visibility_limit, None)

    def test_change_plan_type(self) -> None:
        """do_change_plan_type adjusts max_invites and message_visibility_limit."""
        user = self.example_user('iago')
        realm = get_realm('zulip')
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        do_change_plan_type(user, Realm.STANDARD)
        realm = get_realm('zulip')
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        do_change_plan_type(user, Realm.LIMITED)
        realm = get_realm('zulip')
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
        do_change_plan_type(user, Realm.STANDARD_FREE)
        realm = get_realm('zulip')
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
class RealmAPITest(ZulipTestCase):
    """Tests exercising realm-property updates through the /json/realm API."""

    def setUp(self) -> None:
        # Realm updates require an organization administrator, so promote
        # Cordelia before logging in.
        user_profile = self.example_user('cordelia')
        email = user_profile.email
        self.login(email)
        do_change_is_admin(user_profile, True)

    def set_up_db(self, attr: str, value: Any) -> None:
        """Write a realm attribute directly to the database (bypassing the API)."""
        realm = get_realm('zulip')
        setattr(realm, attr, value)
        realm.save(update_fields=[attr])

    def update_with_api(self, name: str, value: Any) -> Realm:
        """PATCH a single realm property via the API and return fresh realm data.

        ``value`` may be a bool, int or string depending on the property
        (the previous ``int`` annotation was wrong).
        """
        result = self.client_patch('/json/realm', {name: ujson.dumps(value)})
        self.assert_json_success(result)
        return get_realm('zulip')  # refresh data

    def do_test_realm_update_api(self, name: str) -> None:
        """Test updating realm properties.

        If new realm properties have been added to the Realm model but the
        test_values dict below has not been updated, this will raise an
        assertion error.
        """
        bool_tests = [False, True]  # type: List[bool]
        test_values = dict(
            default_language=[u'de', u'en'],
            description=[u'Realm description', u'New description'],
            message_retention_days=[10, 20],
            name=[u'Zulip', u'New Name'],
            waiting_period_threshold=[10, 20],
            bot_creation_policy=[1, 2],
            email_address_visibility=[Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
                                      Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS],
            video_chat_provider=[u'Jitsi', u'Hangouts'],
            google_hangouts_domain=[u'zulip.com', u'zulip.org'],
        )  # type: Dict[str, Any]
        vals = test_values.get(name)
        if Realm.property_types[name] is bool:
            vals = bool_tests
        if vals is None:
            # Trailing comma makes this a proper one-tuple for %-formatting.
            raise AssertionError('No test created for %s' % (name,))
        # Round-trip both values through the API to catch one-way bugs.
        self.set_up_db(name, vals[0])
        realm = self.update_with_api(name, vals[1])
        self.assertEqual(getattr(realm, name), vals[1])
        realm = self.update_with_api(name, vals[0])
        self.assertEqual(getattr(realm, name), vals[0])

    @slow("Tests a dozen properties in a loop")
    def test_update_realm_properties(self) -> None:
        """Run do_test_realm_update_api for every property in property_types."""
        for prop in Realm.property_types:
            self.do_test_realm_update_api(prop)

    def test_update_realm_allow_message_editing(self) -> None:
        """Tests updating the realm property 'allow_message_editing'."""
        self.set_up_db('allow_message_editing', False)
        self.set_up_db('message_content_edit_limit_seconds', 0)
        self.set_up_db('allow_community_topic_editing', False)
        realm = self.update_with_api('allow_message_editing', True)
        realm = self.update_with_api('message_content_edit_limit_seconds', 100)
        realm = self.update_with_api('allow_community_topic_editing', True)
        self.assertEqual(realm.allow_message_editing, True)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api('allow_message_editing', False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api('message_content_edit_limit_seconds', 200)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api('allow_community_topic_editing', False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, False)

    def test_update_realm_allow_message_deleting(self) -> None:
        """Tests updating the realm property 'allow_message_deleting'."""
        self.set_up_db('allow_message_deleting', True)
        self.set_up_db('message_content_delete_limit_seconds', 0)
        realm = self.update_with_api('allow_message_deleting', False)
        self.assertEqual(realm.allow_message_deleting, False)
        self.assertEqual(realm.message_content_delete_limit_seconds, 0)
        realm = self.update_with_api('allow_message_deleting', True)
        realm = self.update_with_api('message_content_delete_limit_seconds', 100)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 100)
        realm = self.update_with_api('message_content_delete_limit_seconds', 600)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 600)
class ScrubRealmTest(ZulipTestCase):
    def test_scrub_realm(self) -> None:
        """do_scrub_realm should delete the target realm's messages,
        attachments and custom profile fields and scramble its users'
        names/emails, while leaving other realms untouched."""
        zulip = get_realm("zulip")
        lear = get_realm("lear")
        iago = self.example_user("iago")
        othello = self.example_user("othello")
        cordelia = self.lear_user("cordelia")
        king = self.lear_user("king")
        create_stream_if_needed(lear, "Shakespeare")
        self.subscribe(cordelia, "Shakespeare")
        self.subscribe(king, "Shakespeare")
        Message.objects.all().delete()
        UserMessage.objects.all().delete()
        # Seed both realms with identical amounts of data.
        for i in range(5):
            self.send_stream_message(iago.email, "Scotland")
            self.send_stream_message(othello.email, "Scotland")
            self.send_stream_message(cordelia.email, "Shakespeare", sender_realm="lear")
            self.send_stream_message(king.email, "Shakespeare", sender_realm="lear")
        Attachment.objects.filter(realm=zulip).delete()
        Attachment.objects.create(realm=zulip, owner=iago, path_id="a/b/temp1.txt")
        Attachment.objects.create(realm=zulip, owner=othello, path_id="a/b/temp2.txt")
        Attachment.objects.filter(realm=lear).delete()
        Attachment.objects.create(realm=lear, owner=cordelia, path_id="c/d/temp1.txt")
        Attachment.objects.create(realm=lear, owner=king, path_id="c/d/temp2.txt")
        CustomProfileField.objects.create(realm=lear)
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 10)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 20)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
        with mock.patch('logging.warning'):
            do_scrub_realm(zulip)
        # Only the zulip realm's data is gone; lear's counts are unchanged.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 0)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 0)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
        self.assertEqual(Attachment.objects.filter(realm=zulip).count(), 0)
        self.assertEqual(Attachment.objects.filter(realm=lear).count(), 2)
        self.assertEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=lear).count(), 0)
        # Scrubbed users get placeholder names and @<host> emails.
        zulip_users = UserProfile.objects.filter(realm=zulip)
        for user in zulip_users:
            self.assertTrue(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
        lear_users = UserProfile.objects.filter(realm=lear)
        for user in lear_users:
            self.assertIsNone(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
| |
import numpy as np
import re
from collections import deque
from twitch import TwitchChatStream
import random
import time
import exrex
import copy
# Reply-template regex (fed to exrex to generate random variations).
# "__username__"/"__WORD__" look like placeholders substituted elsewhere --
# confirm against the consuming code.
# FIX: the trailing "|" left behind when the third alternative was commented
# out made the pattern match the empty string; it has been removed. Raw
# strings avoid the invalid "\." escape in normal string literals.
EXREX_REGEX_ONE = (r"(@__username__: (Wow|Amazing|Fascinating|Incredible|Marvelous|Wonderful|AAAAAah|OMG)\. __WORD__, that's (deep|wild|trippy|dope|weird|spacy), (man|dude|brother|bro|buddy|my man|mate|homie|brah|dawg)\. (Thanks|Kudos|Props|Respect) for (writing stuff|sending ideas) to steer my trip\.)|"
                   r"(@__username__: __WORD__, __WORD__, __WORD__ (EVERYWHERE|ALL AROUND|ALL OVER)\. (WaaaaAAAah|Wooooooooooow))"
                   #"(Wow, very @__username__, such __word__, much (amazing|bazinga|space|woop)\.)" #disliked by native english people
                   )
# Reply-template regex for exactly two recognized words; "__word0__" and
# "__word1__" look like placeholders substituted elsewhere -- confirm
# against the consuming code.
EXREX_REGEX_TWO = ("(@__username__: One __word0__ with __word1__ coming (up next|your way)!)|"
                   "(@__username__: Yeah, let's (try|create|dream of|watch) __word0__ with a (topping|layer) of __word1__!)"
                   )
# Reply-template regex used when more than two recognized words are present.
# FIX: raw string so "\." is a regex escape rather than an invalid string
# escape (SyntaxWarning on modern Python); the value is unchanged.
EXREX_REGEX_MORE = (r"(@__username__: __words__, I'll mash them all up for ya\.)")
class ChatReader(TwitchChatStream):
    """Twitch chat client that maps viewer messages to class indices.

    Builds word -> class-index lookup tables from ``data/classes.npy`` and,
    on each poll, scans buffered chat messages for known words, replies with
    a canned message and exposes the selected feature indices to the caller.
    NOTE: Python 2 code (print statements, ``dict.iteritems``).
    """
    def __init__(self, *args, **kwargs):
        """Load class labels and build the phrase/word lookup regexes."""
        super(ChatReader, self).__init__(*args, **kwargs)
        #make a data structure to easily parse chat messages
        self.classes = np.load("data/classes.npy")
        # Common English stop words that must never select a class.
        ignore_list = ['the', 'of', 't', 'and', "the", "be", "to", "of", "and", "a", "in", "that", "have", "i", "it",
                       "for", "not", "on", "with", "he", "as", "you", "do", "at", "this", "but", "his", "by",
                       "from", "they", "we", "say", "her", "she", "or", "an", "will", "my", "one", "all", "would",
                       "there", "their", "what", "so", "up", "out", "if", "about", "who", "get", "which",
                       "go", "me", "when", "make", "can", "like", "time", "no", "just", "him", "know", "take",
                       "people", "into", "year", "your", "good", "some", "could", "them", "see", "other",
                       "than", "then", "now", "look", "only", "come", "its", "over", "think", "also",
                       "back", "after", "use", "two", "how", "our", "work", "first", "well", "way", "even", "new",
                       "want", "because", "any", "these", "give", "day", "most", "us"]
        # First, check if the complete string matches
        # Map each label string (first column of classes) to its row index.
        d = {d[0]:i for i,d in enumerate(self.classes)}
        # full_dictionary: complete comma-separated phrases -> class indices.
        self.full_dictionary = {}
        # NOTE(review): the loop variable ``str`` shadows the builtin (here
        # and in the second loop below).
        for str,i in d.iteritems():
            for word in str.split(','):
                word = word.lower().strip()
                if word in ignore_list:
                    continue
                if word in self.full_dictionary:
                    self.full_dictionary[word].append(i)
                else:
                    self.full_dictionary[word] = [i]
        # r'\bAND\b | \bOR\b | \bNOT\b'
        # Each entry pairs a lookup dict with the regex that feeds it;
        # they are tried in order by process_the_chat.
        self.regexes = []
        regex_string = " | ".join([r"^%s$"%word.replace(" ",r"\ ") for word in self.full_dictionary.keys()])
        self.regexes.append((self.full_dictionary, re.compile(regex_string, flags=re.I | re.X)))
        regex_string2 = " | ".join([r"\b%s\b"%word.replace(" ",r"\ ") for word in self.full_dictionary.keys()])
        self.dictionary = copy.deepcopy(self.full_dictionary)
        # Second, check if complete string matches a word
        for str,i in d.iteritems():
            for word in re.findall(r"[\w']+", str):
                word = word.lower()
                if word in ignore_list:
                    continue
                if word in self.dictionary:
                    self.dictionary[word].append(i)
                else:
                    self.dictionary[word] = [i]
        regex_string = " | ".join([r"^%s$"%word.replace(" ",r"\ ") for word in self.dictionary.keys()])
        self.regexes.append((self.dictionary, re.compile(regex_string, flags=re.I | re.X)))
        # This was deemed too sensitive by a lot of people
        """
        # third, check if complete thing is in string
        self.regexes.append((self.full_dictionary, re.compile(regex_string2, flags=re.I | re.X)))
        # fourth, check if words are found in the string
        regex_string = " | ".join([r"\b%s\b"%word.replace(" ",r"\ ") for word in self.dictionary.keys()])
        self.regexes.append((self.dictionary, re.compile(regex_string, flags=re.I | re.X)))
        """
        self.currentwords = deque(maxlen=1)
        # Start on a random class until chat picks one (assumes >= 1000
        # classes in data/classes.npy -- TODO confirm).
        self.current_features = [random.randint(0,999)]
        self.last_read_time = 0
        # Minimum time (seconds) the current subject is kept before chat
        # can change it again.
        self.hold_subject_seconds = 60
        self.display_string = ""
        self.message_queue = deque(maxlen=100)
        self.max_features = 2
    @staticmethod
    def get_cheesy_chat_message(username, words):
        """Render a canned reply for ``username`` about ``words``.

        Picks the template set matching ``len(words)`` and fills in the
        ``__username__`` / ``__word*__`` placeholders (plus capitalised
        variants).
        """
        if len(words)==1:
            return exrex.getone(EXREX_REGEX_ONE).replace("__username__", username)\
                .replace("__USERNAME__", username.capitalize())\
                .replace("__word__",words[0])\
                .replace("__WORD__",words[0].capitalize())
        elif len(words)==2:
            return exrex.getone(EXREX_REGEX_TWO).replace("__username__", username)\
                .replace("__USERNAME__", username.capitalize())\
                .replace("__word0__",words[0])\
                .replace("__WORD0__",words[0].capitalize())\
                .replace("__word1__",words[1])\
                .replace("__WORD1__",words[1].capitalize())
        else:
            wordstring = " & ".join(words)
            return exrex.getone(EXREX_REGEX_MORE).replace("__username__", username)\
                .replace("__USERNAME__", username.capitalize())\
                .replace("__words__",wordstring)\
                .replace("__WORDS__",wordstring.capitalize())
    def process_the_chat(self):
        """Poll chat and return ``(feature_indices, display_string)``.

        Buffers incoming messages; once ``hold_subject_seconds`` has
        elapsed since the last accepted message, scans the buffer (in
        random order) for known words, replies in chat, and switches the
        current features. Any exception is swallowed so chat input can
        never crash the program.
        """
        display_string = self.display_string
        features = self.current_features
        messages = self.twitch_recieve_messages() #you always need to check for ping messages
        self.message_queue.extend(messages)
        # [{'username': '317070', 'message': u'test again', 'channel': '#317070'}]
        # Keep the current subject until the hold period expires.
        if time.time() - self.last_read_time < self.hold_subject_seconds:
            return features, display_string
        try:
            messages = list(self.message_queue)
            random.shuffle(messages)
            self.message_queue.clear()
            #spaghetti code warning ahead
            found = False
            for message in messages:
                # '+'-separated sub-queries, each matched independently.
                queries = filter(None, [w.strip() for w in message['message'].split('+')])
                total_features = []
                total_correct_terms = []
                for query in queries:
                    # Try phrase matching first, then word matching.
                    for dictionary, regex in self.regexes:
                        hits = regex.findall(query)
                        if hits:
                            print hits
                            correct_terms = []
                            features = []
                            words_used = []
                            for h in set(hits):
                                word = h.lower()
                                # Skip words that map to the class already shown.
                                if any(current_feature in dictionary[word] for current_feature in self.current_features):
                                    continue
                                feature = random.choice(dictionary[word])
                                features.append(feature)
                                correct_term = ""
                                #print self.classes[feature][0].lower()
                                for term in self.classes[feature][0].lower().split(','):
                                    if word in term:
                                        correct_term = term.strip()
                                        break
                                correct_terms.append(correct_term)
                                words_used.append(word)
                            if len(features)==0:
                                continue
                            #We want at most (max_features) features
                            #print features, correct_terms
                            features, correct_terms, words_used = zip(*random.sample(zip(features, correct_terms, words_used), min(len(features), self.max_features)))
                            # Try to preserve the order the words appeared in
                            # the original message.
                            if len(words_used)>1:
                                if message['message'].index(words_used[1]) < message['message'].index(words_used[0]):
                                    # NOTE(review): these are tuples (from zip),
                                    # which have no .reverse(); even on lists
                                    # .reverse() returns None, so this sets all
                                    # three to None and the broad except below
                                    # hides the failure. Should be e.g.
                                    # ``features = features[::-1]``.
                                    features = features.reverse()
                                    correct_terms = correct_terms.reverse()
                                    words_used = words_used.reverse()
                            #print regex.pattern
                            total_features.extend(features)
                            total_correct_terms.extend(correct_terms)
                            break
                if len(total_features)==0:
                    continue
                # Cap at two features/terms across all sub-queries.
                total_features = total_features[:2]
                total_correct_terms = total_correct_terms[:2]
                username = message['username']
                if len(total_features)==1:
                    display_string = "@"+username+": "+total_correct_terms[0]
                else:
                    display_string = " & ".join(total_correct_terms)
                chat_message = ChatReader.get_cheesy_chat_message(username, total_correct_terms)
                self.send_chat_message(chat_message)
                self.last_read_time = time.time()
                found = True
                break
            if not found:
                return self.current_features, self.display_string
            self.current_features = total_features
            self.display_string = display_string
            print [self.classes[feature][0] for feature in total_features]
            return total_features, display_string
        except:
            # let the chat users not crash the entire program
            # NOTE(review): the queue is cleared here, so the "messages"
            # print below always shows an empty list; ``sys`` is imported
            # but unused.
            self.message_queue.clear()
            import traceback
            import sys
            print "current things:", self.display_string
            print "messages", list(self.message_queue)
            print(traceback.format_exc())
            return features, display_string #return default and continue with work
| |
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie import fields
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from tastypie.validation import Validation
from course.models import Course, Term, Instructor, MeetingTime, Attribute, Material, FollowEntry, CourseVersion
from schedule.models import ScheduleEntry
from account.models import Profile
class TermResource(ModelResource):
    """Read-only API endpoint exposing academic terms."""

    class Meta:
        queryset = Term.objects.all()
        resource_name = 'term'
        allowed_methods = ['get']
        # Both fields accept any filter expression.
        filtering = dict(value=ALL, name=ALL)
class InstructorResource(ModelResource):
    """Read-only API endpoint exposing instructors, including their
    RateMyProfessors score and link."""

    class Meta:
        queryset = Instructor.objects.all()
        resource_name = 'instructor'
        allowed_methods = ['get']
        filtering = dict(
            first_name=ALL,
            last_name=ALL,
            rmp_score=ALL,
            rmp_link=ALL,
        )
class AttributeResource(ModelResource):
    """Read-only API endpoint exposing course attributes."""

    class Meta:
        queryset = Attribute.objects.all()
        resource_name = 'attribute'
        allowed_methods = ['get']
        filtering = dict(value=ALL, name=ALL)
class MeetingTimeResource(ModelResource):
    """Read-only API endpoint exposing course meeting times.

    NOTE(review): the resource name contains a space, which ends up in
    API URLs -- presumably legacy; confirm with clients before changing.
    """

    class Meta:
        queryset = MeetingTime.objects.all()
        resource_name = 'meeting time'
        allowed_methods = ['get']
        filtering = dict(days=ALL, start_time=ALL, end_time=ALL)
class CourseResource(ModelResource):
    """Read-only endpoint for course sections.

    The related term, instructor and meeting times are embedded in each
    response (``full=True``) rather than returned as URIs.
    """
    term = fields.ToOneField(TermResource, 'term', full=True)
    instructor = fields.ToOneField(InstructorResource, 'instructor', full=True)
    meeting_times = fields.ToManyField(MeetingTimeResource, 'meeting_times', full=True)

    class Meta:
        queryset = Course.objects.all()
        resource_name = 'course'
        allowed_methods = ['get']
        # Relations allow nested lookups (e.g. term__value); plain fields
        # accept any filter expression.
        filtering = {
            'term': ALL_WITH_RELATIONS,
            'crn': ALL,
            'course': ALL,
            'course_link': ALL,
            'section': ALL,
            'title': ALL,
            'bookstore_link': ALL,
            'hours': ALL,
            'attribute': ALL,
            'ctype': ALL,
            'meeting_times': ALL_WITH_RELATIONS,
            'location': ALL,
            'instructor': ALL_WITH_RELATIONS,
            'seats': ALL,
            'status': ALL,
        }
class MaterialResource(ModelResource):
    """Read-only endpoint for course materials (textbooks etc.) with the
    owning term embedded in each response."""
    term = fields.ToOneField(TermResource, 'term', full=True)

    class Meta:
        queryset = Material.objects.all()
        resource_name = 'material'
        allowed_methods = ['get']
        filtering = dict(
            term=ALL_WITH_RELATIONS,
            course_crn=ALL,
            isbn=ALL,
            title=ALL,
            author=ALL,
            publisher=ALL,
            edition=ALL,
            year=ALL,
        )
class CourseVersionResource(ModelResource):
    """Read-only endpoint for historical versions of a course, with the
    owning term embedded in each response."""
    term = fields.ToOneField(TermResource, 'term', full=True)

    class Meta:
        queryset = CourseVersion.objects.all()
        resource_name = 'courseversion'
        allowed_methods = ['get']
        filtering = dict(
            term=ALL_WITH_RELATIONS,
            course_crn=ALL,
            time_created=ALL,
        )
class CRNValidation(Validation):
    """Validate that a (term, course_crn) pair refers to an existing Course.

    ``bundle.data['term']`` is expected to be a term resource URI of the
    form ``/api/v1/term/<pk>/``; the primary key is taken from the fifth
    path segment.
    """

    def is_valid(self, bundle, request=None):
        """Return a dict of field errors; an empty dict means valid."""
        if not bundle.data:
            return {'__all__': 'No data provided.'}
        errors = {}
        course_crn = bundle.data.get('course_crn')
        # BUG FIX: the original called .split('/')[4] directly on
        # bundle.data.get('term'), so a missing term raised AttributeError
        # (and a malformed URI raised IndexError), surfacing as a 500
        # instead of a validation error.
        try:
            term = bundle.data.get('term').split('/')[4]
        except (AttributeError, IndexError):
            errors['term'] = ['A valid term resource URI is required.']
            return errors
        try:
            Course.objects.get(term=term, crn=course_crn)
        except Course.DoesNotExist:
            errors['course_crn'] = ['Course does not exist for this term.']
        return errors
class ScheduleResource(ModelResource):
    """Per-user schedule entries.

    Requires an API key; both reads and writes are scoped to the
    authenticated user, and new entries are validated against existing
    courses via CRNValidation.
    """
    term = fields.ToOneField(TermResource, 'term', full=True)

    class Meta:
        queryset = ScheduleEntry.objects.all()
        resource_name = 'schedule'
        allowed_methods = ['get', 'post', 'delete']
        authentication = ApiKeyAuthentication()
        authorization = Authorization()
        validation = CRNValidation()
        filtering = {
            'course_crn': ALL,
            'term': ALL_WITH_RELATIONS,
        }

    def obj_create(self, bundle, **kwargs):
        # Force ownership: the new entry always belongs to the requester.
        return super(ScheduleResource, self).obj_create(bundle, user=bundle.request.user)

    def get_object_list(self, request):
        # Restrict all reads to the requesting user's own entries.
        return super(ScheduleResource, self).get_object_list(request).filter(user=request.user)
class FollowResource(ModelResource):
    """Courses a user follows for updates.

    Mirrors ScheduleResource: API-key authenticated, user-scoped reads
    and writes, and CRN validation on creation.
    """
    term = fields.ToOneField(TermResource, 'term', full=True)

    class Meta:
        queryset = FollowEntry.objects.all()
        resource_name = 'follow'
        allowed_methods = ['get', 'post', 'delete']
        authentication = ApiKeyAuthentication()
        authorization = Authorization()
        validation = CRNValidation()
        filtering = {
            'course_crn': ALL,
            'term': ALL_WITH_RELATIONS,
        }

    def obj_create(self, bundle, **kwargs):
        # Force ownership: the new entry always belongs to the requester.
        return super(FollowResource, self).obj_create(bundle, user=bundle.request.user)

    def get_object_list(self, request):
        # Restrict all reads to the requesting user's own entries.
        return super(FollowResource, self).get_object_list(request).filter(user=request.user)
class ProfileValidation(Validation):
    """Validation hook for ProfileResource.

    NOTE(review): this is_valid has side effects -- when a Profile already
    exists for the user it updates and saves it with whatever fields are
    present in the payload, then returns an "error" so tastypie aborts the
    duplicate creation. Upserting inside a validator is surprising;
    confirm before relying on or changing this behavior.
    """
    def is_valid(self, bundle, request=None):
        if not bundle.data:
            return {'__all__': 'No data provided.'}
        user = bundle.request.user
        try:
            obj = Profile.objects.get(user=user)
            # Copy over only the fields actually present in the payload,
            # leaving the rest of the profile untouched.
            if 'learning_community' in bundle.data:
                obj.learning_community = bundle.data.get('learning_community')
            if 'default_term' in bundle.data:
                obj.default_term = bundle.data.get('default_term')
            if 'facebook_id' in bundle.data:
                obj.facebook_id = bundle.data.get('facebook_id')
            if 'preferred_name' in bundle.data:
                obj.preferred_name = bundle.data.get('preferred_name')
            if 'show_archived_terms' in bundle.data:
                obj.show_archived_terms = bundle.data.get('show_archived_terms')
            if 'show_colors_schedule' in bundle.data:
                obj.show_colors_schedule = bundle.data.get('show_colors_schedule')
            if 'show_details_schedule' in bundle.data:
                obj.show_details_schedule = bundle.data.get('show_details_schedule')
            obj.save()
            # Reported as an error so the POST does not create a duplicate.
            return {'__all__': 'Profile already exists, has been updated.'}
        except Profile.DoesNotExist:
            # No existing profile: creation may proceed.
            return {}
class ProfileResource(ModelResource):
    """Per-user profile settings.

    API-key authenticated; reads are scoped to the requesting user and
    POSTs of an existing profile are turned into updates by
    ProfileValidation.
    """
    class Meta:
        queryset = Profile.objects.all()
        resource_name = 'profile'
        allowed_methods = ['get', 'post']
        authentication = ApiKeyAuthentication()
        authorization = Authorization()
        validation = ProfileValidation()

    def obj_update(self, bundle, **kwargs):
        # Force ownership: updates always target the requester's profile.
        return super(ProfileResource, self).obj_update(bundle, user=bundle.request.user)

    def get_object_list(self, request):
        # Restrict all reads to the requesting user's own profile.
        return super(ProfileResource, self).get_object_list(request).filter(user=request.user)
| |
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Ceilometer notify daemon."""
import shutil
import eventlet
import mock
from oslo_config import fixture as fixture_config
from oslo_context import context
import oslo_messaging
import oslo_messaging.conffixture
import oslo_service.service
from oslo_utils import fileutils
from oslo_utils import timeutils
import six
from stevedore import extension
import yaml
from ceilometer.compute.notifications import instance
from ceilometer import messaging
from ceilometer import notification
from ceilometer.publisher import test as test_publisher
from ceilometer import service
from ceilometer.tests import base as tests_base
# Fake nova request context used to drive notification plugins directly.
TEST_NOTICE_CTXT = {
    u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2',
    u'is_admin': True,
    u'project_id': u'7c150a59fe714e6f9263774af9688f0e',
    u'quota_class': None,
    u'read_deleted': u'no',
    u'remote_address': u'10.0.2.15',
    u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66',
    u'roles': [u'admin'],
    u'timestamp': u'2012-05-08T20:23:41.425105',
    u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
}
# oslo.messaging metadata accompanying the sample notification.
TEST_NOTICE_METADATA = {
    u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451',
    u'timestamp': u'2012-05-08 20:23:48.028195',
}
# Payload of a sample nova compute.instance.* notification; instance_id is
# asserted on by the tests below.
TEST_NOTICE_PAYLOAD = {
    u'created_at': u'2012-05-08 20:23:41',
    u'deleted_at': u'',
    u'disk_gb': 0,
    u'display_name': u'testme',
    u'fixed_ips': [{u'address': u'10.0.0.2',
                    u'floating_ips': [],
                    u'meta': {},
                    u'type': u'fixed',
                    u'version': 4}],
    u'image_ref_url': u'http://10.0.2.15:9292/images/UUID',
    u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1',
    u'instance_type': u'm1.tiny',
    u'instance_type_id': 2,
    u'launched_at': u'2012-05-08 20:23:47.985999',
    u'memory_mb': 512,
    u'state': u'active',
    u'state_description': u'',
    u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e',
    u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
    u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3',
    u'vcpus': 1,
    u'root_gb': 0,
    u'ephemeral_gb': 0,
    u'host': u'compute-host-name',
    u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4',
    u'os_type': u'linux?',
    u'architecture': u'x86',
    u'image_ref': u'UUID',
    u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5',
    u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6',
}
class TestNotification(tests_base.BaseTestCase):
    """Unit tests for NotificationService wiring with mocked managers."""

    def setUp(self):
        super(TestNotification, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.CONF.set_override("connection", "log://", group='database')
        self.CONF.set_override("store_events", False, group="notification")
        self.CONF.set_override("disable_non_metric_meters", False,
                               group="notification")
        self.setup_messaging(self.CONF)
        self.srv = notification.NotificationService()

    def fake_get_notifications_manager(self, pm):
        """Replace the stevedore manager with a single Instance plugin."""
        self.plugin = instance.Instance(pm)
        return extension.ExtensionManager.make_test_instance(
            [
                extension.Extension('test',
                                    None,
                                    None,
                                    self.plugin)
            ]
        )

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start',
                       mock.MagicMock())
    @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint')
    def _do_process_notification_manager_start(self,
                                               fake_event_endpoint_class):
        """Start the service with the notifications manager stubbed out."""
        with mock.patch.object(self.srv,
                               '_get_notifications_manager') as get_nm:
            get_nm.side_effect = self.fake_get_notifications_manager
            self.srv.start()
        self.fake_event_endpoint = fake_event_endpoint_class.return_value

    def test_start_multiple_listeners(self):
        # One listener is expected per configured messaging URL.
        urls = ["fake://vhost1", "fake://vhost2"]
        self.CONF.set_override("messaging_urls", urls, group="notification")
        self._do_process_notification_manager_start()
        self.assertEqual(2, len(self.srv.listeners))

    def test_process_notification(self):
        # Feeding a notification to the plugin should reach the publisher.
        self._do_process_notification_manager_start()
        self.srv.pipeline_manager.pipelines[0] = mock.MagicMock()
        self.plugin.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise',
                         'compute.instance.create.end',
                         TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA)
        self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints))
        self.assertTrue(self.srv.pipeline_manager.publisher.called)

    def test_process_notification_no_events(self):
        # With store_events off, the events endpoint must not be wired in.
        self._do_process_notification_manager_start()
        self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints))
        self.assertNotEqual(self.fake_event_endpoint,
                            self.srv.listeners[0].dispatcher.endpoints[0])

    @mock.patch('ceilometer.pipeline.setup_event_pipeline', mock.MagicMock())
    def test_process_notification_with_events(self):
        # With store_events on, the events endpoint comes first.
        self.CONF.set_override("store_events", True, group="notification")
        self._do_process_notification_manager_start()
        self.assertEqual(2, len(self.srv.listeners[0].dispatcher.endpoints))
        self.assertEqual(self.fake_event_endpoint,
                         self.srv.listeners[0].dispatcher.endpoints[0])

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start',
                       mock.MagicMock())
    @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint')
    def test_unique_consumers(self, fake_event_endpoint_class):
        # Duplicate extensions must collapse to a single listener target.
        def fake_get_notifications_manager_dup_targets(pm):
            plugin = instance.Instance(pm)
            return extension.ExtensionManager.make_test_instance(
                [extension.Extension('test', None, None, plugin),
                 extension.Extension('test', None, None, plugin)])
        with mock.patch.object(self.srv,
                               '_get_notifications_manager') as get_nm:
            get_nm.side_effect = fake_get_notifications_manager_dup_targets
            self.srv.start()
            self.assertEqual(1, len(self.srv.listeners[0].dispatcher.targets))
class BaseRealNotification(tests_base.BaseTestCase):
    """Base class for tests using real pipelines built from temp files."""

    def setup_pipeline(self, counter_names):
        """Write a sample-pipeline YAML for *counter_names*; return its path."""
        pipeline = yaml.dump({
            'sources': [{
                'name': 'test_pipeline',
                'interval': 5,
                'meters': counter_names,
                'sinks': ['test_sink']
            }],
            'sinks': [{
                'name': 'test_sink',
                'transformers': [],
                'publishers': ['test://']
            }]
        })
        if six.PY3:
            # write_to_tempfile needs bytes on Python 3.
            pipeline = pipeline.encode('utf-8')
        pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline,
                                                        prefix="pipeline",
                                                        suffix="yaml")
        return pipeline_cfg_file

    def setup_event_pipeline(self, event_names):
        """Write an event-pipeline YAML for *event_names*; return its path."""
        ev_pipeline = yaml.dump({
            'sources': [{
                'name': 'test_event',
                'events': event_names,
                'sinks': ['test_sink']
            }],
            'sinks': [{
                'name': 'test_sink',
                'publishers': ['test://']
            }]
        })
        if six.PY3:
            ev_pipeline = ev_pipeline.encode('utf-8')
        ev_pipeline_cfg_file = fileutils.write_to_tempfile(
            content=ev_pipeline, prefix="event_pipeline", suffix="yaml")
        return ev_pipeline_cfg_file

    def setUp(self):
        super(BaseRealNotification, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        # Dummy config file to avoid looking for system config
        service.prepare_service(argv=[], config_files=[])
        self.setup_messaging(self.CONF, 'nova')
        pipeline_cfg_file = self.setup_pipeline(['instance', 'memory'])
        self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)
        # One notification yields one 'instance' and one 'memory' sample.
        self.expected_samples = 2
        self.CONF.set_override("store_events", True, group="notification")
        self.CONF.set_override("disable_non_metric_meters", False,
                               group="notification")
        ev_pipeline_cfg_file = self.setup_event_pipeline(
            ['compute.instance.*'])
        self.expected_events = 1
        self.CONF.set_override("event_pipeline_cfg_file",
                               ev_pipeline_cfg_file)
        self.CONF.set_override(
            "definitions_cfg_file",
            self.path_get('etc/ceilometer/event_definitions.yaml'),
            group='event')
        self.publisher = test_publisher.TestPublisher("")

    def _check_notification_service(self):
        """Start the service, emit one notification and verify the output."""
        self.srv.start()
        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)
        # Busy-wait (bounded) until the expected samples/events arrive.
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if (len(self.publisher.samples) >= self.expected_samples and
               len(self.publisher.events) >= self.expected_events):
                break
            eventlet.sleep(0)
        self.assertNotEqual(self.srv.listeners, self.srv.pipeline_listeners)
        self.srv.stop()
        resources = list(set(s.resource_id for s in self.publisher.samples))
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        self.assertEqual(self.expected_events, len(self.publisher.events))
        self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources)
class TestRealNotificationReloadablePipeline(BaseRealNotification):
    """Tests for on-the-fly reloading of pipeline and event-pipeline cfg."""

    def setUp(self):
        super(TestRealNotificationReloadablePipeline, self).setUp()
        self.CONF.set_override('refresh_pipeline_cfg', True)
        self.CONF.set_override('refresh_event_pipeline_cfg', True)
        self.CONF.set_override('pipeline_polling_interval', 1)
        self.srv = notification.NotificationService()

    @mock.patch('ceilometer.publisher.test.TestPublisher')
    def test_notification_pipeline_poller(self, fake_publisher_cls):
        """The service registers a 1s timer calling refresh_pipeline."""
        fake_publisher_cls.return_value = self.publisher
        self.srv.tg = mock.MagicMock()
        self.srv.start()
        pipeline_poller_call = mock.call(1, self.srv.refresh_pipeline)
        self.assertIn(pipeline_poller_call,
                      self.srv.tg.add_timer.call_args_list)

    @mock.patch('ceilometer.publisher.test.TestPublisher')
    def test_notification_reloaded_pipeline(self, fake_publisher_cls):
        """Samples follow the new meter list after the cfg file changes."""
        fake_publisher_cls.return_value = self.publisher
        pipeline_cfg_file = self.setup_pipeline(['instance'])
        self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)
        self.expected_samples = 1
        self.srv.start()
        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if (len(self.publisher.samples) >= self.expected_samples and
                    len(self.publisher.events) >= self.expected_events):
                break
            eventlet.sleep(0)
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        # Flush publisher samples to test reloading
        self.publisher.samples = []
        # Modify the collection targets
        updated_pipeline_cfg_file = self.setup_pipeline(['vcpus',
                                                         'disk.root.size'])
        # Move/re-name the updated pipeline file to the original pipeline
        # file path as recorded in oslo config
        shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file)
        self.expected_samples = 2
        # Random sleep to let the pipeline poller complete the reloading
        eventlet.sleep(3)
        # Send message again to verify the reload works
        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if (len(self.publisher.samples) >= self.expected_samples and
                    len(self.publisher.events) >= self.expected_events):
                break
            eventlet.sleep(0)
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        # BUG FIX: these assertions used to be wrapped in a generator
        # expression that was never iterated, so they never executed.
        for sample in self.publisher.samples:
            self.assertIn(sample.name, ['disk.root.size', 'vcpus'])

    @mock.patch('ceilometer.publisher.test.TestPublisher')
    def test_notification_reloaded_event_pipeline(self, fake_publisher_cls):
        """Events follow the new event list after the cfg file changes."""
        fake_publisher_cls.return_value = self.publisher
        ev_pipeline_cfg_file = self.setup_event_pipeline(
            ['compute.instance.create.start'])
        self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file)
        self.CONF.set_override("store_events", True, group="notification")
        self.expected_events = 1
        self.srv.start()
        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(),
                      'compute.instance.create.start',
                      TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.events) >= self.expected_events:
                break
            eventlet.sleep(0)
        self.assertEqual(self.expected_events, len(self.publisher.events))
        # Flush publisher events to test reloading
        self.publisher.events = []
        # Modify the collection targets
        updated_ev_pipeline_cfg_file = self.setup_event_pipeline(
            ['compute.instance.*'])
        # Move/re-name the updated pipeline file to the original pipeline
        # file path as recorded in oslo config
        shutil.move(updated_ev_pipeline_cfg_file, ev_pipeline_cfg_file)
        self.expected_events = 1
        # Random sleep to let the pipeline poller complete the reloading
        eventlet.sleep(3)
        # Send message again to verify the reload works
        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.events) >= self.expected_events:
                break
            eventlet.sleep(0)
        self.assertEqual(self.expected_events, len(self.publisher.events))
        self.assertEqual(self.publisher.events[0].event_type,
                         'compute.instance.create.end')
class TestRealNotification(BaseRealNotification):
    """End-to-end tests against a real (fake-transport) notification bus."""

    def setUp(self):
        super(TestRealNotification, self).setUp()
        self.srv = notification.NotificationService()

    @mock.patch('ceilometer.publisher.test.TestPublisher')
    def test_notification_service(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher
        self._check_notification_service()

    @mock.patch('ceilometer.publisher.test.TestPublisher')
    def test_notification_service_error_topic(self, fake_publisher_cls):
        # error-priority notifications should still produce events.
        fake_publisher_cls.return_value = self.publisher
        self.srv.start()
        notifier = messaging.get_notifier(self.transport,
                                          'compute.vagrant-precise')
        notifier.error(context.RequestContext(), 'compute.instance.error',
                       TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.events) >= self.expected_events:
                break
            eventlet.sleep(0)
        self.srv.stop()
        self.assertEqual(self.expected_events, len(self.publisher.events))

    @mock.patch('ceilometer.publisher.test.TestPublisher')
    def test_notification_disable_non_metrics(self, fake_publisher_cls):
        self.CONF.set_override("disable_non_metric_meters", True,
                               group="notification")
        # instance is a not a metric. we should only get back memory
        self.expected_samples = 1
        fake_publisher_cls.return_value = self.publisher
        self._check_notification_service()
        self.assertEqual('memory', self.publisher.samples[0].name)

    @mock.patch.object(oslo_service.service.Service, 'stop')
    def test_notification_service_start_abnormal(self, mocked):
        # stop() on a never-started service must still delegate to the
        # base class exactly once (any exception is swallowed here).
        try:
            self.srv.stop()
        except Exception:
            pass
        self.assertEqual(1, mocked.call_count)
class TestRealNotificationHA(BaseRealNotification):
    """End-to-end tests with workload partitioning enabled."""

    def setUp(self):
        super(TestRealNotificationHA, self).setUp()
        self.CONF.set_override('workload_partitioning', True,
                               group='notification')
        self.srv = notification.NotificationService()

    @mock.patch('ceilometer.publisher.test.TestPublisher')
    def test_notification_service(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher
        self._check_notification_service()

    def test_reset_listeners_on_refresh(self):
        """Reconfiguring replaces every pipeline listener instance."""
        self.srv.start()
        listeners = self.srv.pipeline_listeners
        self.assertEqual(20, len(listeners))
        self.srv._configure_pipeline_listeners()
        self.assertEqual(20, len(self.srv.pipeline_listeners))
        # BUG FIX: the original passed the whole ``listeners`` list to
        # assertNotIn (an unhashable operand for the set), instead of
        # checking each individual listener.
        for listener in listeners:
            self.assertNotIn(listener, set(self.srv.pipeline_listeners))
        self.srv.stop()

    def test_retain_common_listeners_on_refresh(self):
        """Listeners for partitions kept across a refresh are reused."""
        with mock.patch('ceilometer.coordination.PartitionCoordinator'
                        '.extract_my_subset', return_value=[1, 2]):
            self.srv.start()
        self.assertEqual(4, len(self.srv.pipeline_listeners))
        listeners = [listener for listener in self.srv.pipeline_listeners]
        with mock.patch('ceilometer.coordination.PartitionCoordinator'
                        '.extract_my_subset', return_value=[1, 3]):
            self.srv._refresh_agent(None)
        self.assertEqual(4, len(self.srv.pipeline_listeners))
        # Partition 1 is common to both assignments, so its listeners
        # survive; the others must have been replaced.
        for listener in listeners:
            if listener.dispatcher.targets[0].topic.endswith('1'):
                self.assertIn(listener, set(self.srv.pipeline_listeners))
            else:
                self.assertNotIn(listener, set(self.srv.pipeline_listeners))
        self.srv.stop()

    @mock.patch('oslo_messaging.Notifier.sample')
    def test_broadcast_to_relevant_pipes_only(self, mock_notifier):
        """Only endpoints whose filter matches re-publish internally."""
        self.srv.start()
        # A non-matching event type must not trigger any internal notify.
        for endpoint in self.srv.listeners[0].dispatcher.endpoints:
            if (hasattr(endpoint, 'filter_rule') and
                not endpoint.filter_rule.match(None, None, 'nonmatching.end',
                                               None, None)):
                continue
            endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise',
                          'nonmatching.end',
                          TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA)
        self.assertFalse(mock_notifier.called)
        # A matching event type is re-published to the event pipeline and
        # both sample pipelines.
        for endpoint in self.srv.listeners[0].dispatcher.endpoints:
            if (hasattr(endpoint, 'filter_rule') and
                not endpoint.filter_rule.match(None, None,
                                               'compute.instance.create.end',
                                               None, None)):
                continue
            endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise',
                          'compute.instance.create.end',
                          TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA)
        self.assertTrue(mock_notifier.called)
        self.assertEqual(3, mock_notifier.call_count)
        self.assertEqual('pipeline.event',
                         mock_notifier.call_args_list[0][1]['event_type'])
        self.assertEqual('ceilometer.pipeline',
                         mock_notifier.call_args_list[1][1]['event_type'])
        self.assertEqual('ceilometer.pipeline',
                         mock_notifier.call_args_list[2][1]['event_type'])
        self.srv.stop()
class TestRealNotificationMultipleAgents(tests_base.BaseTestCase):
def setup_pipeline(self, transformers):
pipeline = yaml.dump({
'sources': [{
'name': 'test_pipeline',
'interval': 5,
'meters': ['instance', 'memory'],
'sinks': ['test_sink']
}],
'sinks': [{
'name': 'test_sink',
'transformers': transformers,
'publishers': ['test://']
}]
})
if six.PY3:
pipeline = pipeline.encode('utf-8')
pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline,
prefix="pipeline",
suffix="yaml")
return pipeline_cfg_file
def setUp(self):
super(TestRealNotificationMultipleAgents, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
service.prepare_service(argv=[], config_files=[])
self.setup_messaging(self.CONF, 'nova')
pipeline_cfg_file = self.setup_pipeline(['instance', 'memory'])
self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)
self.CONF.set_override("store_events", False, group="notification")
self.CONF.set_override("disable_non_metric_meters", False,
group="notification")
self.CONF.set_override('workload_partitioning', True,
group='notification')
self.CONF.set_override('pipeline_processing_queues', 2,
group='notification')
self.publisher = test_publisher.TestPublisher("")
self.publisher2 = test_publisher.TestPublisher("")
def _check_notifications(self, fake_publisher_cls):
fake_publisher_cls.side_effect = [self.publisher, self.publisher2]
self.srv = notification.NotificationService()
self.srv2 = notification.NotificationService()
with mock.patch('ceilometer.coordination.PartitionCoordinator'
'._get_members', return_value=['harry', 'lloyd']):
with mock.patch('uuid.uuid4', return_value='harry'):
self.srv.start()
with mock.patch('uuid.uuid4', return_value='lloyd'):
self.srv2.start()
notifier = messaging.get_notifier(self.transport,
"compute.vagrant-precise")
payload1 = TEST_NOTICE_PAYLOAD.copy()
payload1['instance_id'] = '0'
notifier.info(context.RequestContext(), 'compute.instance.create.end',
payload1)
payload2 = TEST_NOTICE_PAYLOAD.copy()
payload2['instance_id'] = '1'
notifier.info(context.RequestContext(), 'compute.instance.create.end',
payload2)
self.expected_samples = 4
start = timeutils.utcnow()
with mock.patch('six.moves.builtins.hash', lambda x: int(x)):
while timeutils.delta_seconds(start, timeutils.utcnow()) < 60:
if (len(self.publisher.samples + self.publisher2.samples) >=
self.expected_samples):
break
eventlet.sleep(0)
self.srv.stop()
self.srv2.stop()
self.assertEqual(2, len(self.publisher.samples))
self.assertEqual(2, len(self.publisher2.samples))
self.assertEqual(1, len(set(
s.resource_id for s in self.publisher.samples)))
self.assertEqual(1, len(set(
s.resource_id for s in self.publisher2.samples)))
@mock.patch('ceilometer.publisher.test.TestPublisher')
def test_multiple_agents_no_transform(self, fake_publisher_cls):
    """Partitioned agents split work when no transformers are configured."""
    self.CONF.set_override("pipeline_cfg_file", self.setup_pipeline([]))
    self._check_notifications(fake_publisher_cls)
@mock.patch('ceilometer.publisher.test.TestPublisher')
def test_multiple_agents_transform(self, fake_publisher_cls):
    """Partitioned agents split work with one transformer configured."""
    transformer = {
        'name': 'unit_conversion',
        'parameters': {
            'source': {},
            'target': {'name': 'cpu_mins',
                       'unit': 'min',
                       'scale': 'volume'},
        }
    }
    self.CONF.set_override("pipeline_cfg_file",
                           self.setup_pipeline([transformer]))
    self._check_notifications(fake_publisher_cls)
@mock.patch('ceilometer.publisher.test.TestPublisher')
def test_multiple_agents_multiple_transform(self, fake_publisher_cls):
    """Partitioned agents split work with two identical transformers."""

    def _conversion():
        # Build a fresh dict each call so the pipeline receives two
        # independent (but identical) transformer definitions.
        return {
            'name': 'unit_conversion',
            'parameters': {
                'source': {},
                'target': {'name': 'cpu_mins',
                           'unit': 'min',
                           'scale': 'volume'},
            }
        }

    self.CONF.set_override(
        "pipeline_cfg_file",
        self.setup_pipeline([_conversion(), _conversion()]))
    self._check_notifications(fake_publisher_cls)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic building blocks used in eager mode RevNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks_test
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import revnet
from tensorflow.python.client import device_lib
tfe = tf.contrib.eager
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
    """Run a single optimization step and return the training loss."""
    gradients, variables, loss = model.compute_gradients(
        inputs, labels, training=True)
    optimizer.apply_gradients(zip(gradients, variables),
                              global_step=global_step)
    return loss
class RevNetTest(tf.test.TestCase):
  """Unit tests for the eager-mode RevNet model."""

  def setUp(self):
    super(RevNetTest, self).setUp()
    config = config_.get_hparams_cifar_38()
    # Reconstruction could cause numerical error, use double precision for tests
    config.dtype = tf.float64
    config.fused = False  # Fused batch norm does not support tf.float64
    shape = (config.batch_size,) + config.input_shape
    self.model = revnet.RevNet(config=config)
    self.x = tf.random_normal(shape=shape, dtype=tf.float64)
    self.t = tf.random_uniform(
        shape=[config.batch_size],
        minval=0,
        maxval=config.n_classes,
        dtype=tf.int64)
    self.config = config

  def tearDown(self):
    # Drop references so eager tensors/variables can be freed between tests.
    del self.model
    del self.x
    del self.t
    del self.config
    super(RevNetTest, self).tearDown()

  def test_call(self):
    """Test `call` function."""
    y, _ = self.model(self.x, training=False)
    self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])

  def _check_grad_angle_combined(self, grads, grads_true):
    """Verify that the reconstructed gradients has correct direction.

    Due to numerical imprecision, the magnitude may be slightly different.
    Yet according to the paper, the angle should be roughly the same.

    Args:
      grads: list of gradients from reconstruction
      grads_true: list of true gradients
    """

    def _combine(gs):
      # Flatten each gradient so they can be concatenated into one vector.
      return [tf.reshape(g, [-1]) for g in gs]

    g1_all = tf.concat(_combine(grads), axis=0)
    g2_all = tf.concat(_combine(grads_true), axis=0)
    self.assertEqual(len(g1_all.shape), 1)
    self.assertEqual(len(g2_all.shape), 1)
    # Angle (in degrees) between the combined gradient vectors must be small.
    degree = blocks_test.compute_degree(g1_all, g2_all)
    self.assertLessEqual(degree, 1e0)

  def test_compute_gradients(self):
    """Test `compute_gradients` function."""
    self.model(self.x, training=False)  # Initialize model
    grads, vars_, loss = self.model.compute_gradients(
        inputs=self.x, labels=self.t, training=True, l2_reg=True)
    self.assertTrue(isinstance(grads, list))
    self.assertTrue(isinstance(vars_, list))
    self.assertEqual(len(grads), len(vars_))
    for grad, var in zip(grads, vars_):
      self.assertEqual(grad.shape, var.shape)
    # Compare against the true gradient computed by the tape
    with tf.GradientTape() as tape:
      logits, _ = self.model(self.x, training=True)
      loss_true = self.model.compute_loss(logits=logits, labels=self.t)
    grads_true = tape.gradient(loss_true, vars_)
    self.assertAllClose(loss, loss_true)
    self.assertAllClose(grads, grads_true, rtol=1e-4, atol=1e-4)
    self._check_grad_angle_combined(grads, grads_true)

  def test_call_defun(self):
    """Test `call` function with defun."""
    y, _ = tfe.defun(self.model.call)(self.x, training=False)
    self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])

  def test_compute_gradients_defun(self):
    """Test `compute_gradients` function with defun."""
    compute_gradients = tfe.defun(self.model.compute_gradients)
    grads, vars_, _ = compute_gradients(self.x, self.t, training=True)
    self.assertTrue(isinstance(grads, list))
    self.assertTrue(isinstance(vars_, list))
    self.assertEqual(len(grads), len(vars_))
    for grad, var in zip(grads, vars_):
      if grad is not None:
        self.assertEqual(grad.shape, var.shape)

  def test_training_graph(self):
    """Test model training in graph mode."""
    with tf.Graph().as_default():
      config = config_.get_hparams_cifar_38()
      # NOTE(review): input shapes come from self.config (set up in setUp)
      # while the model uses the fresh `config`; both are cifar_38 so they
      # agree, but only by construction -- confirm this is intended.
      x = tf.random_normal(
          shape=(self.config.batch_size,) + self.config.input_shape)
      t = tf.random_uniform(
          shape=(self.config.batch_size,),
          minval=0,
          maxval=self.config.n_classes,
          dtype=tf.int32)
      global_step = tfe.Variable(0., trainable=False)
      model = revnet.RevNet(config=config)
      model(x)
      updates = model.get_updates_for(x)
      x_ = tf.identity(x)
      grads_all, vars_all, _ = model.compute_gradients(x_, t, training=True)
      optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
      # Run the model's update ops (e.g. batch-norm moving averages)
      # before applying gradients.
      with tf.control_dependencies(updates):
        train_op = optimizer.apply_gradients(
            zip(grads_all, vars_all), global_step=global_step)
      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(1):
          sess.run(train_op)
# Benchmark related
def device_and_data_format():
  """Pick (device, data_format): GPU/channels_first when a GPU is present."""
  if tf.test.is_gpu_available():
    return "/gpu:0", "channels_first"
  return "/cpu:0", "channels_last"
def random_batch(batch_size, config):
  """Create a random (images, labels) batch sized for `config`."""
  image_shape = (batch_size,) + config.input_shape
  images = tf.random_uniform(image_shape)
  labels = tf.random_uniform(
      [batch_size], minval=0, maxval=config.n_classes, dtype=tf.int32)
  return images, labels
class MockIterator(object):
  """Iterator stub that yields the same fixed batch on every call."""

  def __init__(self, tensors):
    # tf.identity gives this iterator its own handles to the tensors.
    self._tensors = [tf.identity(t) for t in tensors]

  def next(self):
    return self._tensors
class RevNetBenchmark(tf.test.Benchmark):
  """Eager and graph benchmarks for RevNet."""

  def _train_batch_sizes(self):
    """Shamelessly copied from `resnet50_test.py`.

    Note: This is targeted towards ImageNet. CIFAR-10 should allow more
    aggressive batch sizes.

    Returns:
      A tuple of possible batch sizes
    """
    # Scale the candidate batch sizes to the accelerator actually present.
    for device in device_lib.list_local_devices():
      if tf.DeviceSpec.from_string(device.name).device_type == "GPU":
        if "K20" in device.physical_device_desc:
          return (16,)
        if "P100" in device.physical_device_desc:
          return (16, 32, 64)
      if tf.DeviceSpec.from_string(device.name).device_type == "TPU":
        return (32,)
    return (16, 32)

  def _force_device_sync(self):
    """Shamelessly copied from `resnet50_test.py`."""
    # Copying a tensor to the CPU forces pending device work to complete.
    tf.constant(1.).cpu()

  def _report(self, label, start, num_iters, device, batch_size, data_format):
    """Report one benchmark entry: average wall time and examples/sec."""
    avg_time = (time.time() - start) / num_iters
    dev = tf.DeviceSpec.from_string(device).device_type.lower()
    name = "%s_%s_batch_%d_%s" % (label, dev, batch_size, data_format)
    extras = {"examples_per_sec": batch_size / avg_time}
    self.report_benchmark(
        iters=num_iters, wall_time=avg_time, name=name, extras=extras)

  def _benchmark_eager_apply(self,
                             label,
                             device_and_format,
                             defun=False,
                             execution_mode=None,
                             compiled=False):
    """Time forward passes (inference only) in eager mode.

    Args:
      label: name prefix for the reported benchmark entry.
      device_and_format: (device string, data_format) pair.
      defun: whether to wrap `model.call` with `tfe.defun`.
      execution_mode: e.g. `tfe.ASYNC`; None means synchronous eager.
      compiled: forwarded to `tfe.defun` (XLA compilation flag).
    """
    config = config_.get_hparams_imagenet_56()
    with tfe.execution_mode(execution_mode):
      device, data_format = device_and_format
      model = revnet.RevNet(config=config)
      if defun:
        model.call = tfe.defun(model.call, compiled=compiled)
      batch_size = 64
      num_burn = 5
      num_iters = 10
      with tf.device(device):
        images, _ = random_batch(batch_size, config)
        # Warm-up iterations are excluded from the timing.
        for _ in range(num_burn):
          model(images, training=False)
        if execution_mode:
          tfe.async_wait()
        gc.collect()
        start = time.time()
        for _ in range(num_iters):
          model(images, training=False)
        if execution_mode:
          tfe.async_wait()
      self._report(label, start, num_iters, device, batch_size, data_format)

  def benchmark_eager_apply_sync(self):
    self._benchmark_eager_apply(
        "eager_apply_sync", device_and_data_format(), defun=False)

  def benchmark_eager_apply_async(self):
    self._benchmark_eager_apply(
        "eager_apply_async",
        device_and_data_format(),
        defun=False,
        execution_mode=tfe.ASYNC)

  def benchmark_eager_call_defun(self):
    self._benchmark_eager_apply(
        "eager_apply_with_defun", device_and_data_format(), defun=True)

  def _benchmark_eager_train(self,
                             label,
                             make_iterator,
                             device_and_format,
                             defun=False,
                             execution_mode=None,
                             compiled=False):
    """Time full training iterations across the candidate batch sizes.

    Args:
      label: name prefix for the reported benchmark entries.
      make_iterator: callable building an iterator over (images, labels).
      device_and_format: (device string, data_format) pair.
      defun: whether to wrap `model.call` with `tfe.defun`.
      execution_mode: e.g. `tfe.ASYNC`; None means synchronous eager.
      compiled: accepted for signature parity; not forwarded to defun here.
    """
    config = config_.get_hparams_imagenet_56()
    with tfe.execution_mode(execution_mode):
      device, data_format = device_and_format
      for batch_size in self._train_batch_sizes():
        (images, labels) = random_batch(batch_size, config)
        model = revnet.RevNet(config=config)
        optimizer = tf.train.GradientDescentOptimizer(0.1)
        if defun:
          model.call = tfe.defun(model.call)
        num_burn = 3
        num_iters = 10
        with tf.device(device):
          iterator = make_iterator((images, labels))
          # Warm-up iterations are excluded from the timing.
          for _ in range(num_burn):
            (images, labels) = iterator.next()
            train_one_iter(model, images, labels, optimizer)
          if execution_mode:
            tfe.async_wait()
          self._force_device_sync()
          gc.collect()
          start = time.time()
          for _ in range(num_iters):
            (images, labels) = iterator.next()
            train_one_iter(model, images, labels, optimizer)
          if execution_mode:
            tfe.async_wait()
          self._force_device_sync()
          self._report(label, start, num_iters, device, batch_size, data_format)

  def benchmark_eager_train_sync(self):
    self._benchmark_eager_train(
        "eager_train_sync", MockIterator, device_and_data_format(), defun=False)

  def benchmark_eager_train_async(self):
    self._benchmark_eager_train(
        "eager_train_async",
        MockIterator,
        device_and_data_format(),
        defun=False,
        execution_mode=tfe.ASYNC)

  def benchmark_eager_train_defun(self):
    # NOTE(review): despite the name, this passes defun=False (unlike
    # benchmark_eager_call_defun above) -- confirm whether that is intended.
    self._benchmark_eager_train(
        "eager_train", MockIterator, device_and_data_format(), defun=False)

  def benchmark_eager_train_datasets_with_defun(self):

    def make_iterator(tensors):
      # Keep the dataset on the CPU; batches are consumed on `device`.
      with tf.device("/device:CPU:0"):
        ds = tf.data.Dataset.from_tensors(tensors).repeat()
      return tfe.Iterator(ds)

    self._benchmark_eager_train(
        "eager_train_dataset_with_defun",
        make_iterator,
        device_and_data_format(),
        defun=True)
if __name__ == "__main__":
  # The tests and benchmarks above require eager mode enabled at startup.
  tf.enable_eager_execution()
  tf.test.main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hooks for Cloud Memorystore service"""
from typing import Dict, Optional, Sequence, Tuple, Union
from google.api_core import path_template
from google.api_core.exceptions import NotFound
from google.api_core.retry import Retry
from google.cloud.memcache_v1beta2 import CloudMemcacheClient
from google.cloud.memcache_v1beta2.types import cloud_memcache
from google.cloud.redis_v1 import (
CloudRedisClient,
FailoverInstanceRequest,
InputConfig,
Instance,
OutputConfig,
)
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import version
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudMemorystoreHook(GoogleBaseHook):
    """
    Hook for the Google Cloud Memorystore (Redis) API.

    Every method on this hook that takes ``project_id`` must be invoked
    with keyword arguments rather than positional ones.

    :param gcp_conn_id: The connection ID used to fetch connection info.
    :type gcp_conn_id: str
    :param delegate_to: Account to impersonate using domain-wide delegation
        of authority, if any. For this to work, the service account making
        the request must have domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate
        using short-term credentials, or chained list of accounts required
        to get the access_token of the last account in the list, which will
        be impersonated in the request. A string account must grant the
        originating account the Service Account Token Creator IAM role; in
        a sequence each identity must grant that role to the identity
        directly preceding it, with the first account granting it to the
        originating account.
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        # Lazily-built API client; created on first get_conn() call.
        self._client: Optional[CloudRedisClient] = None

    def get_conn(self) -> CloudRedisClient:
        """Retrieves client library object that allow access to Cloud Memorystore service."""
        if not self._client:
            self._client = CloudRedisClient(credentials=self._get_credentials())
        return self._client

    @staticmethod
    def _append_label(instance: Instance, key: str, val: str) -> Instance:
        """
        Attach a label to the provided Instance proto and return it.

        Label values must match ``[a-z]([-a-z0-9]*[a-z0-9])?``, so the dots
        and plus signs of the (semver) airflow version string are replaced
        with hyphens before being stored.

        :param instance: the proto to receive the resource label
        :type instance: google.cloud.redis_v1.types.Instance
        :param key: the label key
        :type key: str
        :param val: the label value
        :type val: str
        :return: the instance proto updated with the new label
        """
        sanitized = val.replace(".", "-").replace("+", "-")
        instance.labels.update({key: sanitized})
        return instance
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
    self,
    location: str,
    instance_id: str,
    instance: Union[Dict, Instance],
    project_id: str,
    retry: Optional[Retry] = None,
    timeout: Optional[float] = None,
    metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
    """
    Create a Redis instance of the specified tier and memory size.

    If an instance with the given id already exists it is returned as-is;
    otherwise the instance is created (labelled with the airflow version)
    and fetched back once the operation completes. By default, the new
    instance is accessible from the project's `default network
    <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.

    :param location: The location of the Cloud Memorystore instance (for example europe-west1)
    :type location: str
    :param instance_id: Required. The logical name of the Redis instance in the customer
        project. Must contain only lowercase letters, numbers and hyphens, start with a
        letter, end with a letter or number, be 1-40 characters long, and be unique
        within the customer project / location.
    :type instance_id: str
    :param instance: Required. A Redis [Instance] resource. A dict must have the same
        form as the protobuf message :class:`~google.cloud.redis_v1.types.Instance`
    :type instance: Union[Dict, google.cloud.redis_v1.types.Instance]
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the Google Cloud connection is used.
    :type project_id: str
    :param retry: A retry object used to retry requests. If ``None`` is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to complete.
        Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
    """
    client = self.get_conn()
    if isinstance(instance, dict):
        instance = Instance(**instance)
    elif not isinstance(instance, Instance):
        raise AirflowException("instance is not instance of Instance type or python dict")

    parent = f"projects/{project_id}/locations/{location}"
    instance_name = f"{parent}/instances/{instance_id}"
    try:
        self.log.info("Fetching instance: %s", instance_name)
        existing = client.get_instance(
            request={'name': instance_name}, retry=retry, timeout=timeout, metadata=metadata or ()
        )
        self.log.info("Instance exists. Skipping creation.")
        return existing
    except NotFound:
        self.log.info("Instance not exists.")

        # Tag the new instance with the airflow version that created it.
        self._append_label(instance, "airflow-version", "v" + version.version)

        operation = client.create_instance(
            request={'parent': parent, 'instance_id': instance_id, 'instance': instance},
            retry=retry,
            timeout=timeout,
            metadata=metadata or (),
        )
        # Block until the long-running create operation completes.
        operation.result()
        self.log.info("Instance created.")
        return client.get_instance(
            request={'name': instance_name}, retry=retry, timeout=timeout, metadata=metadata or ()
        )
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(
    self,
    location: str,
    instance: str,
    project_id: str,
    retry: Optional[Retry] = None,
    timeout: Optional[float] = None,
    metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
    """
    Delete a specific Redis instance. The instance stops serving and its data is deleted.

    :param location: The location of the Cloud Memorystore instance (for example europe-west1)
    :type location: str
    :param instance: The logical name of the Redis instance in the customer project.
    :type instance: str
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the Google Cloud connection is used.
    :type project_id: str
    :param retry: A retry object used to retry requests. If ``None`` is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to complete.
        Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
    """
    client = self.get_conn()
    name = f"projects/{project_id}/locations/{location}/instances/{instance}"

    self.log.info("Fetching Instance: %s", name)
    found = client.get_instance(
        request={'name': name},
        retry=retry,
        timeout=timeout,
        metadata=metadata or (),
    )
    # Nothing to delete when the lookup returned nothing.
    if not found:
        return

    self.log.info("Deleting Instance: %s", name)
    operation = client.delete_instance(
        request={'name': name},
        retry=retry,
        timeout=timeout,
        metadata=metadata or (),
    )
    # Block until the long-running delete operation completes.
    operation.result()
    self.log.info("Instance deleted: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def export_instance(
    self,
    location: str,
    instance: str,
    output_config: Union[Dict, OutputConfig],
    project_id: str,
    retry: Optional[Retry] = None,
    timeout: Optional[float] = None,
    metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
    """
    Export Redis instance data into a Redis RDB format file in Cloud Storage.

    Redis continues serving during this operation.

    :param location: The location of the Cloud Memorystore instance (for example europe-west1)
    :type location: str
    :param instance: The logical name of the Redis instance in the customer project.
    :type instance: str
    :param output_config: Required. Specify data to be exported. A dict must have the
        same form as the protobuf message :class:`~google.cloud.redis_v1.types.OutputConfig`
    :type output_config: Union[Dict, google.cloud.redis_v1.types.OutputConfig]
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the Google Cloud connection is used.
    :type project_id: str
    :param retry: A retry object used to retry requests. If ``None`` is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to complete.
        Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
    """
    client = self.get_conn()
    name = f"projects/{project_id}/locations/{location}/instances/{instance}"

    self.log.info("Exporting Instance: %s", name)
    operation = client.export_instance(
        request={'name': name, 'output_config': output_config},
        retry=retry,
        timeout=timeout,
        metadata=metadata or (),
    )
    # Block until the long-running export operation completes.
    operation.result()
    self.log.info("Instance exported: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def failover_instance(
    self,
    location: str,
    instance: str,
    data_protection_mode: FailoverInstanceRequest.DataProtectionMode,
    project_id: str,
    retry: Optional[Retry] = None,
    timeout: Optional[float] = None,
    metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
    """
    Fail over the master node to the current replica node of a STANDARD tier
    Cloud Memorystore for Redis instance.

    :param location: The location of the Cloud Memorystore instance (for example europe-west1)
    :type location: str
    :param instance: The logical name of the Redis instance in the customer project.
    :type instance: str
    :param data_protection_mode: Optional. The data protection mode to apply; when
        unspecified the service defaults to LIMITED_DATA_LOSS.
    :type data_protection_mode: google.cloud.redis_v1.gapic.enums.FailoverInstanceRequest
        .DataProtectionMode
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the Google Cloud connection is used.
    :type project_id: str
    :param retry: A retry object used to retry requests. If ``None`` is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to complete.
        Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
    """
    client = self.get_conn()
    name = f"projects/{project_id}/locations/{location}/instances/{instance}"

    self.log.info("Failovering Instance: %s", name)
    operation = client.failover_instance(
        request={'name': name, 'data_protection_mode': data_protection_mode},
        retry=retry,
        timeout=timeout,
        metadata=metadata or (),
    )
    # Block until the long-running failover operation completes.
    operation.result()
    self.log.info("Instance failovered: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
    self,
    location: str,
    instance: str,
    project_id: str,
    retry: Optional[Retry] = None,
    timeout: Optional[float] = None,
    metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
    """
    Fetch the details of a specific Redis instance.

    :param location: The location of the Cloud Memorystore instance (for example europe-west1)
    :type location: str
    :param instance: The logical name of the Redis instance in the customer project.
    :type instance: str
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the Google Cloud connection is used.
    :type project_id: str
    :param retry: A retry object used to retry requests. If ``None`` is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to complete.
        Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
    :return: the fetched Instance proto
    """
    client = self.get_conn()
    name = f"projects/{project_id}/locations/{location}/instances/{instance}"
    found = client.get_instance(
        request={'name': name},
        retry=retry,
        timeout=timeout,
        metadata=metadata or (),
    )
    self.log.info("Fetched Instance: %s", name)
    return found
@GoogleBaseHook.fallback_to_default_project_id
def import_instance(
    self,
    location: str,
    instance: str,
    input_config: Union[Dict, InputConfig],
    project_id: str,
    retry: Optional[Retry] = None,
    timeout: Optional[float] = None,
    metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
    """
    Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.

    Redis may stop serving during this operation; the instance state is
    IMPORTING for its whole duration. When complete, the instance contains
    only the data from the imported file.

    :param location: The location of the Cloud Memorystore instance (for example europe-west1)
    :type location: str
    :param instance: The logical name of the Redis instance in the customer project.
    :type instance: str
    :param input_config: Required. Specify data to be imported. A dict must have the
        same form as the protobuf message :class:`~google.cloud.redis_v1.types.InputConfig`
    :type input_config: Union[Dict, google.cloud.redis_v1.types.InputConfig]
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the Google Cloud connection is used.
    :type project_id: str
    :param retry: A retry object used to retry requests. If ``None`` is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to complete.
        Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
    """
    client = self.get_conn()
    name = f"projects/{project_id}/locations/{location}/instances/{instance}"

    self.log.info("Importing Instance: %s", name)
    operation = client.import_instance(
        request={'name': name, 'input_config': input_config},
        retry=retry,
        timeout=timeout,
        metadata=metadata or (),
    )
    # Block until the long-running import operation completes.
    operation.result()
    self.log.info("Instance imported: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def list_instances(
    self,
    location: str,
    page_size: int,
    project_id: str,
    retry: Optional[Retry] = None,
    timeout: Optional[float] = None,
    metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
    """
    List all Redis instances owned by a project, in one location or in all.

    :param location: The location of the Cloud Memorystore instance (for example
        europe-west1). When given as ``-`` (wildcard), every region available to
        the project is queried and the results are aggregated.
    :type location: str
    :param page_size: The maximum number of resources contained in the underlying API
        response. With per-resource page streaming this does not affect the return
        value; with per-page streaming it bounds the number of resources in a page.
    :type page_size: int
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the Google Cloud connection is used.
    :type project_id: str
    :param retry: A retry object used to retry requests. If ``None`` is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to complete.
        Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
    :return: the (possibly paged) list result from the API
    """
    client = self.get_conn()
    parent = f"projects/{project_id}/locations/{location}"
    instances = client.list_instances(
        request={'parent': parent, 'page_size': page_size},
        retry=retry,
        timeout=timeout,
        metadata=metadata or (),
    )
    self.log.info("Fetched instances")
    return instances
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
    self,
    update_mask: Union[Dict, FieldMask],
    instance: Union[Dict, Instance],
    project_id: str,
    location: Optional[str] = None,
    instance_id: Optional[str] = None,
    retry: Optional[Retry] = None,
    timeout: Optional[float] = None,
    metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
    """
    Update the metadata and configuration of a specific Redis instance.

    :param update_mask: Required. Mask of fields to update; at least one path must be
        supplied. Paths may only name these ``Instance`` fields: ``displayName``,
        ``labels``, ``memorySizeGb``, ``redisConfig``. A dict must have the same form
        as the protobuf message :class:`~google.protobuf.field_mask_pb2.FieldMask`
    :type update_mask: Union[Dict, google.protobuf.field_mask_pb2.FieldMask]
    :param instance: Required. Update description. Only fields named in ``update_mask``
        are updated. A dict must have the same form as the protobuf message
        :class:`~google.cloud.redis_v1.types.Instance`
    :type instance: Union[Dict, google.cloud.redis_v1.types.Instance]
    :param location: The location of the Cloud Memorystore instance (for example europe-west1)
    :type location: str
    :param instance_id: The logical name of the Redis instance in the customer project.
    :type instance_id: str
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the Google Cloud connection is used.
    :type project_id: str
    :param retry: A retry object used to retry requests. If ``None`` is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to complete.
        Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
    """
    client = self.get_conn()

    if isinstance(instance, dict):
        instance = Instance(**instance)
    elif not isinstance(instance, Instance):
        raise AirflowException("instance is not instance of Instance type or python dict")

    # When both location and instance_id are supplied, derive the full
    # resource name instead of relying on instance.name being pre-set.
    if location and instance_id:
        instance.name = f"projects/{project_id}/locations/{location}/instances/{instance_id}"

    self.log.info("Updating instances: %s", instance.name)
    operation = client.update_instance(
        request={'update_mask': update_mask, 'instance': instance},
        retry=retry,
        timeout=timeout,
        metadata=metadata or (),
    )
    # Block until the long-running update operation completes.
    operation.result()
    self.log.info("Instance updated: %s", instance.name)
class CloudMemorystoreMemcachedHook(GoogleBaseHook):
    """
    Hook for the Google Cloud Memorystore for Memcached service APIs.

    Every method on this hook that takes ``project_id`` must be invoked
    with keyword arguments rather than positional ones.

    :param gcp_conn_id: The connection ID used to fetch connection info.
    :type gcp_conn_id: str
    :param delegate_to: Account to impersonate using domain-wide delegation
        of authority, if any. For this to work, the service account making
        the request must have domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate
        using short-term credentials, or chained list of accounts required
        to get the access_token of the last account in the list, which will
        be impersonated in the request. A string account must grant the
        originating account the Service Account Token Creator IAM role; in
        a sequence each identity must grant that role to the identity
        directly preceding it, with the first account granting it to the
        originating account.
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        # Lazily-built API client; created on first get_conn() call.
        self._client: Optional[CloudMemcacheClient] = None

    def get_conn(
        self,
    ):
        """Retrieves client library object that allow access to Cloud Memorystore Memcached service."""
        if not self._client:
            self._client = CloudMemcacheClient(credentials=self._get_credentials())
        return self._client

    @staticmethod
    def _append_label(instance: cloud_memcache.Instance, key: str, val: str) -> cloud_memcache.Instance:
        """
        Attach a label to the provided Instance proto and return it.

        Label values must match ``[a-z]([-a-z0-9]*[a-z0-9])?``, so the dots
        and plus signs of the (semver) airflow version string are replaced
        with hyphens before being stored.

        :param instance: the proto to receive the resource label
        :type instance: google.cloud.memcache_v1beta2.types.cloud_memcache.Instance
        :param key: the label key
        :type key: str
        :param val: the label value
        :type val: str
        :return: the instance proto updated with the new label
        """
        sanitized = val.replace(".", "-").replace("+", "-")
        instance.labels.update({key: sanitized})
        return instance
@GoogleBaseHook.fallback_to_default_project_id
def apply_parameters(
self,
node_ids: Sequence[str],
apply_all: bool,
project_id: str,
location: str,
instance_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Will update current set of Parameters to the set of specified nodes of the Memcached Instance.
:param node_ids: Nodes to which we should apply the instance-level parameter group.
:type node_ids: Sequence[str]
:param apply_all: Whether to apply instance-level parameter group to all nodes. If set to true,
will explicitly restrict users from specifying any nodes, and apply parameter group updates
to all nodes within the instance.
:type apply_all: bool
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance_id: The logical name of the Memcached instance in the customer project.
:type instance_id: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
metadata = metadata or ()
name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
self.log.info("Applying update to instance: %s", instance_id)
result = client.apply_parameters(
name=name,
node_ids=node_ids,
apply_all=apply_all,
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
result.result()
self.log.info("Instance updated: %s", instance_id)
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
location: str,
instance_id: str,
instance: Union[Dict, cloud_memcache.Instance],
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Creates a Memcached instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance_id: Required. The logical name of the Memcached instance in the customer project
with the following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:type instance_id: str
:param instance: Required. A Memcached [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.Instance`
:type instance: Union[Dict, google.cloud.memcache_v1beta2.types.cloud_memcache.Instance]
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
metadata = metadata or ()
parent = path_template.expand(
"projects/{project}/locations/{location}", project=project_id, location=location
)
instance_name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
try:
instance = client.get_instance(
name=instance_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info("Instance exists. Skipping creation.")
return instance
except NotFound:
self.log.info("Instance not exists.")
if isinstance(instance, dict):
instance = cloud_memcache.Instance(instance)
elif not isinstance(instance, cloud_memcache.Instance):
raise AirflowException("instance is not instance of Instance type or python dict")
self._append_label(instance, "airflow-version", "v" + version.version)
result = client.create_instance(
parent=parent,
instance_id=instance_id,
resource=instance,
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
result.result()
self.log.info("Instance created.")
return client.get_instance(
name=instance_name,
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(
self,
location: str,
instance: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Deletes a specific Memcached instance. Instance stops serving and data is deleted.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance: The logical name of the Memcached instance in the customer project.
:type instance: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
metadata = metadata or ()
name = CloudMemcacheClient.instance_path(project_id, location, instance)
self.log.info("Fetching Instance: %s", name)
instance = client.get_instance(
name=name,
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
if not instance:
return
self.log.info("Deleting Instance: %s", name)
result = client.delete_instance(
name=name,
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
result.result()
self.log.info("Instance deleted: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
location: str,
instance: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Gets the details of a specific Memcached instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance: The logical name of the Memcached instance in the customer project.
:type instance: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
metadata = metadata or ()
name = CloudMemcacheClient.instance_path(project_id, location, instance)
result = client.get_instance(name=name, retry=retry, timeout=timeout, metadata=metadata or ())
self.log.info("Fetched Instance: %s", name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_instances(
self,
location: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Lists all Memcached instances owned by a project in either the specified location (region) or all
locations.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
If it is specified as ``-`` (wildcard), then all regions available to the project are
queried, and the results are aggregated.
:type location: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
metadata = metadata or ()
parent = path_template.expand(
"projects/{project}/locations/{location}", project=project_id, location=location
)
result = client.list_instances(
parent=parent,
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
self.log.info("Fetched instances")
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
update_mask: Union[Dict, FieldMask],
instance: Union[Dict, cloud_memcache.Instance],
project_id: str,
location: Optional[str] = None,
instance_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Updates the metadata and configuration of a specific Memcached instance.
:param update_mask: Required. Mask of fields to update. At least one path must be supplied in this
field. The elements of the repeated paths field may only include these fields from ``Instance``:
- ``displayName``
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`)
:type update_mask:
Union[Dict, google.protobuf.field_mask_pb2.FieldMask]
:param instance: Required. Update description. Only fields specified in ``update_mask`` are updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.Instance`
:type instance: Union[Dict, google.cloud.memcache_v1beta2.types.cloud_memcache.Instance]
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance_id: The logical name of the Memcached instance in the customer project.
:type instance_id: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
metadata = metadata or ()
if isinstance(instance, dict):
instance = cloud_memcache.Instance(instance)
elif not isinstance(instance, cloud_memcache.Instance):
raise AirflowException("instance is not instance of Instance type or python dict")
if location and instance_id:
name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
instance.name = name
self.log.info("Updating instances: %s", instance.name)
result = client.update_instance(
update_mask=update_mask, resource=instance, retry=retry, timeout=timeout, metadata=metadata or ()
)
result.result()
self.log.info("Instance updated: %s", instance.name)
@GoogleBaseHook.fallback_to_default_project_id
def update_parameters(
self,
update_mask: Union[Dict, FieldMask],
parameters: Union[Dict, cloud_memcache.MemcacheParameters],
project_id: str,
location: str,
instance_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Updates the defined Memcached Parameters for an existing Instance. This method only stages the
parameters, it must be followed by apply_parameters to apply the parameters to nodes of
the Memcached Instance.
:param update_mask: Required. Mask of fields to update.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:type update_mask:
Union[Dict, google.protobuf.field_mask_pb2.FieldMask]
:param parameters: The parameters to apply to the instance.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.MemcacheParameters`
:type parameters: Union[Dict, google.cloud.memcache_v1beta2.types.cloud_memcache.MemcacheParameters]
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance_id: The logical name of the Memcached instance in the customer project.
:type instance_id: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
metadata = metadata or ()
if isinstance(parameters, dict):
parameters = cloud_memcache.MemcacheParameters(parameters)
elif not isinstance(parameters, cloud_memcache.MemcacheParameters):
raise AirflowException("instance is not instance of MemcacheParameters type or python dict")
name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
self.log.info("Staging update to instance: %s", instance_id)
result = client.update_parameters(
name=name,
update_mask=update_mask,
parameters=parameters,
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
result.result()
self.log.info("Update staged for instance: %s", instance_id)
| |
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import numpy as np
from mpi4py import MPI
from brainiak.searchlight.searchlight import Searchlight
from brainiak.searchlight.searchlight import Diamond, Ball
"""Distributed Searchlight Test
"""
def cube_sfn(l, msk, myrad, bcast_var):
    """Searchlight kernel: 1.0 when the cubic mask is completely filled.

    Returns None for any mask that is not both non-empty and all-True
    (np.any guards the degenerate empty mask, where np.all([]) is True).
    """
    full_and_nonempty = bool(np.all(msk)) and bool(np.any(msk))
    return 1.0 if full_and_nonempty else None
def test_searchlight_with_cube():
    """End-to-end searchlight with the default cubic shape.

    A 7x7x7 spot is masked in; only the voxel whose full 7x7x7 neighborhood
    lies inside the spot (its center, [13, 13, 13]) may produce a result.
    """
    sl = Searchlight(sl_rad=3)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    # np.bool / np.object were deprecated aliases of the builtins and were
    # removed in NumPy 1.20+/1.24; use the builtins directly.
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    # Subjects are distributed round-robin across MPI ranks.
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]
    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = True
    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(cube_sfn)
    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None
        # Every other voxel must have produced no output.
        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
def test_searchlight_with_cube_poolsize_1():
    """Same as test_searchlight_with_cube but forcing a single-process pool."""
    sl = Searchlight(sl_rad=3)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    # np.bool / np.object were deprecated aliases of the builtins and were
    # removed in NumPy 1.20+/1.24; use the builtins directly.
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]
    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = True
    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(cube_sfn, pool_size=1)
    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None
        # Every other voxel must have produced no output.
        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
def diamond_sfn(l, msk, myrad, bcast_var):
    """Searchlight kernel for the Diamond shape.

    Returns 1.0 when the diamond neighborhood is fully masked, else None.
    Asserts that nothing outside the diamond is masked in.
    """
    # Use the actual searchlight radius instead of a hard-coded Diamond(3),
    # so the kernel stays correct if sl_rad is ever changed.
    shape_mask = Diamond(myrad).mask_
    assert not np.any(msk[~shape_mask])
    if np.all(msk[shape_mask]):
        return 1.0
    return None
def test_searchlight_with_diamond():
    """End-to-end searchlight using the Diamond shape over a diamond spot."""
    sl = Searchlight(sl_rad=3, shape=Diamond)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    # np.bool / np.object were deprecated aliases of the builtins and were
    # removed in NumPy 1.20+/1.24; use the builtins directly.
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]
    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = Diamond(3).mask_
    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(diamond_sfn)
    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None
        # Every other voxel must have produced no output.
        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
def ball_sfn(l, msk, myrad, bcast_var):
    """Searchlight kernel for the Ball shape.

    Returns 1.0 only when the local mask equals the spherical mask of
    radius ``myrad``; asserts nothing outside the ball is masked in.
    """
    x, y, z = np.mgrid[-myrad:myrad+1, -myrad:myrad+1, -myrad:myrad+1]
    correct_mask = np.square(x) + np.square(y) + np.square(z) <= myrad ** 2
    # Use the actual searchlight radius instead of a hard-coded Ball(3),
    # matching how correct_mask above is already built from myrad.
    assert not np.any(msk[~Ball(myrad).mask_])
    if np.all(correct_mask == msk):
        return 1.0
    return None
def test_searchlight_with_ball():
    """End-to-end searchlight using the Ball shape over a spherical spot."""
    sl = Searchlight(sl_rad=3, shape=Ball)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    # np.bool / np.object were deprecated aliases of the builtins and were
    # removed in NumPy 1.20+/1.24; use the builtins directly.
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]
    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = Ball(3).mask_
    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(ball_sfn)
    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None
        # Every other voxel must have produced no output.
        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
MaskRadBcast = namedtuple("MaskRadBcast", "mask rad")
def test_instantiate():
    """Constructing a Searchlight must succeed and yield a truthy object."""
    instance = Searchlight(sl_rad=5, max_blk_edge=10)
    assert instance
def voxel_test_sfn(l, msk, myrad, bcast):
    """Searchlight kernel validating data/mask/coordinate alignment.

    Assumes each voxel of every subject holds its own global
    [x, y, z, tr] index (as set up by test_correctness).  Returns the
    global (x, y, z) coordinates of the searchlight center.
    """
    rad = bcast.rad
    # Each voxel's offset from the block center must equal its local offset.
    for subj in l:
        for t in range(subj.shape[3]):
            vol = subj[:, :, :, t]
            center = vol[rad, rad, rad]
            for x, y, z in np.ndindex(*vol.shape[:3]):
                expected = np.array([x - rad, y - rad, z - rad, 0])
                assert np.array_equal(vol[x, y, z] - center, expected)
    # Global coordinates of this searchlight's center voxel.
    cx, cy, cz = l[0][rad, rad, rad, 0][:3]
    # The local mask must be the matching window of the full-volume mask.
    for x, y, z in np.ndindex(*msk.shape):
        global_pt = (cx - rad + x, cy - rad + y, cz - rad + z)
        assert bcast.mask[global_pt] == msk[x, y, z]
    return (cx, cy, cz)
def block_test_sfn(l, msk, myrad, bcast_var, extra_params):
    """Block kernel: first subject's first TR with out-of-mask voxels
    blanked to None, trimmed of the rad-wide padding border.

    NOTE: writes None into a view of the caller's array (in-place).
    """
    out = l[0][:, :, :, 0]
    out[~msk] = None
    if myrad:
        trim = slice(myrad, -myrad)
        return out[trim, trim, trim]
    return out
def test_correctness():  # noqa: C901
    """Exhaustive voxel- and block-level correctness checks.

    Each voxel of every subject is initialized to its own [d1, d2, d3, tr]
    coordinate so the kernels can verify that data, mask and coordinates
    line up exactly across several volume sizes, radii and block edges.
    """
    def voxel_test(data, mask, max_blk_edge, rad):
        comm = MPI.COMM_WORLD
        rank = comm.rank
        (dim0, dim1, dim2) = mask.shape
        # Initialize dataset with known pattern
        for subj in data:
            if subj is not None:
                for tr in range(subj.shape[3]):
                    for d1 in range(dim0):
                        for d2 in range(dim1):
                            for d3 in range(dim2):
                                subj[d1, d2, d3, tr] = np.array(
                                    [d1, d2, d3, tr])
        sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
        sl.distribute(data, mask)
        sl.broadcast(MaskRadBcast(mask, rad))
        global_outputs = sl.run_searchlight(voxel_test_sfn)
        if rank == 0:
            # Interior voxels inside the mask must report their own coords.
            for d0 in range(rad, global_outputs.shape[0]-rad):
                for d1 in range(rad, global_outputs.shape[1]-rad):
                    for d2 in range(rad, global_outputs.shape[2]-rad):
                        if mask[d0, d1, d2]:
                            assert np.array_equal(
                                np.array(global_outputs[d0, d1, d2]),
                                np.array([d0, d1, d2]))
    def block_test(data, mask, max_blk_edge, rad):
        comm = MPI.COMM_WORLD
        rank = comm.rank
        (dim0, dim1, dim2) = mask.shape
        # Initialize dataset with known pattern
        for subj in data:
            if subj is not None:
                for tr in range(subj.shape[3]):
                    for d1 in range(dim0):
                        for d2 in range(dim1):
                            for d3 in range(dim2):
                                subj[d1, d2, d3, tr] = np.array(
                                    [d1, d2, d3, tr])
        sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
        sl.distribute(data, mask)
        sl.broadcast(mask)
        global_outputs = sl.run_block_function(block_test_sfn)
        if rank == 0:
            for d0 in range(rad, global_outputs.shape[0]-rad):
                for d1 in range(rad, global_outputs.shape[1]-rad):
                    for d2 in range(rad, global_outputs.shape[2]-rad):
                        if mask[d0, d1, d2]:
                            assert np.array_equal(
                                np.array(global_outputs[d0, d1, d2]),
                                np.array([d0, d1, d2, 0]))
    # Create dataset
    def do_test(dim0, dim1, dim2, ntr, nsubj, max_blk_edge, rad):
        comm = MPI.COMM_WORLD
        rank = comm.rank
        size = comm.size
        mask = np.random.choice([True, False], (dim0, dim1, dim2))
        # np.object was a deprecated alias removed in NumPy 1.24; the
        # builtin `object` is equivalent.
        data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
                if i % size == rank
                else None
                for i in range(0, nsubj)]
        voxel_test(data, mask, max_blk_edge, rad)
        block_test(data, mask, max_blk_edge, rad)
    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=1, max_blk_edge=4, rad=1)
    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=1, max_blk_edge=4, rad=0)
    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=5, max_blk_edge=4, rad=1)
    do_test(dim0=1, dim1=5, dim2=9, ntr=5, nsubj=5, max_blk_edge=4, rad=1)
    # Degenerate volume: a zero-length dimension must not crash anything.
    do_test(dim0=0, dim1=10, dim2=8, ntr=5, nsubj=5, max_blk_edge=4, rad=1)
    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=1, max_blk_edge=4, rad=2)
    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=1, max_blk_edge=4, rad=3)
| |
import random
import re
from collections import defaultdict, Counter
from math import log, exp
from zipfile import ZipFile
# Floor value usable in place of -inf for log probabilities.
kNEG_INF = -1e6
# Sentence boundary markers.
kSTART = "<s>"
kEND = "</s>"
# A token is a maximal run of lowercase letters.
kWORDS = re.compile("[a-z]{1,}")
# State-of-the-Union corpora keyed by president name, grouped by party.
kREP = set(["Bush", "GWBush", "Eisenhower", "Ford", "Nixon", "Reagan"])
kDEM = set(["Carter", "Clinton", "Truman", "Johnson", "Kennedy"])
class OutOfVocab(Exception):
    """Raised when a token outside the finalized vocabulary is encountered."""
    def __init__(self, value):
        # Keep the offending token available to handlers.
        self.value = value
    def __str__(self):
        return repr(self.value)
def sentences_from_zipfile(zip_file, filter_presidents):
    """
    Yield the lowercased lines (skipping the first three header lines) of
    every member of *zip_file* whose name encodes one of *filter_presidents*.
    """
    with ZipFile(zip_file) as archive:
        for member in archive.namelist():
            # Member names look like "state_union/1946-Truman.txt".
            stem = member.replace(".txt", "").replace("state_union/", "")
            parts = stem.split("-")
            if len(parts) < 2:
                continue
            if parts[1] not in filter_presidents:
                continue
            text = archive.read(member).decode(errors='replace')
            for line in text.split("\n")[3:]:
                yield line.lower()
def tokenize(sentence):
    """Return all lowercase word tokens of *sentence* (per kWORDS)."""
    lowered = sentence.lower()
    return kWORDS.findall(lowered)
def bigrams(sentence):
    """Yield each adjacent (word, next_word) pair of *sentence*."""
    yield from zip(sentence, sentence[1:])
class BigramLanguageModel:
    """Bigram language model with add-one (Laplace) smoothing.

    Usage: train_seen() every word, finalize(), then add_train() each
    sentence; laplace()/log_likelihood() score text and generate()/sample()
    produce text.  The skeleton's stub methods are implemented here.
    """
    def __init__(self):
        self._vocab = set([kSTART, kEND])
        # Bigram counts: _counts[context][word] = times `word` followed `context`.
        self._counts = defaultdict(Counter)
        # Total number of observed bigrams starting with each context.
        self._context_totals = Counter()
        self._vocab_final = False
    def train_seen(self, word):
        """
        Tells the language model that a word has been seen.  This
        will be used to build the final vocabulary.
        """
        assert not self._vocab_final, \
            "Trying to add new words to finalized vocab"
        self._vocab.add(word)
    def generate(self, context):
        """
        Given the previous word of a context, generate a next word from its
        conditional language model probability.
        """
        # Laplace smoothing gives every vocab word count+1 pseudo-mass, so a
        # distribution exists even for contexts never seen in training.
        total = self._context_totals[context] + len(self._vocab)
        threshold = random.random() * total
        cumulative = 0
        for word in sorted(self._vocab):
            cumulative += self._counts[context][word] + 1
            if cumulative >= threshold:
                return word
        return kEND  # floating-point safety net; normally unreachable
    def sample(self, sample_size):
        """
        Generate an English-like string from a language model of a specified
        length (plus start and end tags).
        """
        # You should not need to modify this function
        yield kSTART
        next = kSTART
        for ii in range(sample_size):
            next = self.generate(next)
            if next == kEND:
                break
            else:
                yield next
        yield kEND
    def finalize(self):
        """
        Fixes the vocabulary as static, prevents keeping additional vocab from
        being added
        """
        # you should not need to modify this function
        self._vocab_final = True
    def tokenize_and_censor(self, sentence):
        """
        Given a sentence, yields a sentence suitable for training or testing.
        Prefix the sentence with <s>, generate the words in the
        sentence, and end the sentence with </s>.
        """
        # you should not need to modify this function
        yield kSTART
        for ii in tokenize(sentence):
            if ii not in self._vocab:
                raise OutOfVocab(ii)
            yield ii
        yield kEND
    def vocab(self):
        """
        Returns the language model's vocabulary
        """
        assert self._vocab_final, "Vocab not finalized"
        return list(sorted(self._vocab))
    def laplace(self, context, word):
        """
        Return the log probability (base e) of a word given its context
        """
        assert context in self._vocab, "%s not in vocab" % context
        assert word in self._vocab, "%s not in vocab" % word
        # Add-one smoothing: (count + 1) / (context_total + |V|).
        count = self._counts[context][word]
        total = self._context_totals[context]
        return log(count + 1) - log(total + len(self._vocab))
    def add_train(self, sentence):
        """
        Add the counts associated with a sentence.
        """
        for context, word in bigrams(list(self.tokenize_and_censor(sentence))):
            assert word in self._vocab, "%s not in vocab" % word
            self._counts[context][word] += 1
            self._context_totals[context] += 1
    def log_likelihood(self, sentence):
        """
        Compute the log likelihood of a sentence, divided by the number of
        tokens in the sentence.
        """
        # Token count includes the <s> and </s> markers.
        tokens = list(self.tokenize_and_censor(sentence))
        total = sum(self.laplace(context, word)
                    for context, word in bigrams(tokens))
        return total / len(tokens)
if __name__ == "__main__":
    dem_lm = BigramLanguageModel()
    rep_lm = BigramLanguageModel()
    # First pass builds each model's vocabulary; second pass trains counts.
    for target, pres, name in [(dem_lm, kDEM, "D"), (rep_lm, kREP, "R")]:
        for sent in sentences_from_zipfile("../data/state_union.zip", pres):
            for ww in tokenize(sent):
                target.train_seen(ww)
        print("Done looking at %s words, finalizing vocabulary" % name)
        target.finalize()
        for sent in sentences_from_zipfile("../data/state_union.zip", pres):
            target.add_train(sent)
        print("Trained language model for %s" % name)
    with open("../data/2016-obama.txt", encoding='utf8') as infile:
        # Header fixed to match the printed column order: dem_score is
        # printed first, so the DEM column comes first.
        print("DEM\t\tREP\t\tSentence\n" + "=" * 80)
        for ii in infile:
            if len(ii) < 15:  # Ignore short sentences
                continue
            try:
                dem_score = dem_lm.log_likelihood(ii)
                rep_score = rep_lm.log_likelihood(ii)
                print("%f\t%f\t%s" % (dem_score, rep_score, ii.strip()))
            except OutOfVocab:
                # Skip sentences containing out-of-vocabulary words
                # ("pass" replaces the no-op "None" expression statement).
                pass
| |
import os
import re
from smtplib import SMTPException
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.contrib.auth import forms as auth_forms
from django.forms.util import ErrorList
import captcha.fields
import commonware.log
import happyforms
from tower import ugettext as _, ugettext_lazy as _lazy
import amo
from amo.utils import log_cef, slug_validator
from .models import (UserProfile, UserNotification, BlacklistedUsername,
BlacklistedEmailDomain, BlacklistedPassword, DjangoUser)
from .widgets import NotificationsSelectMultiple
import users.notifications as email
from . import tasks
# Module-level logger for the users app.
log = commonware.log.getLogger('z.users')
# Lookahead pair: the password must contain at least one digit AND at least
# one ASCII letter (order irrelevant); used for privileged accounts.
admin_re = re.compile('(?=.*\d)(?=.*[a-zA-Z])')
class PasswordMixin:
    """Shared password-strength validation for the auth forms below."""
    min_length = 8
    error_msg = {'min_length': _('Must be %s characters or more.')
                 % min_length}
    @classmethod
    def widget(cls, **kw):
        """Password input wired up to the client-side strength meter."""
        attrs = {'class': 'password-strength',
                 'data-min-length': cls.min_length}
        return forms.PasswordInput(attrs=attrs, **kw)
    def clean_password(self, field='password', instance='instance'):
        data = self.cleaned_data[field]
        if not data:
            return data
        # Privileged users must mix letters and digits.
        user = getattr(self, instance, None)
        needs_tougher = user and user.pk and user.needs_tougher_password
        if needs_tougher and not admin_re.search(data):
            raise forms.ValidationError(_('Letters and numbers required.'))
        if BlacklistedPassword.blocked(data):
            raise forms.ValidationError(_('That password is not allowed.'))
        return data
class AuthenticationForm(auth_forms.AuthenticationForm):
    """Login form adding remember-me and an optional reCAPTCHA challenge."""
    username = forms.CharField(max_length=50)
    rememberme = forms.BooleanField(required=False)
    recaptcha = captcha.fields.ReCaptchaField()
    # Hidden flag so the view can tell whether the captcha was displayed.
    recaptcha_shown = forms.BooleanField(widget=forms.HiddenInput,
                                         required=False)
    def __init__(self, request=None, use_recaptcha=False, *args, **kw):
        super(AuthenticationForm, self).__init__(*args, **kw)
        # Only require the captcha when requested AND the site has a key
        # configured; otherwise drop the field so validation can pass.
        if not use_recaptcha or not settings.RECAPTCHA_PRIVATE_KEY:
            del self.fields['recaptcha']
class PasswordResetForm(auth_forms.PasswordResetForm):
    """Password reset backed by UserProfile rather than auth_user.

    The "no such account" case raises the same message shown on success,
    so the form cannot be used to probe which addresses are registered.
    """
    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(PasswordResetForm, self).__init__(*args, **kwargs)
    def clean_email(self):
        email = self.cleaned_data['email']
        self.users_cache = UserProfile.objects.filter(email__iexact=email)
        if not self.users_cache:
            raise forms.ValidationError(
                _("""An email has been sent to the requested account with
                further information. If you do not receive an email then
                please confirm you have entered the same email address used
                during account registration."""))
        return email
    def save(self, **kw):
        for user in self.users_cache:
            log.info(u'Password reset email sent for user (%s)' % user)
            # Single CEF call; only the message differs for privileged users.
            msg = ('Privileged user requested password reset'
                   if user.needs_tougher_password
                   else 'User requested password reset')
            log_cef('Password Reset', 5, self.request,
                    username=user,
                    signature='PASSWORDRESET',
                    msg=msg)
            try:
                # Django calls send_mail() directly and has no option to pass
                # in fail_silently, so we have to catch the SMTP error ourselves
                # NOTE(review): this runs once per matching user; Django's own
                # save() may already iterate over users_cache, which would send
                # duplicate mails -- confirm against the Django version in use.
                super(PasswordResetForm, self).save(**kw)
            except SMTPException as e:
                # "as" replaces the Python-2-only "except E, e:" syntax.
                log.error("Failed to send mail for (%s): %s" % (user, e))
class SetPasswordForm(auth_forms.SetPasswordForm, PasswordMixin):
    """Set-password form that enforces PasswordMixin's strength rules."""
    new_password1 = forms.CharField(label=_lazy(u'New password'),
                                    min_length=PasswordMixin.min_length,
                                    error_messages=PasswordMixin.error_msg,
                                    widget=PasswordMixin.widget())
    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(SetPasswordForm, self).__init__(*args, **kwargs)
        # We store our password in the users table, not auth_user like
        # Django expects.
        if isinstance(self.user, DjangoUser):
            self.user = self.user.get_profile()
    def clean_new_password1(self):
        # Delegate strength/blacklist checks to PasswordMixin.
        return self.clean_password(field='new_password1', instance='user')
    def save(self, **kw):
        # Three different loggers? :(
        amo.log(amo.LOG.CHANGE_PASSWORD, user=self.user)
        log.info(u'User (%s) changed password with reset form' % self.user)
        log_cef('Password Changed', 5, self.request,
                username=self.user.username, signature='PASSWORDCHANGED',
                msg='User changed password')
        super(SetPasswordForm, self).save(**kw)
class UserDeleteForm(forms.Form):
    """Account-deletion confirmation form.

    Requires the user's current password plus an explicit confirmation
    checkbox.  Developer accounts are never allowed to self-delete.
    """
    password = forms.CharField(max_length=255, required=True,
                               widget=forms.PasswordInput(render_value=False))
    confirm = forms.BooleanField(required=True)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(UserDeleteForm, self).__init__(*args, **kwargs)

    def clean_password(self):
        data = self.cleaned_data
        amouser = self.request.user.get_profile()
        if not amouser.check_password(data["password"]):
            raise forms.ValidationError(_("Wrong password entered!"))
        # Django's clean_<field> contract requires returning the cleaned
        # value; the previous implicit None wiped cleaned_data['password'].
        return data["password"]

    def clean(self):
        amouser = self.request.user.get_profile()
        if amouser.is_developer:
            # This is tampering because the form isn't shown on the page if the
            # user is a developer
            log.warning(u'[Tampering] Attempt to delete developer account (%s)'
                        % self.request.user)
            raise forms.ValidationError("")
        # Returning cleaned_data keeps older Django versions from replacing
        # it with None when clean() returns nothing.
        return self.cleaned_data
class UsernameMixin:
    """Shared username validation for the register/edit forms."""

    def clean_username(self):
        name = self.cleaned_data['username']
        # Only slug-like usernames: letters, digits, underscores, hyphens.
        slug_validator(name, lower=False,
            message=_('Enter a valid username consisting of letters, numbers, '
                      'underscores or hyphens.'))
        if BlacklistedUsername.blocked(name):
            raise forms.ValidationError(_('This username cannot be used.'))
        # FIXME: Bug 858452. Remove this check when collation of the username
        # column is changed to case insensitive.
        if (UserProfile.objects.exclude(id=self.instance.id)
                       .filter(username__iexact=name).exists()):
            raise forms.ValidationError(_('This username is already in use.'))
        return name
class UserRegisterForm(happyforms.ModelForm, UsernameMixin, PasswordMixin):
    """
    For registering users. We're not building off
    d.contrib.auth.forms.UserCreationForm because it doesn't do a lot of the
    details here, so we'd have to rewrite most of it anyway.
    """
    username = forms.CharField(max_length=50)
    display_name = forms.CharField(label=_lazy(u'Display Name'), max_length=50,
                                   required=False)
    location = forms.CharField(label=_lazy(u'Location'), max_length=100,
                               required=False)
    occupation = forms.CharField(label=_lazy(u'Occupation'), max_length=100,
                                 required=False)
    password = forms.CharField(max_length=255,
                               min_length=PasswordMixin.min_length,
                               error_messages=PasswordMixin.error_msg,
                               widget=PasswordMixin.widget(render_value=False))
    password2 = forms.CharField(max_length=255,
                                widget=forms.PasswordInput(render_value=False))
    recaptcha = captcha.fields.ReCaptchaField()
    homepage = forms.URLField(label=_lazy(u'Homepage'), required=False)

    class Meta:
        model = UserProfile
        fields = ('username', 'display_name', 'location', 'occupation',
                  'password', 'password2', 'recaptcha', 'homepage', 'email',
                  'emailhidden')

    def __init__(self, *args, **kwargs):
        super(UserRegisterForm, self).__init__(*args, **kwargs)
        # The captcha is skipped entirely when the site has no key
        # configured (e.g. local development).
        if not settings.RECAPTCHA_PRIVATE_KEY:
            del self.fields['recaptcha']

        errors = {'invalid': _('This URL has an invalid format. '
                               'Valid URLs look like '
                               'http://example.com/my_page.')}
        self.fields['homepage'].error_messages = errors

    def clean_email(self):
        # Block registration from blacklisted e-mail providers.
        d = self.cleaned_data['email'].split('@')[-1]
        if BlacklistedEmailDomain.blocked(d):
            raise forms.ValidationError(_('Please use an email address from a '
                                          'different provider to complete '
                                          'your registration.'))
        return self.cleaned_data['email']

    def clean(self):
        super(UserRegisterForm, self).clean()

        data = self.cleaned_data

        # Passwords
        p1 = data.get('password')
        p2 = data.get('password2')

        # If p1 is invalid because its blocked, this message is non sensical.
        if p1 and p1 != p2:
            msg = _('The passwords did not match.')
            self._errors['password2'] = ErrorList([msg])
            if p2:
                # Drop the confirmation copy so it never leaks further.
                del data['password2']

        return data
class UserEditForm(UserRegisterForm, PasswordMixin):
    """Profile edit form: optional password change, photo upload and
    e-mail notification preferences."""
    oldpassword = forms.CharField(max_length=255, required=False,
                                  widget=forms.PasswordInput(render_value=False))
    password = forms.CharField(max_length=255, required=False,
                               min_length=PasswordMixin.min_length,
                               error_messages=PasswordMixin.error_msg,
                               widget=PasswordMixin.widget(render_value=False))
    password2 = forms.CharField(max_length=255, required=False,
                                widget=forms.PasswordInput(render_value=False))
    photo = forms.FileField(label=_lazy(u'Profile Photo'), required=False)

    notifications = forms.MultipleChoiceField(
        choices=[],
        widget=NotificationsSelectMultiple,
        initial=email.NOTIFICATIONS_DEFAULT,
        required=False)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(UserEditForm, self).__init__(*args, **kwargs)

        if self.instance:
            # Start from the per-notification defaults, then overlay what
            # the user actually saved.
            default = dict((i, n.default_checked) for i, n
                           in email.NOTIFICATIONS_BY_ID.items())
            user = dict((n.notification_id, n.enabled) for n
                        in self.instance.notifications.all())
            default.update(user)

            # Add choices to Notification.
            choices = email.NOTIFICATIONS_CHOICES
            if not self.instance.is_developer:
                choices = email.NOTIFICATIONS_CHOICES_NOT_DEV

            # Append a "NEW" message to new notification options.
            saved = self.instance.notifications.values_list('notification_id',
                                                            flat=True)
            self.choices_status = {}
            for idx, label in choices:
                self.choices_status[idx] = idx not in saved

            self.fields['notifications'].choices = choices
            self.fields['notifications'].initial = [i for i, v
                                                    in default.items() if v]
            self.fields['notifications'].widget.form_instance = self

        # TODO: We should inherit from a base form not UserRegisterForm
        if self.fields.get('recaptcha'):
            del self.fields['recaptcha']

    class Meta:
        model = UserProfile
        exclude = ('password', 'picture_type')

    def clean(self):
        data = self.cleaned_data
        amouser = self.request.user.get_profile()

        # Passwords
        p1 = data.get("password")
        p2 = data.get("password2")

        # A password change requires the current password to match.
        if p1 or p2:
            if not amouser.check_password(data["oldpassword"]):
                msg = _("Wrong password entered!")
                self._errors["oldpassword"] = ErrorList([msg])
                del data["oldpassword"]

        super(UserEditForm, self).clean()
        return data

    def clean_photo(self):
        photo = self.cleaned_data['photo']

        if not photo:
            return

        if photo.content_type not in ('image/png', 'image/jpeg'):
            raise forms.ValidationError(
                _('Images must be either PNG or JPG.'))

        if photo.size > settings.MAX_PHOTO_UPLOAD_SIZE:
            raise forms.ValidationError(
                _('Please use images smaller than %dMB.' %
                  (settings.MAX_PHOTO_UPLOAD_SIZE / 1024 / 1024 - 1)))

        return photo

    def save(self, log_for_developer=True):
        u = super(UserEditForm, self).save(commit=False)
        data = self.cleaned_data
        photo = data['photo']
        if photo:
            # Stash the upload and let a celery task convert/resize it;
            # picture_type is set optimistically to the converted format.
            u.picture_type = 'image/png'
            tmp_destination = u.picture_path + '__unconverted'

            with storage.open(tmp_destination, 'wb') as fh:
                for chunk in photo.chunks():
                    fh.write(chunk)

            tasks.resize_photo.delay(tmp_destination, u.picture_path,
                                     set_modified_on=[u])

        if data['password']:
            u.set_password(data['password'])
            log_cef('Password Changed', 5, self.request, username=u.username,
                    signature='PASSWORDCHANGED', msg='User changed password')
            if log_for_developer:
                amo.log(amo.LOG.CHANGE_PASSWORD)
                log.info(u'User (%s) changed their password' % u)

        # Mandatory notifications are always enabled regardless of input.
        for (i, n) in email.NOTIFICATIONS_BY_ID.items():
            enabled = n.mandatory or (str(i) in data['notifications'])
            UserNotification.update_or_create(user=u, notification_id=i,
                                              update={'enabled': enabled})

        log.debug(u'User (%s) updated their profile' % u)

        u.save()
        return u
class BaseAdminUserEditForm(object):
    """Helpers shared by admin-facing user edit forms."""

    def changed_fields(self):
        """Returns changed_data ignoring these fields."""
        ignored = set(['admin_log', 'notifications', 'photo',
                       'password', 'password2', 'oldpassword'])
        return set(self.changed_data) - ignored

    def changes(self):
        """A dictionary of changed fields, old, new. Hides password."""
        details = {}
        for field in self.changed_fields():
            details[field] = (self.initial[field], self.cleaned_data[field])
        if 'password' in self.changed_data:
            details['password'] = ['****', '****']
        return details

    def clean_anonymize(self):
        # Anonymizing is all-or-nothing: no other field may change.
        if (self.cleaned_data['anonymize'] and
            self.changed_fields() != set(['anonymize'])):
            raise forms.ValidationError(_('To anonymize, enter a reason for'
                                          ' the change but do not change any'
                                          ' other field.'))
        return self.cleaned_data['anonymize']
class AdminUserEditForm(BaseAdminUserEditForm, UserEditForm):
    """This is the form used by admins to edit users' info."""
    admin_log = forms.CharField(required=True, label='Reason for change',
                                widget=forms.Textarea(attrs={'rows': 4}))
    confirmationcode = forms.CharField(required=False, max_length=255,
                                       label='Confirmation code')
    notes = forms.CharField(required=False, label='Notes',
                            widget=forms.Textarea(attrs={'rows': 4}))
    anonymize = forms.BooleanField(required=False)

    def save(self, *args, **kw):
        # log_for_developer=False: admin actions get their own log entries
        # below instead of the developer-facing ones.
        profile = super(AdminUserEditForm, self).save(log_for_developer=False)
        if self.cleaned_data['anonymize']:
            amo.log(amo.LOG.ADMIN_USER_ANONYMIZED, self.instance,
                    self.cleaned_data['admin_log'])
            profile.anonymize()  # This also logs
        else:
            amo.log(amo.LOG.ADMIN_USER_EDITED, self.instance,
                    self.cleaned_data['admin_log'], details=self.changes())
            log.info('Admin edit user: %s changed fields: %s' %
                     (self.instance, self.changed_fields()))
            # Admin-triggered password changes are flagged in CEF with the
            # admin's name for the audit trail.
            if 'password' in self.changes():
                log_cef('Password Changed', 5, self.request,
                        username=self.instance.username,
                        signature='PASSWORDRESET',
                        msg='Admin requested password reset',
                        cs1=self.request.amo_user.username,
                        cs1Label='AdminName')
        return profile
class BlacklistedUsernameAddForm(forms.Form):
    """Form for adding blacklisted username in bulk fashion."""
    usernames = forms.CharField(widget=forms.Textarea(
        attrs={'cols': 40, 'rows': 16}))

    def clean(self):
        super(BlacklistedUsernameAddForm, self).clean()
        data = self.cleaned_data

        if 'usernames' in data:
            # Normalize: strip each line and drop the blank ones.
            stripped = [line.strip()
                        for line in data['usernames'].splitlines()]
            data['usernames'] = os.linesep.join(
                [name for name in stripped if name])

        if not data.get('usernames'):
            msg = 'Please enter at least one username to blacklist.'
            self._errors['usernames'] = ErrorList([msg])

        return data
class BlacklistedEmailDomainAddForm(forms.Form):
    """Form for adding blacklisted user e-mail domains in bulk fashion."""
    domains = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 40, 'rows': 16}))

    def clean(self):
        super(BlacklistedEmailDomainAddForm, self).clean()
        data = self.cleaned_data

        if 'domains' in data:
            # Strip each line and keep only the non-empty ones.
            stripped = [line.strip()
                        for line in data['domains'].splitlines()]
            data['domains'] = os.linesep.join(
                [domain for domain in stripped if domain])

        if not data.get('domains', ''):
            msg = 'Please enter at least one e-mail domain to blacklist.'
            self._errors['domains'] = ErrorList([msg])

        return data
class ContactForm(happyforms.Form):
    # Free-form message body for the contact page.
    text = forms.CharField(widget=forms.Textarea())
class RemoveForm(happyforms.Form):
    # Single required checkbox confirming a removal action.
    remove = forms.BooleanField()
| |
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import sys
import os
import re
import shutil
import filecmp
from collections import namedtuple, OrderedDict
from itertools import groupby
import operator
from jinja2 import Environment, PackageLoader, select_autoescape
# Jinja environment.  Templates live in the templates/ package next to this
# script; autoescaping is disabled because the output is C++, not HTML.
env = Environment(
    loader=PackageLoader('templates', ''),
    autoescape=select_autoescape([]),
    keep_trailing_newline=True,
    lstrip_blocks=True
)

# Directory layout: CSV inputs come from ./csv; output lands in
# ../src/generated, staged through its tmp/ subdirectory first (see
# generate()).
gen_dir = os.path.dirname(os.path.abspath(__file__))
csv_dir = os.path.join(gen_dir, "csv")
dest_dir = os.path.abspath(os.path.join(gen_dir, "..", "src", "generated"))
tmp_dir = os.path.join(dest_dir, "tmp")
# Direct mapping from CSV scalar type names to the C++ types they emit.
# Composite types (Vector<...>, Ref<...>, etc.) are handled by cpp_type().
cpp_types = {
    'Boolean': 'bool',
    'Double': 'double',
    'UInt8': 'uint8_t',
    'UInt16': 'uint16_t',
    'UInt32': 'uint32_t',
    'Int8': 'int8_t',
    'Int16': 'int16_t',
    'Int32': 'int32_t',
    'String': 'std::string',
    'DBString': 'DBString',
    'DBBitArray': 'DBBitArray',
}
# Additional Jinja 2 functions
def lcf_type(field, prefix=True):
    """Jinja filter: classify *field* for template dispatch.

    Returns one of "Count", "Size", "DatabaseVersion", "Empty" or "Typed".
    `field.size` is a bool after get_fields() processing, so a plain
    truthiness test replaces the fragile `== True` comparison.
    """
    if field.size:
        # Size markers typed Count<...> are element counters; any other
        # size marker is a raw byte size.
        if re.match(r'Count<(.*)>', field.type):
            return "Count"
        return "Size"
    if field.type == "DatabaseVersion":
        return "DatabaseVersion"
    if field.type == "EmptyBlock":
        return "Empty"
    return "Typed"
def cpp_type(ty, prefix=True):
    """Translate a CSV type expression into the C++ type it generates.

    Composite forms (Vector<...>, Array<...>, DBArray<...>, Ref<...>,
    Enum<...>, *_Flags) recurse on their element type.  When *prefix* is
    true, project struct types are qualified with 'rpg::'.
    """
    if ty in cpp_types:
        return cpp_types[ty]
    if ty == "DatabaseVersion":
        return 'int32_t'
    if ty == "EmptyBlock":
        return 'void'
    m = re.match(r'Count<(.*)>', ty)
    if m:
        return cpp_type(m.group(1), prefix)
    m = re.match(r'Array<(.*):(.*)>', ty)
    if m:
        return 'std::vector<%s>' % cpp_type(m.group(1), prefix)
    m = re.match(r'(Vector|Array)<(.*)>', ty)
    if m:
        return 'std::vector<%s>' % cpp_type(m.group(2), prefix)
    m = re.match(r'DBArray<(.*)>', ty)
    if m:
        return 'DBArray<%s>' % cpp_type(m.group(1), prefix)
    m = re.match(r'Ref<(.*):(.*)>', ty)
    if m:
        return cpp_type(m.group(2), prefix)
    m = re.match(r'Ref<(.*)>', ty)
    if m:
        return 'int32_t'
    m = re.match(r'Enum<(.*)>', ty)
    if m:
        return 'int32_t'
    m = re.match(r'(.*)_Flags$', ty)
    if m:
        ty = m.expand(r'\1::Flags')
    # Single common tail: the original duplicated this prefix-and-return
    # inside the _Flags branch for identical effect.
    if prefix:
        ty = 'rpg::' + ty
    return ty
def pod_default(field):
    """Render the C++ default-value initializer snippet for *field*.

    Returns "" for types that default-construct, otherwise " = <value>".
    Raises RuntimeError when a numeric POD type has no explicit default.
    """
    dfl = field.default
    ftype = field.type
    # No default? Then just default construct using C++ syntax.
    if not dfl:
        if ftype.startswith(("UInt", "Int", "Double", "Boolean")):
            raise RuntimeError(f"Type {ftype} requires a default value!")
        return ""
    # Not a POD, no default
    if ftype.startswith('DBBitArray'):
        return ""
    # Inline python list syntax, if it parses, then convert to C++
    # initializer_list.  eval() is acceptable here: the CSV inputs are
    # trusted project files, not user input.
    if ftype.startswith(('Vector', 'Array', 'DBArray')):
        try:
            ilist = eval(dfl)
            if isinstance(ilist, list):
                return " = {" + ', '.join(str(x) for x in ilist) + "}"
        except Exception:
            pass
    if ftype == 'Boolean':
        dfl = dfl.lower()
    # A '|'-combined default (flag union) is represented as all-bits-set.
    if '|' in dfl:
        dfl = -1
    if ftype == 'Double':
        dfl = float(dfl)
    return " = " + str(dfl)
def num_flags(flag):
    """Number of individual flags defined for a struct."""
    return len(flag)

def flag_size(flag):
    """Bytes needed to store one bit per flag (ceiling division by 8)."""
    return -(-len(flag) // 8)
def flag_set(field, bit):
    """Whether 1-based *bit* is set in the field's integer default.

    Returns the C++ literal 'true'/'false'; a non-integer default counts
    as all-clear.
    """
    index = bit - 1
    try:
        default = int(field.default)
    except ValueError:
        # Default was not an int
        return "false"
    return str(bool(default & (1 << index))).lower()
def filter_structs_without_codes(structs):
    # Jinja filter: yield only structs whose fields all carry a chunk code.
    # NOTE: relies on the module-level `sfields` mapping populated in main().
    for struct in structs:
        if all(f.code for f in sfields[struct.name]):
            yield struct
def filter_unused_fields(fields):
    """Jinja filter: fields that carry data (typed and not EmptyBlock)."""
    return (f for f in fields if f.type and f.type != "EmptyBlock")

def filter_unwritten_fields(fields):
    """Jinja filter: fields that have any type at all."""
    return (f for f in fields if f.type)

def filter_size_fields(fields):
    """Jinja filter: fields that are not size/count markers."""
    return (f for f in fields if not f.size)
# End of Jinja 2 functions
# CSV integer type names -> fixed-width C++ integer types.  Used by
# struct_headers() to decide when <stdint.h> is required.
int_types = {
    'UInt8': 'uint8_t',
    'UInt16': 'uint16_t',
    'UInt32': 'uint32_t',
    'Int16': 'int16_t',
    'Int32': 'int32_t'
}
def struct_headers(ty, header_map):
    """Return the list of #include tokens needed for field type *ty*.

    Angle-bracket entries are system headers, quoted entries are project
    headers; get_headers() later sorts them into those two groups by
    first character.  Recurses into container element types.
    """
    if ty == 'String':
        return ['<string>']
    if ty == 'DBString':
        return ['"lcf/dbstring.h"']
    if ty == 'DBBitArray':
        return ['"lcf/dbbitarray.h"']
    if ty in int_types or ty == "DatabaseVersion":
        return ['<stdint.h>']
    if ty in cpp_types:
        return []
    m = re.match(r'Ref<(.*):(.*)>', ty)
    if m:
        return struct_headers(m.group(2), header_map)
    if re.match(r'Ref<(.*)>', ty):
        return []
    if re.match(r'Enum<(.*)>', ty):
        return ['"lcf/enum_tags.h"']
    if re.match(r'(.*)_Flags$', ty):
        return ['<array>']
    m = re.match(r'Array<(.*):(.*)>', ty)
    if m:
        return ['<vector>'] + struct_headers(m.group(1), header_map)
    m = re.match(r'(Vector|Array)<(.*)>', ty)
    if m:
        return ['<vector>'] + struct_headers(m.group(2), header_map)
    m = re.match(r'DBArray<(.*)>', ty)
    if m:
        # NOTE(review): angle brackets here differ from the quoted style of
        # the other lcf/ headers above and change the sort grouping in
        # get_headers() -- confirm this is intentional.
        return ['<lcf/dbarray.h>'] + struct_headers(m.group(1), header_map)
    # Struct types resolve to their own generated rpg/ header.
    header = header_map.get(ty)
    if header is not None:
        return ['"lcf/rpg/%s.h"' % header]
    if ty in ['Parameters', 'Equipment', 'EventCommand', 'MoveCommand', 'Rect', 'TreeMap']:
        return ['"lcf/rpg/%s.h"' % ty.lower()]
    return []
def merge_dicts(dicts):
    """Merge a sequence of dicts-of-lists into one dict.

    Values for keys that appear in several dicts are concatenated in
    order of appearance; key order follows first appearance.  Unlike the
    previous version, the input dicts (and their lists) are left
    unmodified.
    """
    merged = OrderedDict()
    for d in dicts:
        for key, values in d.items():
            merged.setdefault(key, []).extend(values)
    return merged
def process_file(filename, namedtup):
    """Parse csv_dir/filename into an OrderedDict keyed by the first column.

    Each value is the list of rows (minus the key column) wrapped in
    *namedtup*, in file order; empty cells become "".
    """
    # Mapping is: All elements of the line grouped by the first column
    path = os.path.join(csv_dir, filename)
    df = pd.read_csv(path, comment='#', dtype=str)
    df = df.fillna("")
    lines = [ list(r) for _i, r in df.iterrows() ]
    result = OrderedDict()
    # groupby only merges *consecutive* rows with the same key, so the CSV
    # is assumed to already be grouped by its first column; a repeated,
    # non-adjacent key would overwrite the earlier group here.
    for k, g in groupby(lines, operator.itemgetter(0)):
        result[k] = list(map(lambda x: namedtup(*x[1:]), list(g)))
    return result
def get_structs(*filenames):
    """Load struct definitions from the given CSVs.

    Returns (mapping of filetype -> [Struct], flat list of all Structs),
    with `hasid` converted from '0'/'1' strings to bool (None if empty).
    """
    Struct = namedtuple("Struct", "name base hasid")
    merged = merge_dicts([process_file(name, Struct) for name in filenames])

    processed_result = OrderedDict()
    for filetype, entries in merged.items():
        processed_result[filetype] = [
            Struct(e.name, e.base, bool(int(e.hasid)) if e.hasid else None)
            for e in entries
        ]

    processed_flat = [e for entries in processed_result.values()
                      for e in entries]
    return processed_result, processed_flat
def get_fields(*filenames):
    """Load field definitions, normalizing the raw CSV strings.

    `size` becomes a bool ('t' flag) and `code` an int (base auto-detected,
    0 when absent).
    """
    Field = namedtuple("Field", "name size type code default presentifdefault is2k3 comment")
    merged = merge_dicts([process_file(name, Field) for name in filenames])

    processed_result = OrderedDict()
    for key, entries in merged.items():
        processed_result[key] = [
            Field(e.name,
                  e.size == 't',
                  e.type,
                  int(e.code, 0) if e.code else 0,
                  e.default,
                  e.presentifdefault,
                  e.is2k3,
                  e.comment)
            for e in entries
        ]
    return processed_result
def get_enums(*filenames):
    """Load enum tables, grouped twice: by owner, then by enum entry.

    Result shape: owner -> enum name -> list of (value, index) pairs,
    e.g. EventCommand -> Code -> [(name, index), ...].
    """
    Enum = namedtuple("Enum", "entry value index")
    merged = merge_dicts([process_file(name, Enum) for name in filenames])

    new_result = OrderedDict()
    for owner, rows in merged.items():
        grouped = OrderedDict()
        for entry, items in groupby(rows, operator.attrgetter("entry")):
            grouped[entry] = [(item.value, item.index) for item in items]
        new_result[owner] = grouped
    return new_result
def get_flags(*filenames):
    """Load flag bit definitions, keyed by owning struct name."""
    Flag = namedtuple("Flag", "field is2k3")
    return merge_dicts([process_file(name, Flag) for name in filenames])
def get_functions(*filenames):
    """Load extra member-function declarations per struct.

    The `static` column is normalized from the 't' flag to a bool.
    """
    Function = namedtuple("Function", "method static headers")
    merged = merge_dicts([process_file(name, Function) for name in filenames])

    processed_result = OrderedDict()
    for key, entries in merged.items():
        processed_result[key] = [
            Function(e.method, e.static == 't', e.headers)
            for e in entries
        ]
    return processed_result
def get_constants(filename='constants.csv'):
    # Constants table, keyed by owning struct name.
    return process_file(filename, namedtuple("Constant", "name type value comment"))
def get_headers():
    """Compute the #include list for each struct's generated header.

    Relies on the module globals `structs`, `structs_flat`, `sfields` and
    `functions` populated by main().  System headers (<...>) are listed
    before project headers ("..."), each group sorted.
    """
    # Struct name -> generated header basename.
    header_map = dict()
    for filetype, struct in structs.items():
        for elem in struct:
            header_map[elem.name] = elem.name.lower()
    result = {}
    for struct in structs_flat:
        struct_name = struct.name
        if struct_name not in sfields:
            continue
        struct_result = result.setdefault(struct_name, [])
        struct_base = struct.base
        # The base struct's header always comes first.
        if struct_base:
            struct_result.append('"lcf/rpg/{}.h"'.format(struct_base.lower()))
        headers = set()
        for field in sfields[struct_name]:
            ftype = field.type
            if not ftype:
                continue
            headers.update(struct_headers(ftype, header_map))
        # Extra member functions may declare additional headers.
        if struct_name in functions:
            for s in functions[struct_name]:
                if s.headers:
                    headers.update([s.headers])
        struct_result += sorted(x for x in headers if x[0] == '<') + sorted(x for x in headers if x[0] == '"')
    return result
def type_is_db_string(ty):
    # Jinja test: *ty* is the DBString type.
    return ty == 'DBString'
def type_is_array(ty):
    """Jinja test: *ty* denotes any array-like container.

    Normalized to return a plain bool instead of a Match-or-bool union;
    callers only ever used the value for truthiness.
    """
    if ty == "DBBitArray":
        return True
    return re.match(r'(Vector|Array|DBArray)<(.*)>', ty) is not None
def type_is_struct(ty):
    # Jinja test: *ty* names a generated struct.  Relies on the module
    # global `structs_flat` populated in main().
    return ty in [ x.name for x in structs_flat ]
def type_is_array_of_struct(ty):
    # Jinja test: *ty* is a container whose element type is a struct.
    m = re.match(r'(Vector|Array|DBArray)<(.*)>', ty)
    return m and type_is_struct(m.group(2))
def is_monotonic_from_0(enum):
    """True when the enum indices count 0, 1, 2, ... without gaps."""
    return all(int(index) == position
               for position, (_value, index) in enumerate(enum))
def openToRender(path):
    """Open *path* for writing, creating missing parent directories.

    Fixes two defects of the exists()+makedirs() pattern: the check/create
    race (exist_ok=True handles it atomically) and a crash when *path*
    has no directory component (os.makedirs('') raises).
    """
    subdir = os.path.dirname(path)
    if subdir:
        os.makedirs(subdir, exist_ok=True)
    return open(path, 'w')
def generate():
    """Render every template into tmp_dir, then sync changes to dest_dir.

    Files are staged in tmp/ and copied over only when their content
    differs from the existing output, which keeps mtimes of unchanged
    files stable.  Relies on the module globals (structs, sfields, flags,
    templates, ...) set up by main().
    """
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)

    # Per-filetype chunk code headers.
    for filetype in ['ldb','lmt','lmu','lsd']:
        filepath = os.path.join(tmp_dir, 'lcf', filetype, 'chunks.h')
        with openToRender(filepath) as f:
            f.write(chunk_tmpl.render(
                type=filetype
            ))

    # Forward declaration headers over all known structs.
    filepath = os.path.join(tmp_dir, 'fwd_struct_impl.h')
    with openToRender(filepath) as f:
        f.write(fwd_struct_tmpl.render(
            structs=sorted([x.name for x in structs_flat])
        ))

    filepath = os.path.join(tmp_dir, 'lcf', 'rpg', 'fwd.h')
    with openToRender(filepath) as f:
        f.write(fwd_tmpl.render(
            structs=sorted([x.name for x in structs_flat])
        ))

    # Per-struct outputs: reader .cpp, rpg header, rpg source, flags header.
    for filetype, structlist in structs.items():
        for struct in structlist:
            filename = struct.name.lower()

            if struct.hasid is not None:
                # Only structs with known fields get a chunk reader;
                # structs without any are skipped entirely.
                if struct.name not in sfields:
                    continue
                filepath = os.path.join(tmp_dir, '%s_%s.cpp' % (filetype, filename))
                with openToRender(filepath) as f:
                    f.write(lcf_struct_tmpl.render(
                        struct_name=struct.name,
                        struct_base=struct.base,
                        type=filetype
                    ))

            filepath = os.path.join(tmp_dir, 'lcf', 'rpg', '%s.h' % filename)
            with openToRender(filepath) as f:
                f.write(rpg_header_tmpl.render(
                    struct_name=struct.name,
                    struct_base=struct.base,
                    has_id=struct.hasid
                ))

            filepath = os.path.join(tmp_dir, 'rpg_%s.cpp' % filename)
            with openToRender(filepath) as f:
                f.write(rpg_source_tmpl.render(
                    struct_name=struct.name,
                    struct_base=struct.base,
                    filename=filename
                ))

            if struct.name in flags:
                filepath = os.path.join(tmp_dir, '%s_%s_flags.h' % (filetype, filename))
                with openToRender(filepath) as f:
                    f.write(flags_tmpl.render(
                        struct_name=struct.name,
                        type=filetype
                    ))

    filepath = os.path.join(tmp_dir, 'rpg_enums.cpp')
    with openToRender(filepath) as f:
        f.write(enums_tmpl.render())

    # Sync changed files out of tmp/ and dismantle the staging tree
    # (bottom-up walk so directories are empty when rmdir runs).
    for dirname, subdirlist, filelist in os.walk(tmp_dir, topdown=False):
        subdir = os.path.relpath(dirname, tmp_dir)
        for tmp_file in filelist:
            tmp_path = os.path.join(tmp_dir, subdir, tmp_file)
            dest_path = os.path.join(dest_dir, subdir, tmp_file)
            dest_subdir = os.path.dirname(dest_path)
            if not os.path.exists(dest_subdir):
                os.mkdir(dest_subdir)
            # Copy only when content actually changed.
            if not (os.path.exists(dest_path) and filecmp.cmp(tmp_path, dest_path)):
                shutil.copyfile(tmp_path, dest_path)
            os.remove(tmp_path)
        os.rmdir(os.path.join(dirname))
def main(argv):
    """Entry point: load CSV metadata, configure Jinja, generate sources."""
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)

    # The loaded tables and compiled templates are shared with the filter
    # functions above (and generate()) through module globals.
    global structs, structs_flat, sfields, enums, flags, functions, constants, headers
    global chunk_tmpl, lcf_struct_tmpl, rpg_header_tmpl, rpg_source_tmpl, flags_tmpl, enums_tmpl, fwd_tmpl, fwd_struct_tmpl

    structs, structs_flat = get_structs('structs.csv','structs_easyrpg.csv')
    sfields = get_fields('fields.csv','fields_easyrpg.csv')
    enums = get_enums('enums.csv','enums_easyrpg.csv')
    flags = get_flags('flags.csv')
    functions = get_functions('functions.csv')
    constants = get_constants()
    headers = get_headers()

    # Setup Jinja
    env.filters["lcf_type"] = lcf_type
    env.filters["cpp_type"] = cpp_type
    env.filters["pod_default"] = pod_default
    env.filters["struct_has_code"] = filter_structs_without_codes
    env.filters["field_is_used"] = filter_unused_fields
    env.filters["field_is_written"] = filter_unwritten_fields
    env.filters["field_is_not_size"] = filter_size_fields
    env.filters["num_flags"] = num_flags
    env.filters["flag_size"] = flag_size
    env.filters["flag_set"] = flag_set
    env.tests['monotonic_from_0'] = is_monotonic_from_0
    env.tests['is_db_string'] = type_is_db_string
    env.tests['is_array'] = type_is_array
    env.tests['is_array_of_struct'] = type_is_array_of_struct
    env.tests['is_struct'] = type_is_struct

    # Data made visible to every template.  (NOTE: this local name shadows
    # the `globals` builtin inside main().)
    globals = dict(
        structs=structs,
        structs_flat=structs_flat,
        fields=sfields,
        flags=flags,
        enums=enums,
        functions=functions,
        constants=constants,
        headers=headers
    )
    chunk_tmpl = env.get_template('chunks.tmpl', globals=globals)
    lcf_struct_tmpl = env.get_template('reader.tmpl', globals=globals)
    rpg_header_tmpl = env.get_template('rpg_header.tmpl', globals=globals)
    rpg_source_tmpl = env.get_template('rpg_source.tmpl', globals=globals)
    flags_tmpl = env.get_template('flag_reader.tmpl', globals=globals)
    enums_tmpl = env.get_template('rpg_enums.tmpl', globals=globals)
    fwd_tmpl = env.get_template('fwd.tmpl', globals=globals)
    fwd_struct_tmpl = env.get_template('fwd_struct.tmpl', globals=globals)

    generate()

if __name__ == '__main__':
    main(sys.argv)
| |
from django.db.models import Q
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse, HttpResponseRedirect
from django.urls.base import reverse
from django.utils import timezone
from django.utils.translation import ugettext as _, ngettext
from django.views.generic import (
FormView, ListView, RedirectView, View
)
from django.views.generic.edit import FormMixin
from apps.core.models import OmicsArea, PixelSet, Tag
from ..forms import (
PixelSetFiltersForm, PixelSetExportForm,
PixelSetSelectForm, SessionPixelSetSelectForm
)
from ..utils import export_pixelsets
from .helpers import (
get_selected_pixel_sets_from_session, set_selected_pixel_sets_to_session
)
from .views_selection import GetSearchTermsMixin
class PixelSetListView(LoginRequiredMixin, FormMixin, ListView):
    """Paginated Pixel Set list with filtering by species, omics unit
    type, omics area, tags and free-text search."""

    form_class = PixelSetFiltersForm
    model = PixelSet
    paginate_by = 10
    template_name = 'explorer/pixelset_list.html'

    def get_form_kwargs(self):
        # Filters are submitted via GET so result pages stay bookmarkable.
        kwargs = {
            'initial': self.get_initial(),
            'prefix': self.get_prefix(),
        }
        if self.request.method == 'GET':
            kwargs.update({
                'data': self.request.GET,
            })
        return kwargs

    def get_queryset(self):
        qs = super().get_queryset()

        form = self.get_form()
        if form.is_valid():
            species = form.cleaned_data.get('species')
            if species:
                qs = qs.filter(
                    pixel__omics_unit__strain__species__id__in=species
                )

            omics_unit_types = form.cleaned_data.get('omics_unit_types')
            if omics_unit_types:
                qs = qs.filter(
                    pixel__omics_unit__type__id__in=omics_unit_types
                )

            parent_omics_areas = form.cleaned_data.get('omics_areas')
            if parent_omics_areas:
                # Selecting a parent area implicitly selects its subtree.
                omics_areas = OmicsArea.objects.get_queryset_descendants(
                    parent_omics_areas,
                    include_self=True
                )
                qs = qs.filter(
                    analysis__experiments__omics_area__id__in=omics_areas
                )

            parent_tags = form.cleaned_data.get('tags')
            if parent_tags:
                # Add descendants to the tags queryset
                tags = Tag.objects.filter(
                    id__in=parent_tags
                ).with_descendants()

                qs = qs.filter(
                    Q(analysis__tags__id__in=tags) |
                    Q(analysis__experiments__tags__id__in=tags)
                )

            search = form.cleaned_data.get('search')
            # Truthiness instead of len(): also safe when the value is
            # None (len(None) raised a TypeError).
            if search:
                qs = qs.filter(
                    Q(analysis__id__istartswith=search) |
                    Q(analysis__experiments__description__icontains=search) |
                    Q(analysis__description__icontains=search) |
                    Q(pixel__omics_unit__reference__identifier__iexact=search)
                )

        # optimize db queries
        qs = qs.select_related(
            'analysis',
            'analysis__pixeler',
        ).prefetch_related(
            'analysis__experiments__tags',
            'analysis__tags',
        )

        return qs.distinct()

    def get_context_data(self, **kwargs):
        selected_pixelsets = get_selected_pixel_sets_from_session(
            self.request.session
        )
        # Resolve session-stored ids to model instances for the template.
        if selected_pixelsets:
            selected_pixelsets = PixelSet.objects.filter(
                id__in=selected_pixelsets
            )

        context = super().get_context_data(**kwargs)
        context.update({
            'export_form': PixelSetExportForm(),
            'select_form': PixelSetSelectForm(),
            'selected_pixelsets': selected_pixelsets,
        })
        return context
class PixelSetClearView(LoginRequiredMixin, RedirectView):
    """Empty the Pixel Set selection stored in the user's session."""

    http_method_names = ['post', ]

    def get_redirect_url(self, *args, **kwargs):
        # Go back where the caller asked, defaulting to the list view.
        fallback = reverse('explorer:pixelset_list')
        return self.request.POST.get('redirect_to', fallback)

    def post(self, request, *args, **kwargs):
        set_selected_pixel_sets_to_session(request.session, [])

        messages.success(
            request,
            _("Pixel Set selection has been cleared.")
        )

        return super().post(request, *args, **kwargs)
class PixelSetDeselectView(LoginRequiredMixin, FormView):
    """Remove a single Pixel Set from the session selection."""

    form_class = SessionPixelSetSelectForm
    http_method_names = ['post', ]

    def get_success_url(self):
        return self.request.POST.get(
            'redirect_to',
            reverse('explorer:pixelset_list')
        )

    def get_form(self, form_class=None):
        """Instanciate the form with appropriate pixel set choices
        (i.e. pixel sets stored in session)
        """
        if form_class is None:
            form_class = self.get_form_class()

        session_pixel_sets = get_selected_pixel_sets_from_session(
            self.request.session
        )
        return form_class(session_pixel_sets, **self.get_form_kwargs())

    def form_valid(self, form):
        session_pixel_sets = get_selected_pixel_sets_from_session(
            self.request.session
        )
        pixel_set = form.cleaned_data['pixel_set']

        # The form validated against session choices, so the value is
        # guaranteed to be present in the list.
        session_pixel_sets.remove(pixel_set)

        set_selected_pixel_sets_to_session(
            self.request.session,
            pixel_sets=session_pixel_sets
        )

        messages.success(
            self.request,
            _("Pixel Set {} has been removed from selection.").format(
                pixel_set
            )
        )

        return super().form_valid(form)

    def form_invalid(self, form):
        # Surface the first message of each field's error list; only the
        # error values are needed (the field keys were unused).
        messages.error(
            self.request,
            '\n'.join(str(errors[0]) for errors in form.errors.values())
        )
        return HttpResponseRedirect(self.get_success_url())
class PixelSetSelectView(LoginRequiredMixin, FormView):
    """Add one or more Pixel Sets to the session selection."""

    form_class = PixelSetSelectForm
    http_method_names = ['post', ]

    def get_success_url(self):
        return self.request.POST.get(
            'redirect_to',
            reverse('explorer:pixelset_list')
        )

    def form_valid(self, form):
        # Union of the current session selection and the submitted sets;
        # the set() pass deduplicates ids.
        selection = list(
            set(
                get_selected_pixel_sets_from_session(self.request.session) + [
                    str(p.id) for p in form.cleaned_data['pixel_sets']
                ]
            )
        )

        set_selected_pixel_sets_to_session(
            self.request.session,
            pixel_sets=selection
        )

        nb_pixelsets = len(form.cleaned_data['pixel_sets'])
        messages.success(
            self.request,
            ngettext(
                '%(count)d Pixel Set has been added to your selection.',
                '%(count)d Pixel Sets have been added to your selection.',
                nb_pixelsets
            ) % {
                'count': nb_pixelsets,
            }
        )

        return super().form_valid(form)

    def form_invalid(self, form):
        # Surface the first message of each field's error list; only the
        # error values are needed (the field keys were unused).
        messages.error(
            self.request,
            '\n'.join(str(errors[0]) for errors in form.errors.values())
        )
        return HttpResponseRedirect(self.get_success_url())
class PixelSetExportView(LoginRequiredMixin, GetSearchTermsMixin, View):
    """Download the currently selected Pixel Sets as a zip archive."""

    ATTACHEMENT_FILENAME = 'pixelsets_{date_time}.zip'
    SUBSET_QUERY_PARAM = 'only-subset'

    @staticmethod
    def get_export_archive_filename():
        """Timestamped archive name, e.g. pixelsets_20180101_12h00m00s.zip."""
        return PixelSetExportView.ATTACHEMENT_FILENAME.format(
            date_time=timezone.now().strftime('%Y%m%d_%Hh%Mm%Ss')
        )

    def get(self, request, *args, **kwargs):
        selection = get_selected_pixel_sets_from_session(self.request.session)

        # Idiomatic emptiness check (the len() form was redundant).
        if not selection:
            return self.empty_selection(request)

        search_terms = []
        # only take omics units into account if it is requested.
        if request.GET.get(self.SUBSET_QUERY_PARAM, False):
            search_terms = self.get_search_terms(self.request.session)

        qs = PixelSet.objects.filter(id__in=selection)

        content = export_pixelsets(
            pixel_sets=qs,
            search_terms=search_terms,
        ).getvalue()

        response = HttpResponse(content, content_type='application/zip')
        response['Content-Disposition'] = 'attachment; filename={}'.format(
            self.get_export_archive_filename()
        )
        return response

    def empty_selection(self, request):
        """Redirect back to the list with an error message."""
        messages.error(
            request,
            _("Cannot export an empty selection.")
        )
        return HttpResponseRedirect(reverse('explorer:pixelset_list'))
| |
from Child import Child
from Node import Node # noqa: I201
# Grammar description of Swift statement syntax nodes, consumed by the
# libSyntax/SwiftSyntax code generators.  Each Node declares a syntax kind and
# its ordered children; Child kinds reference token kinds or other node kinds.
STMT_NODES = [
    # continue-stmt -> 'continue' label? ';'?
    Node('ContinueStmt', kind='Stmt',
         children=[
             Child('ContinueKeyword', kind='ContinueToken'),
             Child('Label', kind='IdentifierToken',
                   is_optional=True),
         ]),

    # while-stmt -> label? ':'? 'while' condition-list code-block ';'?
    Node('WhileStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('WhileKeyword', kind='WhileToken'),
             Child('Conditions', kind='ConditionElementList'),
             Child('Body', kind='CodeBlock'),
         ]),

    # defer-stmt -> 'defer' code-block ';'?
    Node('DeferStmt', kind='Stmt',
         traits=['WithCodeBlock'],
         children=[
             Child('DeferKeyword', kind='DeferToken'),
             Child('Body', kind='CodeBlock'),
         ]),

    # expr-stmt -> expression ';'?
    Node('ExpressionStmt', kind='Stmt',
         children=[
             Child('Expression', kind='Expr'),
         ]),

    # switch-case-list -> switch-case switch-case-list?
    Node('SwitchCaseList', kind='SyntaxCollection',
         element='Syntax',
         element_choices=['SwitchCase', 'IfConfigDecl']),

    # repeat-while-stmt -> label? ':'? 'repeat' code-block 'while' expr ';'?
    Node('RepeatWhileStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('RepeatKeyword', kind='RepeatToken'),
             Child('Body', kind='CodeBlock'),
             Child('WhileKeyword', kind='WhileToken'),
             Child('Condition', kind='Expr'),
         ]),

    # guard-stmt -> 'guard' condition-list 'else' code-block ';'?
    Node('GuardStmt', kind='Stmt',
         traits=['WithCodeBlock'],
         children=[
             Child('GuardKeyword', kind='GuardToken'),
             Child('Conditions', kind='ConditionElementList'),
             Child('ElseKeyword', kind='ElseToken'),
             Child('Body', kind='CodeBlock'),
         ]),

    # where-clause -> 'where' expr
    Node('WhereClause', kind='Syntax',
         children=[
             Child('WhereKeyword', kind='WhereToken'),
             Child('GuardResult', kind='Expr'),
         ]),

    # for-in-stmt -> label? ':'? 'for' 'case'? pattern 'in' expr 'where'?
    #   expr code-block ';'?
    Node('ForInStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('ForKeyword', kind='ForToken'),
             Child('CaseKeyword', kind='CaseToken',
                   is_optional=True),
             Child('Pattern', kind='Pattern'),
             Child('TypeAnnotation', kind='TypeAnnotation',
                   is_optional=True),
             Child('InKeyword', kind='InToken'),
             Child('SequenceExpr', kind='Expr'),
             Child('WhereClause', kind='WhereClause',
                   is_optional=True),
             Child('Body', kind='CodeBlock'),
         ]),

    # switch-stmt -> identifier? ':'? 'switch' expr '{'
    #   switch-case-list '}' ';'?
    Node('SwitchStmt', kind='Stmt',
         traits=['Braced', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('SwitchKeyword', kind='SwitchToken'),
             Child('Expression', kind='Expr'),
             Child('LeftBrace', kind='LeftBraceToken'),
             Child('Cases', kind='SwitchCaseList'),
             Child('RightBrace', kind='RightBraceToken'),
         ]),

    # catch-clause-list -> catch-clause catch-clause-list?
    Node('CatchClauseList', kind='SyntaxCollection',
         element='CatchClause'),

    # do-stmt -> identifier? ':'? 'do' code-block catch-clause-list ';'?
    Node('DoStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('DoKeyword', kind='DoToken'),
             Child('Body', kind='CodeBlock'),
             Child('CatchClauses', kind='CatchClauseList',
                   is_optional=True),
         ]),

    # return-stmt -> 'return' expr? ';'?
    Node('ReturnStmt', kind='Stmt',
         children=[
             Child('ReturnKeyword', kind='ReturnToken'),
             Child('Expression', kind='Expr',
                   is_optional=True),
         ]),

    # fallthrough-stmt -> 'fallthrough' ';'?
    Node('FallthroughStmt', kind='Stmt',
         children=[
             Child('FallthroughKeyword', kind='FallthroughToken'),
         ]),

    # break-stmt -> 'break' identifier? ';'?
    Node('BreakStmt', kind='Stmt',
         children=[
             Child('BreakKeyword', kind='BreakToken'),
             Child('Label', kind='IdentifierToken',
                   is_optional=True),
         ]),

    # case-item-list -> case-item case-item-list?
    Node('CaseItemList', kind='SyntaxCollection',
         element='CaseItem'),

    # condition -> expression
    #            | availability-condition
    #            | case-condition
    #            | optional-binding-condition
    Node('ConditionElement', kind='Syntax',
         traits=['WithTrailingComma'],
         children=[
             Child('Condition', kind='Syntax',
                   node_choices=[
                       Child('Expression', kind='Expr'),
                       # NOTE(review): 'Availablity' is misspelled, but the
                       # name feeds the generated API surface — do not rename
                       # without a deprecation plan.
                       Child('Availablity', kind='AvailabilityCondition'),
                       Child('MatchingPattern',
                             kind='MatchingPatternCondition'),
                       Child('OptionalBinding',
                             kind='OptionalBindingCondition'),
                   ]),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),

    # availability-condition -> '#available' '(' availability-arguments ')'
    Node('AvailabilityCondition', kind='Syntax',
         children=[
             Child('PoundAvailableKeyword', kind='PoundAvailableToken'),
             Child('Arguments', kind='TokenList'),
         ]),

    # case-condition -> 'case' pattern type-annotation? initializer
    Node('MatchingPatternCondition', kind='Syntax',
         children=[
             Child('CaseKeyword', kind='CaseToken'),
             Child('Pattern', kind='Pattern'),
             Child('TypeAnnotation', kind='TypeAnnotation',
                   is_optional=True),
             Child('Initializer', kind='InitializerClause'),
         ]),

    # optional-binding-condition -> ('let' | 'var') pattern
    #   type-annotation? initializer
    Node('OptionalBindingCondition', kind='Syntax',
         children=[
             Child('LetOrVarKeyword', kind='Token',
                   token_choices=[
                       'LetToken', 'VarToken',
                   ]),
             Child('Pattern', kind='Pattern'),
             Child('TypeAnnotation', kind='TypeAnnotation',
                   is_optional=True),
             Child('Initializer', kind='InitializerClause'),
         ]),

    # condition-list -> condition
    #                 | condition ','? condition-list
    Node('ConditionElementList', kind='SyntaxCollection',
         element='ConditionElement'),

    # A declaration in statement position.
    # struct Foo {};
    Node('DeclarationStmt', kind='Stmt',
         children=[
             Child('Declaration', kind='Decl'),
         ]),

    # throw-stmt -> 'throw' expr ';'?
    Node('ThrowStmt', kind='Stmt',
         children=[
             Child('ThrowKeyword', kind='ThrowToken'),
             Child('Expression', kind='Expr'),
         ]),

    # if-stmt -> identifier? ':'? 'if' condition-list code-block
    #   else-clause ';'?
    Node('IfStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('IfKeyword', kind='IfToken'),
             Child('Conditions', kind='ConditionElementList'),
             Child('Body', kind='CodeBlock'),
             Child('ElseKeyword', kind='ElseToken',
                   is_optional=True),
             Child('ElseBody', kind='Syntax',
                   node_choices=[
                       Child('IfStmt', kind='IfStmt'),
                       Child('CodeBlock', kind='CodeBlock'),
                   ],
                   is_optional=True),
         ]),

    # else-if-continuation -> if-stmt
    # (an 'if' statement used as the continuation of an 'else' clause;
    # the previous comment here was copy-pasted from while-stmt)
    Node('ElseIfContinuation', kind='Syntax',
         children=[
             Child('IfStatement', kind='IfStmt'),
         ]),

    # else-clause -> 'else' code-block
    Node('ElseBlock', kind='Syntax',
         traits=['WithCodeBlock'],
         children=[
             Child('ElseKeyword', kind='ElseToken'),
             Child('Body', kind='CodeBlock'),
         ]),

    # switch-case -> unknown-attr? switch-case-label stmt-list
    #              | unknown-attr? switch-default-label stmt-list
    Node('SwitchCase', kind='Syntax',
         traits=['WithStatements'],
         children=[
             Child('UnknownAttr', kind='Attribute', is_optional=True),
             Child('Label', kind='Syntax',
                   node_choices=[
                       Child('Default', kind='SwitchDefaultLabel'),
                       Child('Case', kind='SwitchCaseLabel'),
                   ]),
             Child('Statements', kind='CodeBlockItemList'),
         ]),

    # switch-default-label -> 'default' ':'
    Node('SwitchDefaultLabel', kind='Syntax',
         children=[
             Child('DefaultKeyword', kind='DefaultToken'),
             Child('Colon', kind='ColonToken'),
         ]),

    # case-item -> pattern where-clause? ','?
    Node('CaseItem', kind='Syntax',
         traits=['WithTrailingComma'],
         children=[
             Child('Pattern', kind='Pattern'),
             Child('WhereClause', kind='WhereClause',
                   is_optional=True),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),

    # switch-case-label -> 'case' case-item-list ':'
    Node('SwitchCaseLabel', kind='Syntax',
         children=[
             Child('CaseKeyword', kind='CaseToken'),
             Child('CaseItems', kind='CaseItemList'),
             Child('Colon', kind='ColonToken'),
         ]),

    # catch-clause 'catch' pattern? where-clause? code-block
    Node('CatchClause', kind='Syntax',
         children=[
             Child('CatchKeyword', kind='CatchToken'),
             Child('Pattern', kind='Pattern',
                   is_optional=True),
             Child('WhereClause', kind='WhereClause',
                   is_optional=True),
             Child('Body', kind='CodeBlock'),
         ]),
]
| |
# -*- coding: utf-8 -*-
"""
Request Management System - Controllers
"""
# web2py executes this controller in a global environment providing
# request/response/session/db/T/URL etc.
module = request.controller  # "rms" when served from this controller file

# Bail out early when the RMS module is disabled in deployment settings.
if module not in deployment_settings.modules:
    session.error = T("Module disabled!")
    redirect(URL(r=request, c="default", f="index"))

# Options Menu (available in all Functions' Views)
response.menu_options = [
    [T("Home"), False, URL(r=request, f="index")],
    [T("Requests"), False, URL(r=request, f="req")],
    # [T("Requests"), False, URL(r=request, f="req"),
    #  [T("Add"), False, URL(r=request, f="req", args="create")],
    # ],
    [T("All Requested Items"), False, URL(r=request, f="ritem")],
    [T("All Pledges"), False, URL(r=request, f="pledge")]
]
# S3 framework functions
def index():
    "Module's Home Page"
    """ Default to the rms_req list view - TODO does not work with paginate!!!"""
    # Rewrite the current request in place so the req() controller serves it.
    request.function = "req"
    request.args = []
    return req()
    #module_name = deployment_settings.modules[module].name_nice
    #return dict(module_name=module_name, a=1)
def req():
    """ RESTful CRUD controller for aid requests (rms_req). """
    resource = request.function
    tablename = module + "_" + resource
    table = db[tablename]

    # Don't send the locations list to client (pulled by AJAX instead)
    table.location_id.requires = IS_NULL_OR(IS_ONE_OF_EMPTY(db, "gis_location.id"))

    # Pre-processor
    def prep(r):
        if r.representation in ("html", "popup"):
            if r.method == "create":
                table.timestmp.default = request.utcnow
                # Default the requester to the person record linked to the
                # logged-in user, when there is one.
                person = session.auth.user.id if auth.is_logged_in() else None
                if person:
                    person_uuid = db(db.auth_user.id == person).select(db.auth_user.person_uuid, limitby=(0, 1)).first().person_uuid
                    person = db(db.pr_person.uuid == person_uuid).select(db.pr_person.id, limitby=(0, 1)).first().id
                table.person_id.default = person
                table.pledge_status.readable = False
            elif r.method == "update":
                table.pledge_status.readable = False
            shn_action_buttons(r)
        return True
    response.s3.prep = prep

    # Filter out non-actionable SMS requests:
    #response.s3.filter = (db.rms_req.actionable == True) | (db.rms_req.source_type != 2) # disabled b/c Ushahidi no longer updating actionaable fielde

    # Post-processor: attach per-row action buttons to the list views.
    def req_postp(jr, output):
        if jr.representation in ("html", "popup"):
            if not jr.component:
                response.s3.actions = [
                    dict(label=str(T("Open")), _class="action-btn", url=str(URL(r=request, args=["update", "[id]"]))),
                    dict(label=str(T("Items")), _class="action-btn", url=str(URL(r=request, args=["[id]", "ritem"]))),
                    dict(label=str(T("Pledge")), _class="action-btn", url=str(URL(r=request, args=["[id]", "pledge"])))
                ]
            elif jr.component_name == "pledge":
                response.s3.actions = [
                    dict(label=str(T("Details")), _class="action-btn", url=str(URL(r=request, args=["pledge", "[id]"])))
                ]
        return output
    response.s3.postp = req_postp

    response.s3.pagination = True
    output = shn_rest_controller(module, resource,
                                 editable=True,
                                 #listadd=False,
                                 rheader=shn_rms_rheader)
    return output
def ritem():
    "RESTful CRUD controller for requested items (rms_ritem)."
    resource = request.function
    tablename = "%s_%s" % (module, resource)
    table = db[tablename]

    # Post-processor: standard list-view action buttons.
    def postp(jr, output):
        shn_action_buttons(jr)
        return output
    response.s3.postp = postp

    #rheader = lambda jr: shn_item_rheader(jr,
    #           tabs = [(T("Requests for Item"), None),
    #                   (T("Inventories with Item"), "location_item"),
    #                   (T("Requests for Item"), "req"),
    #                  ]
    #           )
    return shn_rest_controller(module,
                               resource,
                               #rheader=rheader
                               )
def pledge():
    """ RESTful CRUD controller for pledges (rms_pledge). """
    resource = request.function
    tablename = module + "_" + resource
    table = db[tablename]

    # Pre-processor
    def prep(r):
        if r.representation in ("html", "popup"):
            if r.method == "create":
                # auto fill posted_on field and make it readonly
                table.submitted_on.default = request.now
                table.submitted_on.writable = False
                # Default the pledger to the person record linked to the
                # logged-in user, when there is one.
                person = session.auth.user.id if auth.is_logged_in() else None
                if person:
                    person_uuid = db(db.auth_user.id == person).select(db.auth_user.person_uuid, limitby=(0, 1)).first().person_uuid
                    person = db(db.pr_person.uuid == person_uuid).select(db.pr_person.id, limitby=(0, 1)).first().id
                table.person_id.default = person
        return True
    response.s3.prep = prep

    # Change the request status to completed when pledge delivered
    # (this is necessary to close the loop)
    #pledges = db(db.rms_pledge.status == 3).select()
    #for pledge in pledges:
    #    req = db(db.rms_req.id == pledge.req_id).update(completion_status = True)
    #db.commit()

    # Post-processor: list views get a "read" action button per row.
    def pledge_postp(jr, output):
        if jr.representation in ("html", "popup"):
            if not jr.component:
                response.s3.actions = [
                    dict(label=str(READ), _class="action-btn", url=str(URL(r=request, args=["[id]", "read"])))
                ]
        return output
    response.s3.postp = pledge_postp

    response.s3.pagination = True
    return shn_rest_controller(module,
                               resource,
                               editable = True,
                               #listadd=False
                               )
def shn_rms_rheader(jr):
    """Resource header (summary table + component tabs) for "req" pages.

    Returns a DIV for HTML representations of the req resource when a record
    is available, else None.
    """
    if jr.representation == "html":
        # URL helpers retained from the original code; currently unused.
        _next = jr.here()
        _same = jr.same()
        if jr.name == "req":
            aid_request = jr.record
            if aid_request:
                try:
                    location = db(db.gis_location.id == aid_request.location_id).select(limitby=(0, 1)).first()
                    location_represent = shn_gis_location_represent(location.id)
                except Exception:
                    # Fix: was a bare "except:", which also swallowed
                    # SystemExit/KeyboardInterrupt.  The lookup fails e.g.
                    # when location_id is unset (location is None).
                    location_represent = None
                rheader_tabs = shn_rheader_tabs(jr,
                                                [(T("Edit Details"), None),
                                                 (T("Items"), "ritem"),
                                                 (T("Pledge"), "pledge"),
                                                 ])
                rheader = DIV(TABLE(TR(TH(T("Message: ")),
                                       TD(aid_request.message, _colspan=3)),
                                    TR(TH(T("Priority: ")),
                                       aid_request.priority,
                                       #TH(T("Source Type: ")),
                                       #rms_req_source_type.get(aid_request.source_type, T("unknown"))),
                                       TH(T("Document: ")),
                                       document_represent(aid_request.document_id)),
                                    TR(TH(T("Time of Request: ")),
                                       aid_request.timestmp,
                                       TH(T("Verified: ")),
                                       aid_request.verified),
                                    TR(TH(T("Location: ")),
                                       location_represent,
                                       TH(T("Actionable: ")),
                                       aid_request.actionable)),
                              rheader_tabs
                              )
                return rheader
    return None
# Unused: Was done for Haiti
def sms_complete(): #contributes to RSS feed for closing the loop with Ushahidi
    """Expose completed SMS-sourced requests as an RSS feed for Ushahidi."""
    # RSS title callback: one line per record.
    def t(record):
        return "Sahana Record Number: " + str(record.id)
    # RSS description callback: links back to the Ushahidi report.
    def d(record):
        ush_id = db(db.rms_sms_request.id == record.id).select("ush_id")[0]["ush_id"]
        smsrec = db(db.rms_sms_request.id == record.id).select("smsrec")[0]["smsrec"]
        return \
            "Ushahidi Link: " + A(ush_id, _href=ush_id).xml() + "<br>" + \
            "SMS Record: " + str(smsrec)
    rss = { "title" : t , "description" : d }
    # source_type == 2 marks SMS-sourced requests.
    response.s3.filter = (db.rms_req.completion_status == True) & (db.rms_req.source_type == 2)
    return shn_rest_controller(module, "req", editable=False, listadd=False, rss=rss)
# Unused: Was done for Haiti
def tweet_complete(): #contributes to RSS feed for closing the loop with TtT
    """Expose completed Twitter-sourced requests as an RSS feed."""
    # RSS title callback.
    def t(record):
        return "Sahana Record Number: " + str(record.id)
    # RSS description callback: references the originating tweet.
    def d(record):
        ttt_id = db(db.rms_tweet_request.id == record.id).select("ttt_id")[0]["ttt_id"]
        return "Twitter: " + ttt_id
    rss = { "title" : t , "description" : d }
    # source_type == 3 marks Twitter-sourced requests.
    response.s3.filter = (db.rms_req.completion_status == True) & (db.rms_req.source_type == 3)
    return shn_rest_controller(module, "req", editable=False, listadd=False, rss = rss)
| |
#!/usr/bin/python
# by Mattew Peters, who spotted that sklearn does macro averaging not micro averaging correctly and changed it
import os
from sklearn.metrics import precision_recall_fscore_support
import sys
def calculateMeasures(folder_gold="data/dev/", folder_pred="data_pred/dev/", remove_anno=""):
    '''
    Calculate P, R, F1, Macro F
    :param folder_gold: folder containing gold standard .ann files
    :param folder_pred: folder containing prediction .ann files
    :param remove_anno: if set to "rel", relations will be ignored. Use this setting to only evaluate
    keyphrase boundary recognition and keyphrase classification. If set to "types", only keyphrase boundary recognition is evaluated.
    :return: dict mapping each label (plus 'overall') to precision/recall/f1/support
    '''
    flist_gold = os.listdir(folder_gold)
    res_all_gold = []
    res_all_pred = []
    targets = []
    for f in flist_gold:
        # ignoring non-.ann files, should there be any
        if not str(f).endswith(".ann"):
            continue
        # Fix: the original leaked both file handles; use context managers so
        # they are closed deterministically.
        try:
            f_pred = open(os.path.join(folder_pred, f), "r")
        except IOError:
            print(f + " file missing in " + folder_pred + ". Assuming no predictions are available for this file.")
            res_full_pred, res_pred, spans_pred, rels_pred = [], [], [], []
        else:
            with f_pred:
                res_full_pred, res_pred, spans_pred, rels_pred = normaliseAnnotations(f_pred, remove_anno)
        with open(os.path.join(folder_gold, f), "r") as f_gold:
            res_full_gold, res_gold, spans_gold, rels_gold = normaliseAnnotations(f_gold, remove_anno)
        # Align gold and predicted labels over the union of spans.
        spans_all = set(spans_gold + spans_pred)
        for r in spans_all:
            if r in spans_gold:
                target = res_gold[spans_gold.index(r)].split(" ")[0]
                res_all_gold.append(target)
                if target not in targets:
                    targets.append(target)
            else:
                # those are the false positives, contained in pred but not gold
                res_all_gold.append("NONE")
            if r in spans_pred:
                target_pred = res_pred[spans_pred.index(r)].split(" ")[0]
                res_all_pred.append(target_pred)
            else:
                # those are the false negatives, contained in gold but not pred
                res_all_pred.append("NONE")
    # Per-class scores (y_true, y_pred, labels).
    prec, recall, f1, support = precision_recall_fscore_support(
        res_all_gold, res_all_pred, labels=targets, average=None)
    # unpack the precision, recall, f1 and support
    metrics = {}
    for k, target in enumerate(targets):
        metrics[target] = {
            'precision': prec[k],
            'recall': recall[k],
            'f1-score': f1[k],
            'support': support[k]
        }
    # now micro-averaged
    if remove_anno != 'types':
        prec, recall, f1, s = precision_recall_fscore_support(
            res_all_gold, res_all_pred, labels=targets, average='micro')
        metrics['overall'] = {
            'precision': prec,
            'recall': recall,
            'f1-score': f1,
            # total support across classes (per-class array from above)
            'support': sum(support)
        }
    else:
        # just binary classification, nothing to average
        metrics['overall'] = metrics['KEYPHRASE-NOTYPES']
    print_report(metrics, targets)
    return metrics
def print_report(metrics, targets, digits=2):
    """Print a per-class precision/recall/F1/support table plus totals.

    ``metrics`` maps each target (and 'overall') to a dict with keys
    'precision', 'recall', 'f1-score' and 'support'.
    """
    columns = ['precision', 'recall', 'f1-score', 'support']

    def format_row(results, label):
        # Numeric columns are fixed-precision floats; support stays as-is.
        cells = [label]
        cells.extend("{0:0.{1}f}".format(results[c], digits)
                     for c in columns[:-1])
        cells.append("%s" % results[columns[-1]])
        return cells

    fmt = '%11s' + '%9s' * 4 + '\n'
    lines = [fmt % tuple([''] + columns), '\n']
    for target in targets:
        lines.append(fmt % tuple(format_row(metrics[target], target)))
    lines.append('\n')
    lines.append(fmt % tuple(format_row(metrics['overall'], 'avg / total')))
    lines.append('\n')
    print(''.join(lines))
def normaliseAnnotations(file_anno, remove_anno):
    '''
    Parse annotations from the annotation files: remove relations (if requested), convert rel IDs to entity spans
    :param file_anno: open brat .ann file (iterable of tab-separated lines)
    :param remove_anno: "" keeps everything; "rel" drops relation lines;
        "types" additionally collapses all keyphrase types to one label
    :return: (full annotation lines, labels, spans, relations); the first
        three lists are index-aligned
    '''
    res_full_anno = []
    res_anno = []
    spans_anno = []
    rels_anno = []
    for l in file_anno:
        r_g = l.strip().split("\t")
        r_g_offs = r_g[1].split(" ")
        # remove relation instances if specified
        if remove_anno != "" and r_g_offs[0].endswith("-of"):
            continue
        res_full_anno.append(l.strip())
        # normalise relation instances by looking up entity spans for relation IDs
        if r_g_offs[0].endswith("-of"):
            arg1 = r_g_offs[1].replace("Arg1:", "")
            arg2 = r_g_offs[2].replace("Arg2:", "")
            # NOTE(review): the inner loop shadows the outer loop variable
            # "l" (iteration of file_anno is unaffected, but confusing).
            # If an argument ID has not been seen yet, ent1/ent2 may be
            # undefined (NameError) — assumes entities precede relations in
            # the file; TODO confirm.
            for l in res_full_anno:
                r_g_tmp = l.strip().split("\t")
                if r_g_tmp[0] == arg1:
                    ent1 = r_g_tmp[1].replace(" ", "_")
                if r_g_tmp[0] == arg2:
                    ent2 = r_g_tmp[1].replace(" ", "_")
            spans_anno.append(" ".join([ent1, ent2]))
            res_anno.append(" ".join([r_g_offs[0], ent1, ent2]))
            rels_anno.append(" ".join([r_g_offs[0], ent1, ent2]))
        else:
            spans_anno.append(" ".join([r_g_offs[1], r_g_offs[2]]))
            keytype = r_g[1]
            if remove_anno == "types":
                keytype = "KEYPHRASE-NOTYPES"
            res_anno.append(keytype)
    for r in rels_anno:
        r_offs = r.split(" ")
        # reorder hyponyms to start with smallest index
        # NOTE(review): this rebinds the local "r" only — rels_anno is never
        # updated with the swapped value — and the "<" compares strings
        # (lexicographic), not numeric offsets. Looks like a latent bug;
        # preserved as-is.
        if r_offs[0] == "Synonym-of" and r_offs[2].split("_")[1] < r_offs[1].split("_")[1]:  # 1, 2
            r = " ".join([r_offs[0], r_offs[2], r_offs[1]])
        # Check, in all other hyponym relations, if the synonymous entity with smallest index is used for them.
        # If not, change it so it is.
        if r_offs[0] == "Synonym-of":
            for r2 in rels_anno:
                r2_offs = r2.split(" ")
                if r2_offs[0] == "Hyponym-of" and r_offs[1] == r2_offs[1]:
                    r_new = " ".join([r2_offs[0], r_offs[2], r2_offs[2]])
                    rels_anno[rels_anno.index(r2)] = r_new
                if r2_offs[0] == "Hyponym-of" and r_offs[1] == r2_offs[2]:
                    r_new = " ".join([r2_offs[0], r2_offs[1], r_offs[2]])
                    rels_anno[rels_anno.index(r2)] = r_new
    rels_anno = list(set(rels_anno))
    res_full_anno_new = []
    res_anno_new = []
    spans_anno_new = []
    # Keep only entity lines (drop relation "R..." and equivalence "*" lines).
    for r in res_full_anno:
        r_g = r.strip().split("\t")
        if r_g[0].startswith("R") or r_g[0] == "*":
            continue
        ind = res_full_anno.index(r)
        res_full_anno_new.append(r)
        res_anno_new.append(res_anno[ind])
        spans_anno_new.append(spans_anno[ind])
    # Re-append the normalised (deduplicated) relations.
    for r in rels_anno:
        res_full_anno_new.append("R\t" + r)
        res_anno_new.append(r)
        spans_anno_new.append(" ".join([r.split(" ")[1], r.split(" ")[2]]))
    return res_full_anno_new, res_anno_new, spans_anno_new, rels_anno
if __name__ == '__main__':
    # CLI: eval.py [folder_gold [folder_pred [remove_anno]]]
    args = sys.argv[1:]
    folder_gold = args[0] if len(args) >= 1 else "data/dev/"
    folder_pred = args[1] if len(args) >= 2 else "data_pred/dev/"
    # remove_anno is only honored when exactly three arguments are given,
    # matching the original behavior; valid values: "", "rel" or "types".
    remove_anno = args[2] if len(args) == 3 else ""
    calculateMeasures(folder_gold, folder_pred, remove_anno)
| |
# Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Impala tests for Hive Metastore, covering the expected propagation
# of metadata from Hive to Impala or Impala to Hive. Each test
# modifies the metadata via Hive and checks that the modification
# succeeded by querying Impala, or vice versa.
#
# TODO: For each test, verify all the metadata available via Hive and
# Impala, in all the possible ways of validating that metadata.
import logging
import pytest
import random
import shlex
import string
import subprocess
from tests.common.test_result_verifier import *
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
from tests.common.skip import SkipIfS3, SkipIfIsilon, SkipIfLocal
@SkipIfS3.hive
@SkipIfIsilon.hive
@SkipIfLocal.hive
class TestHmsIntegration(ImpalaTestSuite):
@classmethod
def get_workload(self):
    # Workload name used to pick test vectors / data sets.
    # NOTE(review): declared @classmethod but names its first argument
    # "self"; it actually receives the class object.
    return 'functional-query'
@classmethod
def add_test_dimensions(cls):
    """Restrict the test matrix: exhaustive strategy only, one exec option
    set, uncompressed text format."""
    super(TestHmsIntegration, cls).add_test_dimensions()
    if cls.exploration_strategy() != 'exhaustive':
        pytest.skip("Should only run in exhaustive due to long execution time.")
    # There is no reason to run these tests using all dimensions.
    cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
    cls.TestMatrix.add_dimension(
        create_uncompressed_text_dimension(cls.get_workload()))
def run_stmt_in_hive(self, stmt):
    """
    Run a statement in Hive, returning stdout if successful and throwing
    RuntimeError(stderr) if not.
    """
    # beeline connects over HiveServer2; the host:port comes from the
    # pytest --hive_server2 option.  getuser() is provided by the wildcard
    # imports at the top of the file.
    call = subprocess.Popen(
        ['beeline',
         '--outputformat=csv2',
         '-u', 'jdbc:hive2://' + pytest.config.option.hive_server2,
         '-n', getuser(),
         '-e', stmt],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    (stdout, stderr) = call.communicate()
    # communicate() already waits for termination; this wait() is a no-op
    # kept for clarity.
    call.wait()
    if call.returncode != 0:
        raise RuntimeError(stderr)
    return stdout
class ImpalaDbWrapper(object):
    """
    Context manager guarding a database created through Impala: the database
    is created on entry and cleaned up on exit, even when the guarded body
    raises an exception.
    """

    def __init__(self, impala, db_name):
        self.impala = impala
        self.db_name = db_name

    def __enter__(self):
        ddl = 'create database if not exists ' + self.db_name
        self.impala.client.execute(ddl)
        return self.db_name

    def __exit__(self, typ, value, traceback):
        # Unconditional cleanup, regardless of exception state.
        self.impala.cleanup_db(self.db_name)
class ImpalaTableWrapper(object):
    """
    Context manager guarding a table created through Impala: the table is
    created on entry and dropped on exit, even when the guarded body raises.
    """

    def __init__(self, impala, table_name, table_spec):
        self.impala = impala
        self.table_name = table_name
        self.table_spec = table_spec

    def __enter__(self):
        ddl = 'create table if not exists %s %s' % (
            self.table_name, self.table_spec)
        self.impala.client.execute(ddl)
        return self.table_name

    def __exit__(self, typ, value, traceback):
        self.impala.client.execute('drop table if exists %s' % self.table_name)
class HiveDbWrapper(object):
    """
    Context manager guarding a database created through Hive: the database is
    created on entry and dropped (cascade) on exit, even when the guarded
    body raises an exception.
    """

    def __init__(self, hive, db_name):
        self.hive = hive
        self.db_name = db_name

    def __enter__(self):
        ddl = 'create database if not exists ' + self.db_name
        self.hive.run_stmt_in_hive(ddl)
        return self.db_name

    def __exit__(self, typ, value, traceback):
        self.hive.run_stmt_in_hive(
            'drop database if exists %s cascade' % self.db_name)
class HiveTableWrapper(object):
    """
    Context manager guarding a table created through Hive: the table is
    created on entry and dropped on exit, even when the guarded body raises.
    """

    def __init__(self, hive, table_name, table_spec):
        self.hive = hive
        self.table_name = table_name
        self.table_spec = table_spec

    def __enter__(self):
        ddl = 'create table if not exists %s %s' % (
            self.table_name, self.table_spec)
        self.hive.run_stmt_in_hive(ddl)
        return self.table_name

    def __exit__(self, typ, value, traceback):
        self.hive.run_stmt_in_hive('drop table if exists %s' % self.table_name)
def impala_table_stats(self, table):
    """Returns a dictionary of stats for a table according to Impala.

    Keys are tuples of partition-value strings (the leading columns of each
    SHOW TABLE STATS row); values are dicts of the named stat columns.
    """
    output = self.client.execute('show table stats %s' % table).get_data()
    output_lines = output.split('\n')
    result = {}
    for line in output_lines:
        parts = line.split('\t')
        stats = {}
        # The last eight columns are fixed; everything before them is the
        # variable-length list of partition key values.
        stats['location'] = parts[-1]
        stats['incremental stats'] = parts[-2]
        stats['format'] = parts[-3]
        stats['cache replication'] = parts[-4]
        stats['bytes cached'] = parts[-5]
        stats['size'] = parts[-6]
        stats['#files'] = parts[-7]
        stats['#rows'] = parts[-8]
        result[tuple(parts[:-8])] = stats
    return result
def impala_all_column_stats(self, table):
    """Returns a dictionary of stats for columns according to Impala.

    Keyed by column name; values are dicts of the SHOW COLUMN STATS columns.
    """
    output = self.client.execute('show column stats %s' % table).get_data()
    output_lines = output.split('\n')
    result = {}
    for line in output_lines:
        stats = line.split('\t')
        attributes = {}
        # Column order in SHOW COLUMN STATS: name, type, ndv, #nulls,
        # max size, avg size.
        attributes['type'] = stats[1]
        attributes['ndv'] = stats[2]
        attributes['#nulls'] = stats[3]
        attributes['max size'] = stats[4]
        attributes['avg size'] = stats[5]
        result[stats[0]] = attributes
    return result
def hive_column_stats(self, table, column):
    """Returns a dictionary of stats for a column according to Hive.

    Parses the csv2 output of DESCRIBE FORMATTED: line 0 holds the stat
    names, line 3 the corresponding values.
    """
    output = self.run_stmt_in_hive(
        'describe formatted %s %s' %
        (table, column))
    result = {}
    output_lines = output.split('\n')
    # NOTE(review): string.strip + map-returning-a-list is Python-2 era code;
    # under Python 3 string.strip does not exist and map() is lazy, so the
    # len() below would fail.  Kept as-is for this py2 test suite.
    stat_names = map(string.strip, output_lines[0].split(','))
    stat_values = output_lines[3].split(',')
    assert len(stat_names) == len(stat_values)
    for i in range(0, len(stat_names)):
        result[stat_names[i]] = stat_values[i]
    return result
def impala_partition_names(self, table_name):
    """Find the names of the partitions of a table, as Impala sees them.

    The return format is a list of lists of strings. Each string represents
    a partition value of a given column.
    """
    rows = self.client.execute('show partitions %s' %
                               table_name).get_data().split('\n')
    # Drop the trailing aggregate "Total" row.
    rows.pop()
    result = []
    for row in rows:
        fields = row.split('\t')
        # Everything before the last eight fixed stat columns is the list of
        # partition key values (cf. impala_table_stats).
        name = fields[0:-8]
        result.append(name)
    return result
def hive_partition_names(self, table_name):
    """Find the names of the partitions of a table, as Hive sees them.

    The return format is a list of strings. Each string represents a partition
    value of a given column in a format like 'column1=7/column2=8'.
    """
    # Slice off the csv2 header row and the trailing empty line.
    return self.run_stmt_in_hive(
        'show partitions %s' % table_name).split('\n')[1:-1]
def impala_columns(self, table_name):
    """
    Returns a dict with column names as the keys and dicts of type and comments
    as the values.
    """
    columns = self.client.execute('describe %s' %
                                  table_name).get_data().split('\n')
    result = {}
    for column in columns:
        # DESCRIBE output columns: name, type, comment (tab-separated).
        attributes = column.split('\t')
        result[attributes[0]] = {'type': attributes[1], 'comment': attributes[2]}
    return result
def hive_columns(self, table_name):
    """
    Returns a dict with column names as the keys and dicts of types and
    comments as the values.
    """
    # Slice off the csv2 header row and the trailing empty line.
    columns = self.run_stmt_in_hive(
        'describe %s' % table_name).split('\n')[1:-1]
    result = {}
    for column in columns:
        # csv2 output columns: name, type, comment (comma-separated).
        attributes = column.split(',')
        result[attributes[0]] = {'type': attributes[1], 'comment': attributes[2]}
    return result
def unique_string(self):
    """Return a random 16-character lowercase ASCII identifier."""
    chars = (random.choice(string.ascii_lowercase) for _ in range(0, 16))
    return ''.join(chars)
def assert_sql_error(self, engine, command, *strs_in_error):
    """Run *command* through *engine* and require it to fail with an error
    whose message contains every string in *strs_in_error*.

    Fails the calling test when the command completes without raising.
    """
    completed = False
    try:
        engine(command)
        completed = True
    except Exception as e:
        message = str(e)
        for expected in strs_in_error:
            assert expected in message
    if completed:
        assert False, '%s should have triggered an error containing %s' % (
            command, strs_in_error)
# The four engine combinations (db created by Hive/Impala x table created by
# Hive/Impala) all exercise the same helper below.
@pytest.mark.execute_serially
def test_hive_db_hive_table_add_partition(self, vector):
    self.add_hive_partition_helper(vector, self.HiveDbWrapper,
                                   self.HiveTableWrapper)

@pytest.mark.execute_serially
def test_hive_db_impala_table_add_partition(self, vector):
    self.add_hive_partition_helper(vector, self.HiveDbWrapper,
                                   self.ImpalaTableWrapper)

@pytest.mark.execute_serially
def test_impala_db_impala_table_add_partition(self, vector):
    self.add_hive_partition_helper(vector, self.ImpalaDbWrapper,
                                   self.ImpalaTableWrapper)

@pytest.mark.execute_serially
def test_impala_db_hive_table_add_partition(self, vector):
    self.add_hive_partition_helper(vector, self.ImpalaDbWrapper,
                                   self.HiveTableWrapper)
@pytest.mark.xfail(run=False, reason="This is a bug: IMPALA-2426")
@pytest.mark.execute_serially
def test_incremental_stats_new_partition(self, vector):
    # A partition added via Hive should show up with zeroed incremental
    # stats after COMPUTE INCREMENTAL STATS runs in Impala.
    with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
        with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                     '(x int) partitioned by (y int)') as table_name:
            self.client.execute('insert into table %s partition (y=42) values (2)'
                                % table_name)
            self.run_stmt_in_hive('alter table %s add partition (y = 333)'
                                  % table_name)
            self.client.execute('compute incremental stats %s' % table_name)
            table_stats = self.impala_table_stats(table_name)
            assert 'true' == table_stats[('333',)]['incremental stats']
            assert '0' == table_stats[('333',)]['#rows']
            assert '0' == table_stats[('333',)]['#files']
def add_hive_partition_helper(self, vector, DbWrapper, TableWrapper):
    """
    Partitions added in Hive can be viewed in Impala after computing stats.

    DbWrapper/TableWrapper select which engine creates the db and table.
    """
    with DbWrapper(self, self.unique_string()) as db_name:
        self.client.execute('invalidate metadata')
        with TableWrapper(self, db_name + '.' + self.unique_string(),
                          '(x int) partitioned by (y int, z int)') as table_name:
            # Invalidate metadata so Impala can see the table
            self.client.execute('invalidate metadata')
            self.run_stmt_in_hive(
                'alter table %s add partition (y = 333, z = 5309)' %
                table_name)
            self.client.execute('compute incremental stats %s' % table_name)
            # Impala can see the partition's name
            assert [['333', '5309']] == self.impala_partition_names(table_name)
            # Impala's compute stats didn't alter Hive's knowledge of the partition
            assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
    self.add_hive_partition_table_stats_helper(vector, DbWrapper, TableWrapper)
def add_hive_partition_table_stats_helper(
        self, vector, DbWrapper, TableWrapper):
    """
    Partitions added in Hive don't make Impala's table stats incorrect.
    """
    # TODO: check the same thing with column stats
    with DbWrapper(self, self.unique_string()) as db_name:
        self.client.execute('invalidate metadata')
        with TableWrapper(self, db_name + '.' + self.unique_string(),
                          '(x int) partitioned by (y int, z int)') as table_name:
            # Invalidate metadata so Impala can see the table
            self.client.execute('invalidate metadata')
            self.client.execute(
                'insert into table %s partition (y=42, z=867) values (2)'
                % table_name)
            self.client.execute('compute incremental stats %s' % table_name)
            impala_table_stats = self.impala_table_stats(table_name)
            self.run_stmt_in_hive(
                'alter table %s add partition (y = 333, z = 5309)' %
                table_name)
            self.client.execute('compute incremental stats %s' % table_name)
            # Stats of the pre-existing partition are unchanged.
            assert impala_table_stats[
                ('42', '867')] == self.impala_table_stats(table_name)[
                ('42', '867')]
@pytest.mark.execute_serially
def test_add_impala_partition(self, vector):
  """
  Partitions added in Impala can be viewed in Hive immediately
  """
  with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
    with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                 '(x int) partitioned by (y int, z int)'
                                 ) as table_name:
      self.client.execute(
          'insert into table %s partition (y=42, z=867) values (2)'
          % table_name)
      # No refresh/invalidate needed on either side for the new partition.
      assert [['42', '867']] == self.impala_partition_names(table_name)
      assert ['y=42/z=867'] == self.hive_partition_names(table_name)
@pytest.mark.execute_serially
def test_drop_column_maintains_stats(self, vector):
  """
  Dropping a column in Impala doesn't alter the stats of other columns in Hive
  or Impala.
  """
  with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
    with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                 '(x int, y int, z int)') as table_name:
      # NOTE(review): the Hive select appears to force Hive to load the
      # table before 'analyze' runs -- TODO confirm this is still required.
      self.run_stmt_in_hive('select * from %s' % table_name)
      self.run_stmt_in_hive(
          'use %s; analyze table %s compute statistics for columns' %
          (db_name, table_name.split('.')[1]))
      self.client.execute('compute stats %s' % table_name)
      hive_x_stats = self.hive_column_stats(table_name, 'x')
      hive_y_stats = self.hive_column_stats(table_name, 'y')
      impala_stats = self.impala_all_column_stats(table_name)
      # Drop 'z' from Impala's side; x/y stats must survive in both engines.
      self.client.execute('alter table %s drop column z' % table_name)
      assert hive_x_stats == self.hive_column_stats(table_name, 'x')
      assert hive_y_stats == self.hive_column_stats(table_name, 'y')
      assert impala_stats['x'] == self.impala_all_column_stats(table_name)[
          'x']
      assert impala_stats['y'] == self.impala_all_column_stats(table_name)[
          'y']
      # Drop 'y' from Hive's side (replace columns); 'x' stats must survive.
      self.run_stmt_in_hive(
          'alter table %s replace columns (x int)' %
          table_name)
      assert hive_x_stats == self.hive_column_stats(table_name, 'x')
      assert impala_stats['x'] == self.impala_all_column_stats(table_name)[
          'x']
@pytest.mark.execute_serially
def test_select_without_compute_stats(self, vector):
  """
  Data added in Hive shows up in Impala 'select *', and if the table is not
  partitioned, 'compute incremental stats' is not required.
  """
  with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
    with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                 '(x int)') as table_name:
      # In the unpartitioned case, 'compute incremental stats' is not
      # required.
      self.run_stmt_in_hive(
          'insert into table %s values (66)'
          % table_name)
      assert '66' == self.client.execute(
          'select * from %s' % table_name).get_data()
    with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                 '(x int) partitioned by (y int)'
                                 ) as table_name:
      assert [] == self.impala_partition_names(table_name)
      self.run_stmt_in_hive(
          'insert into table %s partition (y=33) values (44)'
          % table_name)
      # Partitioned case: the Hive-added data is only visible to Impala
      # after computing incremental stats.
      self.client.execute('compute incremental stats %s' % table_name)
      assert '44\t33' == self.client.execute(
          'select * from %s' % table_name).get_data()
@pytest.mark.xfail(run=False, reason="This is a bug: IMPALA-2458")
@pytest.mark.execute_serially
def test_overwrite_added_column(self, vector):
  """
  Impala can't overwrite Hive's column types, and vice versa.
  """
  with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
    with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                 '(x int, y int)') as table_name:
      inttype = {'comment': '', 'type': 'int'}
      hive_expected = {'x': inttype, 'y': inttype}
      impala_expected = {'x': inttype, 'y': inttype}
      # Hive and Impala both know all columns:
      assert hive_expected == self.hive_columns(table_name)
      assert impala_expected == self.impala_columns(table_name)
      # Add column in Hive but don't tell Impala
      self.run_stmt_in_hive(
          'alter table %s add columns (z int)' % table_name)
      hive_expected['z'] = inttype
      assert hive_expected == self.hive_columns(table_name)
      # Overwriting a Hive-created column in Impala does not work
      self.assert_sql_error(
          self.client.execute,
          'alter table %s add columns (z string)' %
          table_name,
          'Column already exists: z')
      # Overwriting an Impala-created column in Hive does not work
      self.client.execute(
          'alter table %s add columns (v string)' % table_name)
      self.assert_sql_error(
          self.run_stmt_in_hive,
          'alter table %s add columns (v string)' %
          table_name,
          'Duplicate column name: v')
@pytest.mark.execute_serially
def test_compute_stats_get_to_hive(self, vector):
  """Stats computed in Impala are also visible in Hive."""
  with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
    with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                 '(x int)') as table_name:
      self.run_stmt_in_hive(
          'insert into table %s values (33)' % table_name)
      # Snapshot both engines' stats before computing in Impala.
      hive_stats = self.hive_column_stats(table_name, 'x')
      impala_stats = self.client.execute('show column stats %s' % table_name)
      self.client.execute('compute stats %s' % table_name)
      # The computed stats must change what both engines report.
      assert impala_stats != self.client.execute(
          'show column stats %s' % table_name)
      assert hive_stats != self.hive_column_stats(table_name, 'x')
@pytest.mark.execute_serially
def test_compute_stats_get_to_impala(self, vector):
  """Column stats computed in Hive are also visible in Impala."""
  with self.HiveDbWrapper(self, self.unique_string()) as db_name:
    with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
                               '(x int)') as table_name:
      # Snapshot the stats before Hive inserts and analyzes.
      hive_stats = self.hive_column_stats(table_name, 'x')
      self.client.execute('invalidate metadata')
      self.client.execute('refresh %s' % table_name)
      impala_stats = self.impala_all_column_stats(table_name)
      self.run_stmt_in_hive(
          'insert into table %s values (33)' % table_name)
      self.run_stmt_in_hive(
          'use %s; analyze table %s compute statistics for columns' %
          (db_name, table_name.split('.')[1]))
      new_hive_stats = self.hive_column_stats(table_name, 'x')
      assert hive_stats != new_hive_stats
      assert '33' == new_hive_stats['min']
      assert '33' == new_hive_stats['max']
      assert '0' == new_hive_stats['num_nulls']
      # A plain refresh is enough for Impala to pick up Hive's new stats.
      self.client.execute('refresh %s' % table_name)
      new_impala_stats = self.impala_all_column_stats(table_name)
      assert impala_stats != new_impala_stats
      assert '0' == new_impala_stats['x']['#nulls']
@pytest.mark.execute_serially
def test_drop_partition(self, vector):
  """
  Impala can see that a partition was dropped by Hive by invalidating
  metadata.
  """
  with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
    with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                 '(x int) partitioned by (y int)'
                                 ) as table_name:
      self.run_stmt_in_hive(
          'insert into table %s partition(y=33) values (44)' % table_name)
      self.client.execute('compute stats %s' % table_name)
      self.run_stmt_in_hive(
          'alter table %s drop partition (y=33)' % table_name)
      self.client.execute('invalidate metadata %s' % table_name)
      # After invalidation Impala sees the (now empty) table.
      assert '' == self.client.execute(
          'select * from %s' % table_name).get_data()
@pytest.mark.execute_serially
def test_drop_column_with_data(self, vector):
  """Columns dropped by Hive are ignored in Impala 'select *'."""
  with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
    with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                 '(x int, y int)') as table_name:
      self.run_stmt_in_hive(
          'insert into table %s values (33,44)' % table_name)
      # 'replace columns' drops 'y'; Impala's select * only returns 'x'.
      self.run_stmt_in_hive(
          'alter table %s replace columns (x int)' % table_name)
      assert '33' == self.client.execute(
          'select * from %s' % table_name).get_data()
@pytest.mark.execute_serially
def test_add_column(self, vector):
  """Columns added in one engine are visible in the other via DESCRIBE."""
  with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
    with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                 '(x int)') as table_name:
      int_column = {'type': 'int', 'comment': ''}
      expected = {'x': int_column}
      assert expected == self.hive_columns(table_name)
      assert expected == self.impala_columns(table_name)
      # Column added in Impala: visible in Hive without any extra step.
      self.client.execute('alter table %s add columns (y int)' % table_name)
      expected['y'] = int_column
      assert expected == self.hive_columns(table_name)
      assert expected == self.impala_columns(table_name)
      # Column added in Hive: Impala needs an invalidate to see it.
      self.run_stmt_in_hive(
          'alter table %s add columns (z int)' %
          table_name)
      self.client.execute('invalidate metadata %s' % table_name)
      expected['z'] = int_column
      assert expected == self.hive_columns(table_name)
      assert expected == self.impala_columns(table_name)
@pytest.mark.execute_serially
def test_drop_database(self, vector):
  """
  If a DB is created, then dropped, in Hive, Impala can create one with the
  same name without invalidating metadata.
  """
  test_db = self.unique_string()
  # Create and immediately drop the database in Hive.
  with self.HiveDbWrapper(self, test_db) as db_name:
    pass
  # Impala correctly refuses to use the dropped database ...
  self.assert_sql_error(
      self.client.execute,
      'create table %s.%s (x int)' %
      (test_db,
       self.unique_string()),
      'Database does not exist: %s' %
      test_db)
  # ... but can recreate one of the same name without invalidation.
  with self.ImpalaDbWrapper(self, test_db) as db_name:
    pass
@pytest.mark.execute_serially
def test_table_format_change(self, vector):
  """
  Hive storage format changes propagate to Impala.

  Creates a parquet table in Hive, verifies Impala reports the PARQUET
  format, switches the table to avro in Hive, and verifies Impala reports
  AVRO after invalidating the table's metadata.
  """
  # TODO: check results of insert, then select * before and after
  # storage format change.
  with self.HiveDbWrapper(self, self.unique_string()) as db_name:
    with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
                               '(x int, y int) stored as parquet'
                               ) as table_name:
      self.client.execute('invalidate metadata')
      self.client.execute('invalidate metadata %s' % table_name)
      assert 'PARQUET' == self.impala_table_stats(table_name)[()]['format']
      self.run_stmt_in_hive(
          'alter table %s set fileformat avro' % table_name)
      # The format change only becomes visible after invalidation.
      self.client.execute('invalidate metadata %s' % table_name)
      assert 'AVRO' == self.impala_table_stats(table_name)[()]['format']
@pytest.mark.execute_serially
def test_change_column_type(self, vector):
  """Hive column type changes propagate to Impala."""
  with self.HiveDbWrapper(self, self.unique_string()) as db_name:
    with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
                               '(x int, y int)') as table_name:
      self.run_stmt_in_hive(
          'insert into table %s values (33,44)' % table_name)
      self.run_stmt_in_hive('alter table %s change y y string' % table_name)
      # Hive output is comma-separated with a header row, hence the split.
      assert '33,44' == self.run_stmt_in_hive(
          'select * from %s' % table_name).split('\n')[1]
      self.client.execute('invalidate metadata %s' % table_name)
      # Impala sees both the data and the new column type.
      assert '33\t44' == self.client.execute(
          'select * from %s' % table_name).get_data()
      assert 'string' == self.impala_columns(table_name)['y']['type']
@pytest.mark.execute_serially
def test_change_parquet_column_type(self, vector):
  """
  Changing column types in Parquet doesn't work in Hive and it causes
  'select *' to fail in Impala as well, after invalidating metadata. This is a
  known issue with changing column types in Hive/parquet.
  """
  with self.HiveDbWrapper(self, self.unique_string()) as db_name:
    with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
                               '(x int, y int) stored as parquet'
                               ) as table_name:
      self.run_stmt_in_hive(
          'insert into table %s values (33,44)' % table_name)
      assert '33,44' == self.run_stmt_in_hive(
          'select * from %s' % table_name).split('\n')[1]
      self.client.execute('invalidate metadata')
      assert '33\t44' == self.client.execute(
          'select * from %s' % table_name).get_data()
      self.run_stmt_in_hive('alter table %s change y y string' % table_name)
      # After the type change, both engines fail reading the parquet data.
      self.assert_sql_error(
          self.run_stmt_in_hive, 'select * from %s' %
          table_name, 'Cannot inspect org.apache.hadoop.io.IntWritable')
      self.client.execute('invalidate metadata %s' % table_name)
      self.assert_sql_error(
          self.client.execute,
          'select * from %s' %
          table_name,
          "Column type: STRING, Parquet schema:")
@pytest.mark.execute_serially
def test_change_table_name(self, vector):
  """
  Changing the table name in Hive propagates to Impala after 'invalidate
  metadata'.
  """
  with self.HiveDbWrapper(self, self.unique_string()) as db_name:
    with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
                               '(x int, y int)') as table_name:
      self.client.execute('invalidate metadata')
      int_column = {'type': 'int', 'comment': ''}
      expected_columns = {'x': int_column, 'y': int_column}
      assert expected_columns == self.impala_columns(table_name)
      new_name = table_name + '2'
      self.run_stmt_in_hive('alter table %s rename to %s' %
                            (table_name, new_name))
      self.client.execute('invalidate metadata')
      # The new name resolves in Impala; the old one no longer does.
      assert expected_columns == self.impala_columns(new_name)
      self.assert_sql_error(self.client.execute,
                            'describe %s' % table_name,
                            'Could not resolve path')
| |
from .extern.six import PY3
if PY3: # pragma: py3
# Stuff to do if Python 3
import io
# Make the decode_ascii utility function actually work
import pyfits.util
import numpy
def encode_ascii(s):
    """Encode a native str (or unicode ndarray) to bytes via ASCII.

    Plain strings are encoded directly.  Unicode ndarrays are encoded
    element-wise, preserving the ndarray subclass (hence the view()) and the
    item width: a unicode dtype uses 4 bytes per character, so the encoded
    itemsize must be the original divided by 4.  Non-string ndarrays raise
    TypeError; any other value is returned untouched.
    """
    if isinstance(s, str):
        return s.encode('ascii')
    elif (isinstance(s, numpy.ndarray) and
            issubclass(s.dtype.type, numpy.str_)):
        ns = numpy.char.encode(s, 'ascii').view(type(s))
        # Floor division, not true division: under Python 3 `/` yields a
        # float, which is not a valid dtype itemsize for astype().
        if ns.dtype.itemsize != s.dtype.itemsize // 4:
            ns = ns.astype((numpy.bytes_, s.dtype.itemsize // 4))
        return ns
    elif (isinstance(s, numpy.ndarray) and
            not issubclass(s.dtype.type, numpy.bytes_)):
        raise TypeError('string operation on non-string array')
    return s
pyfits.util.encode_ascii = encode_ascii
def decode_ascii(s):
    """Decode bytes (or a byte-string ndarray) to native str via ASCII.

    Mirrors encode_ascii: plain bytes are decoded directly; byte-string
    ndarrays are decoded element-wise while preserving the ndarray subclass
    and the string width; non-string ndarrays raise TypeError; anything else
    is passed through untouched.
    """
    if isinstance(s, bytes):
        return s.decode('ascii')
    if isinstance(s, numpy.ndarray):
        if issubclass(s.dtype.type, numpy.bytes_):
            # np.char.decode annoyingly preserves neither the ndarray
            # subclass (hence the view()) nor the string widths (hence the
            # astype() below).
            if s.size == 0:
                # Numpy bug workaround: np.char.decode on an empty array
                # returns an empty float64 array, so build the empty unicode
                # array directly from the translated dtype string.
                dt = s.dtype.str.replace('S', 'U')
                ns = numpy.array([], dtype=dt).view(type(s))
            else:
                ns = numpy.char.decode(s, 'ascii').view(type(s))
            # Unicode dtypes use 4 bytes per character.
            if ns.dtype.itemsize / 4 != s.dtype.itemsize:
                ns = ns.astype((numpy.str_, s.dtype.itemsize))
            return ns
        if not issubclass(s.dtype.type, numpy.str_):
            # Don't silently pass through on non-string arrays; we don't
            # want to hide errors where things that are not stringy are
            # attempting to be decoded.
            raise TypeError('string operation on non-string array')
    return s
pyfits.util.decode_ascii = decode_ascii
# See the docstring for pyfits.util.fileobj_open for why we need to replace
# this function
def fileobj_open(filename, mode):
    """Open filename with buffering disabled.

    buffering=0 requires a binary mode under Python 3; callers are expected
    to pass one.  See the docstring of pyfits.util.fileobj_open (which this
    replaces) for why the unbuffered handle is needed.
    """
    return open(filename, mode, buffering=0)
pyfits.util.fileobj_open = fileobj_open
# Support the io.IOBase.readable/writable methods
from pyfits.util import isreadable as _isreadable
def isreadable(f):
    """Return whether f is readable, preferring io's readable() protocol
    and falling back to the original pyfits.util implementation."""
    return f.readable() if hasattr(f, 'readable') else _isreadable(f)
pyfits.util.isreadable = isreadable
from pyfits.util import iswritable as _iswritable
def iswritable(f):
    """Return whether f is writable, preferring io's writable() protocol
    and falling back to the original pyfits.util implementation."""
    return f.writable() if hasattr(f, 'writable') else _iswritable(f)
pyfits.util.iswritable = iswritable
# isfile needs to support the higher-level wrappers around FileIO
def isfile(f):
    """Return whether f is (or wraps) a raw io.FileIO object.

    Recurses through the 'buffer' attribute (text layer) and the 'raw'
    attribute (buffered layer) so the higher-level io wrappers around
    FileIO also count as files.
    """
    if isinstance(f, io.FileIO):
        return True
    if hasattr(f, 'buffer'):
        return isfile(f.buffer)
    if hasattr(f, 'raw'):
        return isfile(f.raw)
    return False
pyfits.util.isfile = isfile
# Here we monkey patch (yes, I know) numpy to fix a few numpy Python 3
# bugs. The only behavior that's modified is that bugs are fixed, so that
# should be OK.
# Fix chararrays; this is necessary in numpy 1.5.1 and below--hopefully
# should not be necessary later. See
# http://projects.scipy.org/numpy/ticket/1817
# TODO: Maybe do a version check on numpy for this? (Note: the fix for
# this hasn't been accepted in Numpy yet, so a version number check would
# not be helpful yet...)
import pyfits.file
_chararray = numpy.char.chararray
class chararray(_chararray):
    # Patched chararray: scalar item access rstrips trailing whitespace and
    # collapses all-whitespace values to the empty string.
    def __getitem__(self, obj):
        val = numpy.ndarray.__getitem__(self, obj)
        if isinstance(val, numpy.character):
            temp = val.rstrip()
            if numpy.char._len(temp) == 0:
                val = ''
            else:
                val = temp
        return val
# Install the patched class everywhere numpy exposes chararray.
for m in [numpy.char, numpy.core.defchararray, numpy.core.records]:
    m.chararray = chararray
# Fix recarrays with sub-array fields. See
# http://projects.scipy.org/numpy/ticket/1766
# TODO: Same as above, though the fix to this problem hasn't made it into
# any Numpy release yet either, so we'll have to hold off on a version
# check
def _fix_dtype(dtype):
    """
    Numpy has a bug (in Python3 only) that causes a segfault when
    accessing the data of arrays containing nested arrays. Specifically,
    this happens if the shape of the subarray is not given as a tuple.
    See http://projects.scipy.org/numpy/ticket/1766.

    Returns an equivalent dtype in which every subarray field's shape is
    normalized to a tuple; non-structured dtypes are returned unchanged.
    """
    if not hasattr(dtype, 'fields') or dtype.fields is None:
        # Not a structured dtype; nothing to normalize.
        return dtype
    formats = []
    offsets = []
    titles = []
    for name in dtype.names:
        field = dtype.fields[name]
        shape = field[0].shape
        if not isinstance(shape, tuple):
            # The problematic case: coerce a scalar shape into a 1-tuple.
            shape = (shape,)
        formats.append((field[0].base, shape))
        offsets.append(field[1])
        # There seems to be no obvious way to extract the titles from
        # a dtype, so this just searches for duplicate fields
        title = None
        for key, dup in dtype.fields.items():
            if key != name and dup == field:
                title = key
                break
        titles.append(title)
    return numpy.dtype({'names': dtype.names, 'formats': formats,
                        'offsets': offsets, 'titles': titles})
_recarray = numpy.recarray
class recarray(_recarray):
    # recarray wrapper that routes every dtype through _fix_dtype so the
    # numpy #1766 subarray-shape segfault cannot be triggered.
    def __new__(subtype, shape, dtype=None, buf=None, offset=0,
                strides=None, formats=None, names=None, titles=None,
                byteorder=None, aligned=False, order='C'):
        if dtype is not None:
            dtype = _fix_dtype(dtype)
        # Some numpy versions lack the 'order' argument in recarray.__new__;
        # only forward it when the underlying signature actually accepts it.
        if 'order' in _recarray.__new__.__code__.co_varnames:
            return _recarray.__new__(
                subtype, shape, dtype, buf, offset, strides, formats,
                names, titles, byteorder, aligned, order)
        else:
            return _recarray.__new__(
                subtype, shape, dtype, buf, offset, strides, formats,
                names, titles, byteorder, aligned)
numpy.recarray = numpy.core.records.recarray = recarray
# We also need to patch pyfits.file._File which can also be affected by the
# #1766 bug
old_File = pyfits.file._File
class _File(old_File):
    def readarray(self, size=None, offset=0, dtype=numpy.uint8,
                  shape=None):
        # Normalize structured dtypes before delegating to the original
        # implementation (see _fix_dtype above for the numpy #1766 details).
        if isinstance(dtype, numpy.dtype):
            dtype = _fix_dtype(dtype)
        return old_File.readarray(self, size, offset, dtype, shape)
    readarray.__doc__ = old_File.readarray.__doc__
pyfits.file._File = _File
# Replace pyfits.util.maketrans and translate with versions that work
# with Python 3 unicode strings
pyfits.util.maketrans = str.maketrans
def translate(s, table, deletechars):
    """str.translate() front-end that also honors a deletechars string.

    Python 3's str.translate takes only a mapping; deletions are expressed
    by mapping a character's ordinal to None.  The table is copied before
    mutation so the caller's mapping is left untouched.
    """
    if deletechars:
        mapping = dict(table)
        for ch in deletechars:
            mapping[ord(ch)] = None
        return s.translate(mapping)
    return s.translate(table)
pyfits.util.translate = translate
else:
# Stuff to do if not Python 3
import string
import pyfits.util
pyfits.util.maketrans = string.maketrans
| |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ContactsService extends the GDataService for Google Contacts operations.
ContactsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'dbrattli (Dag Brattli)'
import gdata
import gdata.calendar
import gdata.service
DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full'
'/batch')
DEFAULT_PROFILES_BATCH_URL = ('http://www.google.com'
'/m8/feeds/profiles/default/full/batch')
GDATA_VER_HEADER = 'GData-Version'
class Error(Exception):
  """Base exception for errors raised by this module."""
  pass
class RequestError(Error):
  """Raised when the server rejects a request; see the CRUD method
  docstrings for the {'status', 'reason', 'body'} payload shape."""
  pass
class ContactsService(gdata.service.GDataService):
  """Client for the Google Contacts service."""

  def __init__(self, email=None, password=None, source=None,
               server='www.google.com', additional_headers=None,
               contact_list='default', **kwargs):
    """Creates a client for the Contacts service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'www.google.com'.
      contact_list: string (optional) The name of the default contact list to
          use when no URI is specified to the methods of the service.
          Default value: 'default' (the logged in user's contact list).
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    self.contact_list = contact_list
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='cp', source=source,
        server=server, additional_headers=additional_headers, **kwargs)

  def GetFeedUri(self, kind='contacts', contact_list=None, projection='full',
                 scheme=None):
    """Builds a feed URI.

    Args:
      kind: The type of feed to return, typically 'groups' or 'contacts'.
          Default value: 'contacts'.
      contact_list: The contact list to return a feed for.
          Default value: self.contact_list.
      projection: The projection to apply to the feed contents, for example
          'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'.
      scheme: The URL scheme such as 'http' or 'https', None to return a
          relative URI without hostname.

    Returns:
      A feed URI using the given kind, contact list, and projection.
      Example: '/m8/feeds/contacts/default/full'.
    """
    contact_list = contact_list or self.contact_list
    if kind == 'profiles':
      # Profile feeds are scoped by domain rather than by user.
      contact_list = 'domain/%s' % contact_list
    # Old-style conditional: an empty prefix produces a relative URI.
    prefix = scheme and '%s://%s' % (scheme, self.server) or ''
    return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection)

  def GetContactsFeed(self, uri=None):
    """Retrieves the contacts feed at uri (default: the user's full feed)."""
    uri = uri or self.GetFeedUri()
    return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString)

  def GetContact(self, uri):
    """Retrieves the single contact entry stored at uri."""
    return self.Get(uri, converter=gdata.contacts.ContactEntryFromString)

  def CreateContact(self, new_contact, insert_uri=None, url_params=None,
                    escape_params=True):
    """Adds a new contact to Google Contacts.

    Args:
      new_contact: atom.Entry or subclass A new contact which is to be added to
          Google Contacts.
      insert_uri: the URL to post new contacts to the feed
      url_params: dict (optional) Additional URL parameters to be included
          in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful insert, an entry containing the contact created
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
         'reason': HTTP reason from the server,
         'body': HTTP body of the server's response}
    """
    insert_uri = insert_uri or self.GetFeedUri()
    return self.Post(new_contact, insert_uri, url_params=url_params,
                     escape_params=escape_params,
                     converter=gdata.contacts.ContactEntryFromString)

  def UpdateContact(self, edit_uri, updated_contact, url_params=None,
                    escape_params=True):
    """Updates an existing contact.

    Args:
      edit_uri: string The edit link URI for the element being updated
      updated_contact: string, atom.Entry or subclass containing
          the Atom Entry which will replace the contact which is
          stored at the edit_url
      url_params: dict (optional) Additional URL parameters to be included
          in the update request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful update, a httplib.HTTPResponse containing the server's
        response to the PUT request.
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
         'reason': HTTP reason from the server,
         'body': HTTP body of the server's response}
    """
    return self.Put(updated_contact, self._CleanUri(edit_uri),
                    url_params=url_params,
                    escape_params=escape_params,
                    converter=gdata.contacts.ContactEntryFromString)

  def DeleteContact(self, edit_uri, extra_headers=None,
                    url_params=None, escape_params=True):
    """Removes a contact with the specified ID from Google Contacts.

    Args:
      edit_uri: string The edit URL of the entry to be deleted. Example:
          '/m8/feeds/contacts/default/full/xxx/yyy'
      url_params: dict (optional) Additional URL parameters to be included
          in the deletion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful delete, a httplib.HTTPResponse containing the server's
        response to the DELETE request.
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
         'reason': HTTP reason from the server,
         'body': HTTP body of the server's response}
    """
    return self.Delete(self._CleanUri(edit_uri),
                       url_params=url_params, escape_params=escape_params)

  def GetGroupsFeed(self, uri=None):
    """Retrieves the groups feed at uri (default: the user's groups feed)."""
    uri = uri or self.GetFeedUri('groups')
    return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString)

  def CreateGroup(self, new_group, insert_uri=None, url_params=None,
                  escape_params=True):
    """Adds a new contact group; see CreateContact for parameter details."""
    insert_uri = insert_uri or self.GetFeedUri('groups')
    return self.Post(new_group, insert_uri, url_params=url_params,
                     escape_params=escape_params,
                     converter=gdata.contacts.GroupEntryFromString)

  def UpdateGroup(self, edit_uri, updated_group, url_params=None,
                  escape_params=True):
    """Updates an existing group; see UpdateContact for parameter details."""
    return self.Put(updated_group, self._CleanUri(edit_uri),
                    url_params=url_params,
                    escape_params=escape_params,
                    converter=gdata.contacts.GroupEntryFromString)

  def DeleteGroup(self, edit_uri, extra_headers=None,
                  url_params=None, escape_params=True):
    """Removes an existing group; see DeleteContact for parameter details."""
    return self.Delete(self._CleanUri(edit_uri),
                       url_params=url_params, escape_params=escape_params)

  def ChangePhoto(self, media, contact_entry_or_url, content_type=None,
                  content_length=None):
    """Change the photo for the contact by uploading a new photo.

    Performs a PUT against the photo edit URL to send the binary data for the
    photo.

    Args:
      media: filename, file-like-object, or a gdata.MediaSource object to send.
      contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
          method will search for an edit photo link URL and
          perform a PUT to the URL.
      content_type: str (optional) the mime type for the photo data. This is
          necessary if media is a file or file name, but if media
          is a MediaSource object then the media object can contain
          the mime type. If media_type is set, it will override the
          mime type in the media object.
      content_length: int or str (optional) Specifying the content length is
          only required if media is a file-like object. If media
          is a filename, the length is determined using
          os.path.getsize. If media is a MediaSource object, it is
          assumed that it already contains the content length.
    """
    if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
      url = contact_entry_or_url.GetPhotoEditLink().href
    else:
      url = contact_entry_or_url
    if isinstance(media, gdata.MediaSource):
      payload = media
    # If the media object is a file-like object, then use it as the file
    # handle in the MediaSource.
    elif hasattr(media, 'read'):
      payload = gdata.MediaSource(file_handle=media,
          content_type=content_type, content_length=content_length)
    # Assume that the media object is a file name.
    else:
      payload = gdata.MediaSource(content_type=content_type,
          content_length=content_length, file_path=media)
    return self.Put(payload, url)

  def GetPhoto(self, contact_entry_or_url):
    """Retrieves the binary data for the contact's profile photo as a string.

    Args:
      contact_entry_or_url: a gdata.contacts.ContactEntry object or a string
         containing the photo link's URL. If the contact entry does not
         contain a photo link, the image will not be fetched and this method
         will return None.
    """
    # TODO: add the ability to write out the binary image data to a file,
    # reading and writing a chunk at a time to avoid potentially using up
    # large amounts of memory.
    url = None
    if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
      photo_link = contact_entry_or_url.GetPhotoLink()
      if photo_link:
        url = photo_link.href
    else:
      url = contact_entry_or_url
    if url:
      return self.Get(url, converter=str)
    else:
      return None

  def DeletePhoto(self, contact_entry_or_url):
    """Deletes the contact's profile photo via its photo edit URL.

    Accepts either a ContactEntry (its photo edit link is used) or the edit
    URL itself; does nothing if no URL can be determined.
    """
    url = None
    if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
      url = contact_entry_or_url.GetPhotoEditLink().href
    else:
      url = contact_entry_or_url
    if url:
      self.Delete(url)

  def GetProfilesFeed(self, uri=None):
    """Retrieves a feed containing all domain's profiles.

    Args:
      uri: string (optional) the URL to retrieve the profiles feed,
          for example /m8/feeds/profiles/default/full

    Returns:
      On success, a ProfilesFeed containing the profiles.
      On failure, raises a RequestError.
    """
    uri = uri or self.GetFeedUri('profiles')
    return self.Get(uri,
                    converter=gdata.contacts.ProfilesFeedFromString)

  def GetProfile(self, uri):
    """Retrieves a domain's profile for the user.

    Args:
      uri: string the URL to retrieve the profiles feed,
          for example /m8/feeds/profiles/default/full/username

    Returns:
      On success, a ProfileEntry containing the profile for the user.
      On failure, raises a RequestError
    """
    return self.Get(uri,
                    converter=gdata.contacts.ProfileEntryFromString)

  def UpdateProfile(self, edit_uri, updated_profile, url_params=None,
                    escape_params=True):
    """Updates an existing profile.

    Args:
      edit_uri: string The edit link URI for the element being updated
      updated_profile: string atom.Entry or subclass containing
          the Atom Entry which will replace the profile which is
          stored at the edit_url.
      url_params: dict (optional) Additional URL parameters to be included
          in the update request.
      escape_params: boolean (optional) If true, the url_params will be
          escaped before they are included in the request.

    Returns:
      On successful update, a httplib.HTTPResponse containing the server's
        response to the PUT request.
      On failure, raises a RequestError.
    """
    return self.Put(updated_profile, self._CleanUri(edit_uri),
                    url_params=url_params, escape_params=escape_params,
                    converter=gdata.contacts.ProfileEntryFromString)

  def ExecuteBatch(self, batch_feed, url,
                   converter=gdata.contacts.ContactsFeedFromString):
    """Sends a batch request feed to the server.

    Args:
      batch_feed: gdata.contacts.ContactFeed A feed containing batch
          request entries. Each entry contains the operation to be performed
          on the data contained in the entry. For example an entry with an
          operation type of insert will be used as if the individual entry
          had been inserted.
      url: str The batch URL to which these operations should be applied.
      converter: Function (optional) The function used to convert the server's
          response to an object. The default value is ContactsFeedFromString.

    Returns:
      The results of the batch request's execution on the server. If the
      default converter is used, this is stored in a ContactsFeed.
    """
    return self.Post(batch_feed, url, converter=converter)

  def ExecuteBatchProfiles(self, batch_feed, url,
                           converter=gdata.contacts.ProfilesFeedFromString):
    """Sends a batch request feed to the server.

    Args:
      batch_feed: gdata.profiles.ProfilesFeed A feed containing batch
          request entries. Each entry contains the operation to be performed
          on the data contained in the entry. For example an entry with an
          operation type of insert will be used as if the individual entry
          had been inserted.
      url: string The batch URL to which these operations should be applied.
      converter: Function (optional) The function used to convert the server's
          response to an object. The default value is
          gdata.profiles.ProfilesFeedFromString.

    Returns:
      The results of the batch request's execution on the server. If the
      default converter is used, this is stored in a ProfilesFeed.
    """
    return self.Post(batch_feed, url, converter=converter)

  def _CleanUri(self, uri):
    """Sanitizes a feed URI.

    Args:
      uri: The URI to sanitize, can be relative or absolute.

    Returns:
      The given URI without its http://server prefix, if any.
      Keeps the leading slash of the URI.
    """
    url_prefix = 'http://%s' % self.server
    if uri.startswith(url_prefix):
      uri = uri[len(url_prefix):]
    return uri
class ContactsQuery(gdata.service.Query):
  """Query object for the contacts feed, with a 'group' filter parameter."""

  def __init__(self, feed=None, text_query=None, params=None,
               categories=None, group=None):
    # Default to the authenticated user's full contacts feed.
    self.feed = feed or '/m8/feeds/contacts/default/full'
    if group:
      self._SetGroup(group)
    gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
                                 params=params, categories=categories)

  def _GetGroup(self):
    # Query stores URL parameters mapping-style; absent means no filter.
    if 'group' in self:
      return self['group']
    else:
      return None

  def _SetGroup(self, group_id):
    self['group'] = group_id

  group = property(_GetGroup, _SetGroup,
      doc='The group query parameter to find only contacts in this group')
class GroupsQuery(gdata.service.Query):
  """Query object for the contact groups feed."""

  def __init__(self, feed=None, text_query=None, params=None,
               categories=None):
    # Default to the authenticated user's full groups feed.
    self.feed = feed if feed else '/m8/feeds/groups/default/full'
    gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
                                 params=params, categories=categories)
class ProfilesQuery(gdata.service.Query):
  """Constructs a query object for the profiles feed."""

  def __init__(self, feed=None, text_query=None, params=None,
               categories=None):
    # Default to the authenticated user's full profiles feed.
    self.feed = feed if feed else '/m8/feeds/profiles/default/full'
    gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
                                 params=params, categories=categories)
| |
import logging
from . import generic
from .elfreloc import ELFReloc
from ....errors import CLEOperationError
# Module-level logger used by the relocation classes below for diagnostics.
l = logging.getLogger(name=__name__)
# Architecture tag identifying which relocations this module provides.
arch = 'ARM'
# Reference: "ELF for the ARM Architecture ABI r2.10"
# http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044e/IHI0044E_aaelf.pdf
def _applyReloc(inst, result, mask=0xFFFFFFFF):
"""
Applies the specified mask to the relocation and verifies that the mask
is valid for the given result.
"""
try:
if result & ~mask:
raise ValueError('result & ~mask is not 0.')
except ValueError as ex:
l.warning("Relocation failed: %r", ex)
return 0 # worst case, you hook it yourself
return ((inst & ~mask) | (result & mask)) # pylint: disable=superfluous-parens
def _isThumbFunc(symbol, addr):
"""
Checks whether the provided symbol and address is a Thumb function by
verifying the LSB is 1 and the symbol is STT_FUNC.
"""
return (addr % 2 == 1) and symbol.is_function
class R_ARM_CALL(ELFReloc):
    """
    Relocate R_ARM_CALL symbols via instruction modification. It additionally
    handles R_ARM_PC24 and R_ARM_JUMP24. The former is deprecated and is now
    just the same as R_ARM_CALL.

    R_ARM_JUMP24 doesn't need the Thumb check. Technically, if the Thumb check
    succeeds on R_ARM_JUMP24, it's a bad call that shouldn't have been generated
    by the linker, so we may as well as just treat it like R_ARM_CALL.

    - Class: Static
    - Type: ARM (R_ARM_CALL, R_ARM_JUMP24); Deprecated (R_ARM_PC24)
    - Code: 1 (R_ARM_PC24), 28 (R_ARM_CALL), 29 (R_ARM_JUMP24)
    - Operation: ((S + A) | T) - P
      - S is the address of the symbol
      - A is the addend
      - P is the target location (place being relocated)
      - T is 1 if the symbol is of type STT_FUNC and addresses a Thumb instruction
    """
    @property
    def value(self):
        # Returns the rewritten instruction word to store at the relocation site.
        P = self.rebased_addr  # Location of this instruction
        A = inst = self.addend  # The instruction
        S = self.resolvedby.rebased_addr  # The symbol's "value", where it points to
        T = _isThumbFunc(self.symbol, S)
        if inst & 0x00800000: A |= 0xFF000000  # Sign extend to 32-bits
        result = ((S + (A << 2)) | T) - P  # Do the initial work
        imm24 = (result & 0x03FFFFFE) >> 2  # Sign_extend(inst[25:2])
        if T:  # Do Thumb relocation
            mask = 0xFF000000
            # bit_h carries the half-word offset bit into the rewritten opcode byte.
            bit_h = (result & 0x02) >> 1
            result = _applyReloc(inst, (0xFA | bit_h), mask)
        else:  # Do ARM relocation
            # Only the low 24 bits (the branch immediate field) are rewritten.
            mask = 0xFFFFFF
            result = _applyReloc(inst, imm24, mask)
        l.debug("%s relocated as R_ARM_CALL with new instruction: %#x", self.symbol.name, result)
        return result
class R_ARM_PREL31(ELFReloc):
    """
    Relocate R_ARM_PREL31 symbols via instruction modification. The difference
    between this and R_ARM_CALL/R_ARM_PC24/R_ARM_JUMP24 is that it's a data
    relocation

    - Class: Static
    - Type: Data
    - Code: 42
    - Operation: ((S + A) | T) - P
      - S is the address of the symbol
      - A is the addend
      - P is the target location (place being relocated)
      - T is 1 if the symbol is of type STT_FUNC and addresses a Thumb instruction
    """
    @property
    def value(self):
        # Returns the rewritten 31-bit PC-relative word for this data site.
        P = self.rebased_addr  # Location of this instruction
        A = self.addend  # The instruction
        S = self.resolvedby.rebased_addr  # The symbol's "value", where it points to
        T = _isThumbFunc(self.symbol, S)
        if A & 0x01000000: A |= 0xF1000000  # Sign extend 31-bits
        result = ((S + A) | T) - P  # Do the initial work
        mask = 0x7FFFFFFF
        rel31 = result & mask
        # _applyReloc keeps A's top bit while replacing the low 31 bits.
        result = _applyReloc(A, rel31, mask)
        l.debug("%s relocated as R_ARM_PREL31 to: 0x%x", self.symbol.name, result)
        return result
class R_ARM_REL32(ELFReloc):
    """
    Relocate R_ARM_REL32 symbols (PC-relative data relocation). This is
    essentially generic.GenericPCRelativeAddendReloc plus a check for
    whether or not the target is Thumb.

    - Class: Static
    - Type: Data
    - Code: 3
    - Operation: ((S + A) | T) - P
      - S is the address of the symbol
      - A is the addend
      - P is the target location (place being relocated)
      - T is 1 if the symbol is of type STT_FUNC and addresses a Thumb instruction
    """
    @property
    def value(self):
        place = self.rebased_addr                   # P: the location being patched
        symbol_addr = self.resolvedby.rebased_addr  # S: where the symbol points
        thumb = _isThumbFunc(self.symbol, symbol_addr)
        relocated = ((symbol_addr + self.addend) | thumb) - place
        l.debug("%s relocated as R_ARM_REL32 to: 0x%x", self.symbol.name, relocated)
        return relocated
class R_ARM_ABS32(ELFReloc):
    """
    Relocate R_ARM_ABS32 symbols (absolute data relocation). This is
    essentially generic.GenericAbsoluteAddendReloc plus a check for
    whether or not the target is Thumb.

    - Class: Static
    - Type: Data
    - Code: 2
    - Operation: (S + A) | T
      - S is the address of the symbol
      - A is the addend
      - T is 1 if the symbol is of type STT_FUNC and addresses a Thumb instruction
    """
    @property
    def value(self):
        symbol_addr = self.resolvedby.rebased_addr  # S: where the symbol points
        thumb = _isThumbFunc(self.symbol, symbol_addr)
        relocated = (symbol_addr + self.addend) | thumb
        l.debug("%s relocated as R_ARM_ABS32 to: 0x%x", self.symbol.name, relocated)
        return relocated
class R_ARM_MOVW_ABS_NC(ELFReloc):
    """
    Relocate R_ARM_MOVW_ABS_NC symbols.
    - Class: Static
    - Type: Instruction
    - Code: 43
    - Operation: (S + A) | T
      - S is the address of the symbol
      - A is the addend
      - T is 1 if the symbol is of type STT_FUNC and addresses a Thumb instruction
    """
    @property
    def value(self):
        # Returns the MOVW instruction rewritten to load the low half-word
        # of the relocated address.
        inst = self.addend  # The instruction
        S = self.resolvedby.rebased_addr  # The symbol's "value", where it points to
        T = _isThumbFunc(self.symbol, S)
        # initial addend is formed by interpreting the 16-bit literal field
        # of the instruction as a signed value
        A = ((inst & 0xf0000) >> 4) | (inst & 0xfff)
        if (A & 0x8000):
            # two's complement
            A = -((A ^ 0xffff) + 1)
        X = ((S + A) | T)
        MaskX = X & 0xffff  # MOVW carries the low 16 bits of the address
        # inst modification:
        part1 = MaskX >> 12   # top nibble -> inst[19:16]
        part2 = MaskX & 0xFFF  # low 12 bits -> inst[11:0]
        inst &= 0xfff0f000  # clears inst[11, 0] and inst[19, 16]
        inst |= ((part1 << 16) & 0xf0000)  # inst[19, 16] = part1
        inst |= (part2 & 0xfff)  # inst[11, 0] = part2
        l.debug("%s relocated as R_ARM_MOVW_ABS_NC to: 0x%x", self.symbol.name, inst)
        return inst
class R_ARM_MOVT_ABS(ELFReloc):
    """
    Relocate R_ARM_MOVT_ABS symbols.
    - Class: Static
    - Type: Instruction
    - Code: 44
    - Operation: S + A
    - S is the address of the symbol
    - A is the addend
    """
    @property
    def value(self):
        # Returns the MOVT instruction rewritten to load the high half-word
        # of the relocated address.
        inst = self.addend  # The instruction
        S = self.resolvedby.rebased_addr  # The symbol's "value", where it points to
        # initial addend is formed by interpreting the 16-bit literal field
        # of the instruction as a signed value
        A = ((inst & 0xf0000) >> 4) | (inst & 0xfff)
        if (A & 0x8000):
            # two's complement
            A = -((A ^ 0xffff) + 1)
        X = (S + A)
        MaskX = X & 0xffff0000  # MOVT carries the high 16 bits of the address
        # inst modification:
        part1 = (MaskX >> 16) >> 12   # top nibble -> inst[19:16]
        part2 = (MaskX >> 16) & 0xFFF  # low 12 bits -> inst[11:0]
        inst &= 0xfff0f000  # clears inst[11, 0] and inst[19, 16]
        inst |= ((part1 << 16) & 0xf0000)  # inst[19, 16] = part1
        inst |= (part2 & 0xfff)  # inst[11, 0] = part2
        l.debug("%s relocated as R_ARM_MOVT_ABS to: 0x%x", self.symbol.name, inst)
        return inst
class R_ARM_THM_CALL(ELFReloc):
    """
    Relocate R_ARM_THM_CALL symbols via instruction modification.
    - Class: Static
    - Type: ARM (R_ARM_THM_CALL)
    - Code: 10
    - Operation: ((S + A) | T) - P
      - S is the address of the symbol
      - A is the addend
      - P is the target location (place being relocated)
      - T is 1 if the symbol is of type STT_FUNC and addresses a Thumb instruction (This bit is entirely irrelevant because the 1-bit of the address gets shifted off in the encoding)
    - Encoding: See http://hermes.wings.cs.wisc.edu/files/Thumb-2SupplementReferenceManual.pdf
      - Page 71 (3-31) has the chart
      - It appears that it mistakenly references the I1 and I2 bits as J1 and J2 in the chart (see the notes at the bottom of the page -- the ranges don't make sense)
      - However, the J1/J2 bits are XORed with !S bit in this case (see vex implementation: https://github.com/angr/vex/blob/6d1252c7ce8fe8376318b8f8bb8034058454c841/priv/guest_arm_toIR.c#L19219 )
      - Implementation appears correct with the bits placed into offset[23:22]
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Lazily-loaded cache of the 4 raw bytes of the BL/BLX instruction.
        self._insn_bytes = None

    def resolve_symbol(self, solist, **kwargs):
        # Force Thumb-mode symbol resolution for this relocation type.
        kwargs['thumb'] = True
        super().resolve_symbol(solist, **kwargs)

    @property
    def value(self):
        # Returns the rewritten 4-byte BL/BLX instruction (little-endian word).
        P = self.rebased_addr  # Location of this instruction
        S = self.resolvedby.rebased_addr  # The symbol's "value", where it points to
        T = _isThumbFunc(self.symbol, S)
        A = 0
        # Deconstruct the instruction:
        #  Because this 4-byte instruction is treated as two 2-byte instructions,
        #  the bytes are in the order `b3 b4 b1 b2`, where b4 is the most significant.
        if self._insn_bytes is None:
            self._insn_bytes = self.owner.memory.load(self.relative_addr, 4)
        hi = (self._insn_bytes[1] << 8) | self._insn_bytes[0]
        lo = (self._insn_bytes[3] << 8) | self._insn_bytes[2]
        inst = (hi << 16) | lo

        def gen_mask(n_bits, first_bit):
            """
            Builds a mask that captures n_bits, where the first bit captured is first_bit
            """
            return ((1 << n_bits) - 1) << first_bit

        if self.is_rela:
            # RELA relocations carry the addend explicitly.
            A = self.addend
        else:
            # REL relocations: build A (the initial addend) from the
            # immediate fields packed into the instruction itself.
            A |= (inst & gen_mask(11, 0)) << 1  # A[11:1] = inst[10:0] (inclusive)
            A |= ((inst & gen_mask(10, 16)) >> 16) << 12  # A[21:12] = inst[25:16]
            sign_bit = bool(inst & gen_mask(1, 26)) & 1  # sign_bit = inst[26]
            J1 = (bool(inst & gen_mask(1, 13)) & 1) ^ (not sign_bit)  # J1 = inst[13] ^ !sign
            J2 = (bool(inst & gen_mask(1, 11)) & 1) ^ (not sign_bit)  # J2 = inst[11] ^ !sign
            A |= J1 << 23  # A[23] = J1
            A |= J2 << 22  # A[22] = J2
            A &= 0x7fffff
            if sign_bit:
                # Sign-extend the 24-bit offset to 32 bits.
                A |= 0xff800000
        # Compute X, the new offset, from the symbol addr, S, the addend, A,
        # the thumb flag, T, and PC, P.
        x = (((S + A) | T) - P) & 0xffffffff  # Also mask to 32 bits
        # Ensure jump is in range
        if x & 0xff800000 != 0 and x & 0xff800000 != 0xff800000:
            raise CLEOperationError("Jump target out of range for reloc R_ARM_THM_CALL (+- 2^23). "
                                    "This may be due to SimProcedures being allocated outside the jump range. "
                                    "If you believe this is the case, set 'rebase_granularity'=0x1000 in the "
                                    "load options.")
        # Rebuild the instruction, first clearing out any previously set offset bits
        #          offset  1 2  offset
        # 11110S [21:12] 11J?J [11:1] (if ? is 1, BL; if ? is 0, BLX)
        inst &= ~0b00000111111111110010111111111111
        sign_bit = bool(x & gen_mask(1, 24)) & 1
        J1 = (bool(x & gen_mask(1, 23)) & 1) ^ (not sign_bit)
        J2 = (bool(x & gen_mask(1, 22)) & 1) ^ (not sign_bit)
        inst |= sign_bit << 26
        inst |= J1 << 13
        inst |= J2 << 11
        inst |= (x & gen_mask(11, 1)) >> 1
        inst |= ((x & gen_mask(10, 12)) >> 12) << 16
        # Put it back into <little endian short> <little endian short> format
        raw = ((inst & 0x00ff0000) >> 16, (inst & 0xff000000) >> 24,
               (inst & 0x00ff), (inst & 0xff00) >> 8)
        # The relocation handler expects a little-endian result, so flip it around.
        result = (raw[3] << 24) | (raw[2] << 16) | (raw[1] << 8) | raw[0]
        l.debug("%s relocated as R_ARM_THM_CALL with new instruction: %#x", self.symbol.name, result)
        return result
# The relocation types below need no ARM-specific handling; they map
# directly onto the generic ELF relocation implementations.

class R_ARM_COPY(generic.GenericCopyReloc):
    pass

class R_ARM_GLOB_DAT(generic.GenericJumpslotReloc):
    pass

class R_ARM_JUMP_SLOT(generic.GenericJumpslotReloc):
    pass

class R_ARM_RELATIVE(generic.GenericRelativeReloc):
    pass

class R_ARM_ABS32_NOI(generic.GenericAbsoluteAddendReloc):
    pass

class R_ARM_REL32_NOI(generic.GenericPCRelativeAddendReloc):
    pass

class R_ARM_TLS_DTPMOD32(generic.GenericTLSModIdReloc):
    pass

class R_ARM_TLS_DTPOFF32(generic.GenericTLSDoffsetReloc):
    pass

class R_ARM_TLS_TPOFF32(generic.GenericTLSOffsetReloc):
    pass

# R_ARM_JUMP24 and the deprecated R_ARM_PC24 share R_ARM_CALL's
# implementation (see its docstring).

class R_ARM_JUMP24(R_ARM_CALL):
    pass

class R_ARM_PC24(R_ARM_CALL):
    pass

# EDG says: Implementing these the easy way.
# Inaccuracies may exist. This is ARM, after all.

class R_ARM_THM_JUMP24(R_ARM_THM_CALL):
    pass

class R_ARM_THM_JUMP19(R_ARM_THM_CALL):
    pass

class R_ARM_THM_JUMP6(R_ARM_THM_CALL):
    pass
| |
"""Support for scanning a network with nmap."""
from __future__ import annotations
import logging
from typing import Any, Callable
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN as DEVICE_TRACKER_DOMAIN,
PLATFORM_SCHEMA as DEVICE_TRACKER_PLATFORM_SCHEMA,
SOURCE_TYPE_ROUTER,
)
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.components.device_tracker.const import (
CONF_CONSIDER_HOME,
CONF_SCAN_INTERVAL,
DEFAULT_CONSIDER_HOME,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_EXCLUDE, CONF_HOSTS
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.typing import ConfigType
from . import NmapDevice, NmapDeviceScanner, short_hostname, signal_device_update
from .const import (
CONF_HOME_INTERVAL,
CONF_OPTIONS,
DEFAULT_OPTIONS,
DOMAIN,
TRACKER_SCAN_INTERVAL,
)
_LOGGER = logging.getLogger(__name__)

# Schema for the legacy YAML device_tracker platform configuration; entries
# validated here are imported into a config entry (see async_get_scanner).
PLATFORM_SCHEMA = DEVICE_TRACKER_PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOSTS): cv.ensure_list,
        vol.Required(CONF_HOME_INTERVAL, default=0): cv.positive_int,
        vol.Required(
            CONF_CONSIDER_HOME, default=DEFAULT_CONSIDER_HOME.total_seconds()
        ): cv.time_period,
        vol.Optional(CONF_EXCLUDE, default=[]): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_OPTIONS, default=DEFAULT_OPTIONS): cv.string,
    }
)
async def async_get_scanner(hass: HomeAssistant, config: ConfigType) -> None:
    """Validate the configuration and return a Nmap scanner.

    Despite the name, nothing is returned: the legacy YAML configuration is
    translated into a config-entry import flow and the user is warned to
    remove the YAML section.
    """
    conf = config[DEVICE_TRACKER_DOMAIN]

    scan_interval = (
        conf[CONF_SCAN_INTERVAL].total_seconds()
        if CONF_SCAN_INTERVAL in conf
        else TRACKER_SCAN_INTERVAL
    )
    consider_home = (
        conf[CONF_CONSIDER_HOME].total_seconds()
        if CONF_CONSIDER_HOME in conf
        else DEFAULT_CONSIDER_HOME.total_seconds()
    )

    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_IMPORT},
            data={
                CONF_HOSTS: ",".join(conf[CONF_HOSTS]),
                CONF_HOME_INTERVAL: conf[CONF_HOME_INTERVAL],
                CONF_CONSIDER_HOME: consider_home,
                CONF_EXCLUDE: ",".join(conf[CONF_EXCLUDE]),
                CONF_OPTIONS: conf[CONF_OPTIONS],
                CONF_SCAN_INTERVAL: scan_interval,
            },
        )
    )

    _LOGGER.warning(
        "Your Nmap Tracker configuration has been imported into the UI, "
        "please remove it from configuration.yaml. "
    )
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable
) -> None:
    """Set up device tracker for Nmap Tracker component."""
    nmap_tracker = hass.data[DOMAIN][entry.entry_id]

    @callback
    def device_new(mac_address):
        """Signal a new device."""
        async_add_entities([NmapTrackerEntity(nmap_tracker, mac_address, True)])

    @callback
    def device_missing(mac_address):
        """Signal a missing device."""
        async_add_entities([NmapTrackerEntity(nmap_tracker, mac_address, False)])

    # Subscribe both dispatcher signals and tear them down with the entry.
    for signal, handler in (
        (nmap_tracker.signal_device_new, device_new),
        (nmap_tracker.signal_device_missing, device_missing),
    ):
        entry.async_on_unload(async_dispatcher_connect(hass, signal, handler))
class NmapTrackerEntity(ScannerEntity):
    """Presence entity backed by the nmap device scanner."""

    def __init__(
        self, nmap_tracker: NmapDeviceScanner, mac_address: str, active: bool
    ) -> None:
        """Initialize an nmap tracker entity."""
        self._nmap_tracker = nmap_tracker
        self._mac_address = mac_address
        self._active = active
        self._tracked = self._nmap_tracker.devices.tracked

    @property
    def _device(self) -> NmapDevice:
        """Return the latest scan record for this MAC address."""
        return self._tracked[self._mac_address]

    @property
    def is_connected(self) -> bool:
        """Return device status."""
        return self._active

    @property
    def name(self) -> str:
        """Return device name."""
        return self._device.name

    @property
    def unique_id(self) -> str:
        """Return device unique id."""
        return self._mac_address

    @property
    def ip_address(self) -> str:
        """Return the primary ip address of the device."""
        return self._device.ipv4

    @property
    def mac_address(self) -> str:
        """Return the mac address of the device."""
        return self._mac_address

    @property
    def hostname(self) -> str | None:
        """Return the shortened hostname of the device, if known."""
        full_hostname = self._device.hostname
        return short_hostname(full_hostname) if full_hostname else None

    @property
    def source_type(self) -> str:
        """Return tracker source type."""
        return SOURCE_TYPE_ROUTER

    @property
    def device_info(self) -> DeviceInfo:
        """Return the device information."""
        return {
            "connections": {(CONNECTION_NETWORK_MAC, self._mac_address)},
            "default_manufacturer": self._device.manufacturer,
            "default_name": self.name,
        }

    @property
    def should_poll(self) -> bool:
        """State is pushed via dispatcher; no polling needed."""
        return False

    @property
    def icon(self) -> str:
        """Return device icon."""
        if self._active:
            return "mdi:lan-connect"
        return "mdi:lan-disconnect"

    @callback
    def async_process_update(self, online: bool) -> None:
        """Record the device's latest online/offline state."""
        self._active = online

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the attributes."""
        device = self._device
        return {
            "last_time_reachable": device.last_update.isoformat(timespec="seconds"),
            "reason": device.reason,
        }

    @callback
    def async_on_demand_update(self, online: bool) -> None:
        """Apply a pushed update and write the new state."""
        self.async_process_update(online)
        self.async_write_ha_state()

    async def async_added_to_hass(self) -> None:
        """Register state update callback."""
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                signal_device_update(self._mac_address),
                self.async_on_demand_update,
            )
        )
| |
#!/usr/bin/env python
#
# A script that takes an scons-src-{version}.zip file, unwraps it in
# a temporary location, and calls runtest.py to execute one or more of
# its tests.
#
# The default is to download the latest scons-src archive from the SCons
# web site, and to execute all of the tests.
#
# With a little more work, this will become the basis of an automated
# testing and reporting system that anyone will be able to use to
# participate in testing SCons on their system and regularly reporting
# back the results. A --xml option is a stab at gathering a lot of
# relevant information about the system, the Python version, etc.,
# so that problems on different platforms can be identified sooner.
#
import atexit
import getopt
import imp
import os
import os.path
import sys
import tempfile
import time
import zipfile
try:
# try Python 3.x style
from urllib.request import urlretrieve
except ImportError:
# nope, must be 2.x; this hack is equivalent
import imp
# protect import from fixer
urlretrieve = imp.load_module('urllib',
*imp.find_module('urllib')).urlretrieve
helpstr = """\
Usage: scons-test.py [-f zipfile] [-o outdir] [-v] [--xml] [runtest arguments]
Options:
  -f FILE                     Specify input .zip FILE name
  -o DIR, --out DIR           Change output directory name to DIR
  -v, --verbose               Print file names when extracting
  --xml                       XML output
"""

opts, args = getopt.getopt(sys.argv[1:],
                           "f:o:v",
                           ['file=', 'out=', 'verbose', 'xml'])

format = None
outdir = None
# Identity by default; replaced with a printing version under --verbose.
printname = lambda x: x
inputfile = 'http://scons.sourceforge.net/scons-src-latest.zip'

for o, a in opts:
    if o == '-f' or o == '--file':
        inputfile = a
    elif o == '-o' or o == '--out':
        outdir = a
    elif o == '-v' or o == '--verbose':
        def printname(x):
            # Use the print() function so the script also runs on Python 3
            # (the file already carries a Python 3 urllib import branch).
            print(x)
    elif o == '--xml':
        format = o
startdir = os.getcwd()

# tempfile.mktemp() only returns a name without creating anything, which is
# an exploitable race; mkdtemp() atomically creates the directory with a
# safe mode and returns its path.
tempdir = tempfile.mkdtemp(prefix='scons-test.')

def cleanup(tempdir=tempdir):
    """Restore the original working directory and remove the scratch tree."""
    import shutil
    os.chdir(startdir)
    shutil.rmtree(tempdir)

atexit.register(cleanup)
# Fetch the input file if it happens to be across a network somewhere.
# Ohmigod, does Python make this simple...
# (urlretrieve also accepts a local path, so no special-casing is needed.)
inputfile, headers = urlretrieve(inputfile)

# Unzip the header file in the output directory. We use our own code
# (lifted from scons-unzip.py) to make the output subdirectory name
# match the basename of the .zip file.
zf = zipfile.ZipFile(inputfile, 'r')

if outdir is None:
    # Default the output directory to <tempdir>/<zip basename without extension>.
    name, _ = os.path.splitext(os.path.basename(inputfile))
    outdir = os.path.join(tempdir, name)
def outname(n, outdir=outdir):
    """Map an archive member path *n* to its destination under *outdir*.

    The member's first path component (the archive's top-level directory)
    is deliberately dropped and replaced by *outdir* itself, so the
    extracted tree lands directly inside the chosen output directory.
    """
    remainder, chunks = n, []
    while True:
        remainder, leaf = os.path.split(remainder)
        if not remainder:
            # The final leaf is the archive's root directory name: skip it.
            break
        chunks.append(leaf)
    pieces = [outdir]
    pieces.extend(reversed(chunks))
    return os.path.join(*pieces)
for name in zf.namelist():
    dest = outname(name)
    dir = os.path.dirname(dest)
    try:
        os.makedirs(dir)
    except OSError:
        # Directory already exists; only swallow OS-level errors rather
        # than hiding every exception with a bare except.
        pass
    printname(dest)
    # if the file exists, then delete it before writing
    # to it so that we don't end up trying to write to a symlink:
    if os.path.isfile(dest) or os.path.islink(dest):
        os.unlink(dest)
    if not os.path.isdir(dest):
        # Binary mode: ZipFile.read() returns bytes (text mode breaks on
        # Python 3 and risks newline translation), and the with-block
        # guarantees the handle is closed.
        with open(dest, 'wb') as f:
            f.write(zf.read(name))

os.chdir(outdir)
# Load (by hand) the SCons modules we just unwrapped so we can
# extract their version information. Note that we have to override
# SCons.Script.main() with a do_nothing() function, because loading up
# the 'scons' script will actually try to execute SCons...
src_script = os.path.join(outdir, 'src', 'script')
src_engine = os.path.join(outdir, 'src', 'engine')
src_engine_SCons = os.path.join(src_engine, 'SCons')

# Import the SCons engine package from the unpacked tree.
fp, pname, desc = imp.find_module('SCons', [src_engine])
SCons = imp.load_module('SCons', fp, pname, desc)

# Import SCons.Script so its main() can be stubbed out below.
fp, pname, desc = imp.find_module('Script', [src_engine_SCons])
SCons.Script = imp.load_module('Script', fp, pname, desc)

def do_nothing():
    pass

# Neutralize main() before importing the 'scons' entry-point script,
# which would otherwise run a build on import.
SCons.Script.main = do_nothing

fp, pname, desc = imp.find_module('scons', [src_script])
scons = imp.load_module('scons', fp, pname, desc)
fp.close()
# Default is to run all the tests by passing the -a flag to runtest.py;
# otherwise forward the remaining command-line arguments verbatim.
runtest_args = ' '.join(args) if args else '-a'
if format == '--xml':

    print("<scons_test_run>")
    print(" <sys>")
    sys_keys = ['byteorder', 'exec_prefix', 'executable', 'maxint', 'maxunicode', 'platform', 'prefix', 'version', 'version_info']
    for k in sys_keys:
        # getattr() with a default instead of sys.__dict__[k]: keys that do
        # not exist on this Python version (e.g. maxint on 3.x) report None
        # rather than crashing the report.
        print(" <%s>%s</%s>" % (k, getattr(sys, k, None), k))
    print(" </sys>")

    fmt = '%a %b %d %H:%M:%S %Y'
    print(" <time>")
    print(" <gmtime>%s</gmtime>" % time.strftime(fmt, time.gmtime()))
    print(" <localtime>%s</localtime>" % time.strftime(fmt, time.localtime()))
    print(" </time>")

    print(" <tempdir>%s</tempdir>" % tempdir)

    def print_version_info(tag, module):
        """Emit the version/build XML stanza for one scons module."""
        print(" <%s>" % tag)
        print(" <version>%s</version>" % module.__version__)
        print(" <build>%s</build>" % module.__build__)
        print(" <buildsys>%s</buildsys>" % module.__buildsys__)
        print(" <date>%s</date>" % module.__date__)
        print(" <developer>%s</developer>" % module.__developer__)
        print(" </%s>" % tag)

    print(" <scons>")
    print_version_info("script", scons)
    print_version_info("engine", SCons)
    print(" </scons>")

    environ_keys = [
        'PATH',
        'SCONSFLAGS',
        'SCONS_LIB_DIR',
        'PYTHON_ROOT',
        'QTDIR',
        'COMSPEC',
        'INTEL_LICENSE_FILE',
        'INCLUDE',
        'LIB',
        'MSDEVDIR',
        'OS',
        'PATHEXT',
        'SystemRoot',
        'TEMP',
        'TMP',
        'USERNAME',
        'VXDOMNTOOLS',
        'WINDIR',
        'XYZZY',  # NOTE: comma was missing here, silently fusing this entry with 'ENV'
        'ENV',
        'HOME',
        'LANG',
        'LANGUAGE',
        'LOGNAME',
        'MACHINE',
        'OLDPWD',
        'PWD',
        'OPSYS',
        'SHELL',
        'TMPDIR',
        'USER',
    ]

    print(" <environment>")
    for key in sorted(environ_keys):
        value = os.environ.get(key)
        if value:
            print(" <variable>")
            print(" <name>%s</name>" % key)
            print(" <value>%s</value>" % value)
            print(" </variable>")
    print(" </environment>")

    command = '"%s" runtest.py -q -o - --xml %s' % (sys.executable, runtest_args)
    os.system(command)
    print("</scons_test_run>")

else:

    def print_version_info(tag, module):
        """Print a one-line version summary for one scons module."""
        print("\t%s: v%s.%s, %s, by %s on %s" % (tag,
                                                 module.__version__,
                                                 module.__build__,
                                                 module.__date__,
                                                 module.__developer__,
                                                 module.__buildsys__))

    print("SCons by Steven Knight et al.:")
    print_version_info("script", scons)
    print_version_info("engine", SCons)

    command = '"%s" runtest.py %s' % (sys.executable, runtest_args)
    os.system(command)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
"""This test creates two top level actors and one sub-actor and
verifies that the actors can exchange sequences of messages."""
import time
from thespian.actors import *
from thespian.test import *
class rosaline(Actor):
    # Deliberately does NOT override receiveMessage: delivering any message
    # to a rosaline actor raises, which the non-listening-actor tests
    # (e.g. test07) rely on.
    name = 'Rosaline'
class Romeo(Actor):
    """Top-level actor that answers Juliet's lines from the balcony scene.

    The substring checks are order-sensitive: 'rose' is tested before
    'sweet' so that the line containing 'rose' is deliberately ignored
    ("wait for it") and only the following 'sweet' line gets a reply.
    """
    def receiveMessage(self, msg, sender):
        if isinstance(msg, JulietAppears):
            self.send(msg.juliet, "But, soft! what light through yonder window breaks?")
        elif isinstance(msg, ActorExitRequest):
            pass  # nothing special, just die
        elif msg == 'Ay me!':
            self.send(sender, 'She speaks!')
        elif msg == 'O Romeo, Romeo! wherefore art thou Romeo?':
            self.send(sender, 'Shall I hear more, or shall I speak at this?')
        elif 'rose' in msg:
            pass  # wait for it
        elif 'sweet' in msg:
            self.send(sender, 'Like softest music to attending ears!')
        elif 'hello' in msg:
            print('Hello from %s'%(str(self)))
        elif 'who_are_you' == msg:
            self.send(sender, self.myAddress)
        # otherwise sit and swoon
class Capulet(Actor):
    """Actor that creates a Juliet sub-actor on request."""
    def receiveMessage(self, msg, sender):
        # Only one inquiry is understood; everything else is ignored.
        if msg != "has a daughter?":
            return
        self.send(sender, self.createActor(Juliet))
class Juliet(Actor):
    """Sub-actor playing Juliet; coordinates with the Nurse actor.

    The reply order mirrors the scene: the string matches are evaluated
    top-to-bottom, and the nurse call is deferred (via `recalled`) when
    the nurse has not announced herself yet.
    """
    def __init__(self, *args, **kw):
        self.nurse = None      # Nurse's address, once she sends 'Mistress!'
        self.recalled = False  # True if the nurse was needed before she appeared
        super(Juliet, self).__init__(*args, **kw)
    def receiveMessage(self, msg, sender):
        if isinstance(msg, ActorExitRequest):
            pass  # nothing special, just die
        elif "what light" in msg:
            self.send(sender, 'Ay me!')
        elif msg == 'She speaks!':
            self.send(sender, 'O Romeo, Romeo! wherefore art thou Romeo?')
        elif msg == 'Shall I hear more, or shall I speak at this?':
            self.send(sender, "What's in a name? That which we call a rose")
            self.send(sender, "By any other name would smell as sweet")
        elif msg == 'Like softest music to attending ears!':
            if self.nurse:
                self.send(self.nurse, 'Anon, good nurse!')
            else:
                # Nurse not here yet: remember to call her when she arrives.
                self.recalled = True
        elif msg == 'Mistress!':
            self.nurse = sender
            if self.recalled:
                self.send(self.nurse, 'Anon, good nurse!')
        elif 'who_are_you' == msg:
            self.send(sender, self.myAddress)
class Nurse(Actor):
    """Actor that starts the balcony scene and records its completion."""
    def __init__(self, *args, **kw):
        self.heardItAll = False  # set once Juliet calls for the nurse
        super(Nurse, self).__init__(*args, **kw)
    def receiveMessage(self, msg, sender):
        # isinstance() is the idiomatic (and subclass-friendly) replacement
        # for the previous `type(msg) == type((1,2))` comparison.
        if isinstance(msg, tuple) and msg[0] == 'begin':
            # msg is ('begin', romeo_addr, juliet_addr): introduce the pair
            # and announce ourselves to Juliet so she can call on us.
            self.send(msg[1], JulietAppears(msg[2]))
            self.send(msg[2], 'Mistress!')
        elif msg == 'Anon, good nurse!':
            self.heardItAll = True
        elif msg == 'done?':
            self.send(sender, 'Fini' if self.heardItAll else 'not yet')
class JulietAppears:
    """Message announcing that Juliet is on stage at the given address."""
    stage = 'Right'

    def __init__(self, julietAddr):
        self.juliet = julietAddr
class TestFuncActors():
def test01_ActorSystemStartupShutdown(self, asys):
rosalineA = asys.createActor(rosaline)
# just finish, make sure no exception is thrown.
def test01_1_ActorSystemMultipleShutdown(self, asys):
rosalineA = asys.createActor(rosaline)
asys.shutdown()
asys.shutdown()
def test02_PrimaryActorCreation(self, asys):
romeo = asys.createActor(Romeo)
juliet = asys.createActor(Juliet)
assert romeo != juliet
def test03_CreateActorUniqueAddress(self, asys):
romeo = asys.createActor(Romeo)
juliet = asys.createActor(Juliet)
assert romeo != juliet
romeo2 = asys.createActor(Romeo)
assert romeo != romeo2
def NOtest04_PossibleActorSystemResourceExhaustion(self):
try:
addresses = [asys.createActor(Juliet) for n in range(10000)]
except OSError as err:
import errno
if err.errno == errno.EGAIN:
pass
else:
raise
def test05_ManyActorsUniqueAddress(self, asys):
addresses = [asys.createActor(Juliet) for n in range(50)]
uniqueAddresses = []
duplicates = []
for A in addresses:
if A in uniqueAddresses:
duplicates.append(A)
else:
uniqueAddresses.append(A)
if len(addresses) != len(uniqueAddresses):
print('Duplicates: %s'%map(str, duplicates))
if duplicates:
for each in duplicates:
print('... %s at: %s'%(str(each), str([N for N,A in enumerate(addresses) if A == each])))
print('Note: if this is a UDPTransport test, be advised that Linux occasionally does seem to assign the same UDP port multiple times. Linux bug?')
assert len(addresses) == len(uniqueAddresses)
def test06_ManyActorsValidAddresses(self, asys):
import string
addresses = [asys.createActor(Juliet) for n in range(100)]
for addr in addresses:
invchar = ''.join([c for c in str(addr)
if c not in string.ascii_letters + string.digits + "-~/():., '|>"])
assert str(addr) == str(addr) + invchar # invchar should be blank
if asys.base_name.startswith('multiprocUDP'):
# Normally the asys.shutdown() following this test will
# shutdown all actors, but for the multiprocUDP base, the
# ActorSystem (and logger) process are left behind because
# UDP does not have guaranteed delivery and 100 processes
# sending a UDP message to the ActorSystem nearly
# simultaneously overloads and drops packets. Use a more
# regulated shutdown here for UDP to avoid this overflow
# (which does not hurt anything but leaves actor processes
# behind).
per_loop = 10
for ii in range(0, len(addresses), per_loop):
for jj in range(ii, ii + per_loop):
asys.tell(addresses[jj], ActorExitRequest())
time.sleep(0.25)
def test07_SingleNonListeningActorTell(self, asys):
rosalineA = asys.createActor(rosaline)
# rosaline does not override the receiveMessage method, so the
# Actor default method will throw an exception. This will
# Kill the rosaline Actor. It's a top level Actor, so it will
# not be restarted. This will cause the 'hello' message to be
# delivered to the DeadLetterBox. Verify that no exception
# makes its way out of the ActorSystem here.
asys.tell(rosalineA, 'hello')
assert True
def test08_SingleActorTell(self, asys):
romeoA = asys.createActor(Romeo)
asys.tell(romeoA, 'hello')
# Nothing much happens, Romeo is smitten and has no time for trivialities, but
# he will try to generate str() of himself.
def test09_SingleActorAsk(self, asys):
romeoA = asys.createActor(Romeo)
resp = asys.ask(romeoA, 'O Romeo, Romeo! wherefore art thou Romeo?', 1)
assert resp, 'Shall I hear more == or shall I speak at this?'
def test10_ActorAskWithNoResponse(self, asys):
romeoA = asys.createActor(Romeo)
# This test is possibly unique to the simpleSystemBase, which
# will run an process all messages on an ask (or tell) call.
# Properly there is no way to determine if an answer is
# forthcoming from an asynchronous system, so all this can do
# is assert that there is no response within a particular time
# period. At this point, timing is not supported, so this
# test is underspecified and assumptive.
resp = asys.ask(romeoA, "What's in a name? That which we call a rose", 1.5)
assert resp is None
# Now verify that the Actor and system are still alive and operating normally.
resp = asys.ask(romeoA, 'O Romeo, Romeo! wherefore art thou Romeo?', 1)
assert resp, 'Shall I hear more == or shall I speak at this?'
def test11_SingleActorAskMultipleTimes(self, asys):
romeoA = asys.createActor(Romeo)
r = asys.ask(romeoA, 'O Romeo, Romeo! wherefore art thou Romeo?', 1)
assert r == 'Shall I hear more, or shall I speak at this?'
r = asys.ask(romeoA, 'O Romeo, Romeo! wherefore art thou Romeo?', 1)
assert r == 'Shall I hear more, or shall I speak at this?'
r = asys.ask(romeoA, 'Ay me!', 1)
assert r == 'She speaks!'
r = asys.ask(romeoA, 'O Romeo, Romeo! wherefore art thou Romeo?', 1)
assert r == 'Shall I hear more, or shall I speak at this?'
def test12_MultipleActorsAskMultipleTimes(self, asys):
romeo = asys.createActor(Romeo)
r = asys.ask(romeo, 'O Romeo, Romeo! wherefore art thou Romeo?', 1)
assert r == 'Shall I hear more, or shall I speak at this?'
juliet = asys.createActor(Juliet)
r = asys.ask(romeo, 'O Romeo, Romeo! wherefore art thou Romeo?', 1)
assert r == 'Shall I hear more, or shall I speak at this?'
r = asys.ask(romeo, 'Ay me!', 1)
assert r == 'She speaks!'
r = asys.ask(juliet, 'She speaks!', 1)
assert r == 'O Romeo, Romeo! wherefore art thou Romeo?'
r = asys.ask(romeo, 'Ay me!', 1)
assert r == 'She speaks!'
r = asys.ask(juliet, "Do you know what light that is?", 1)
assert r == 'Ay me!'
def test13_SubActorCreation(self, asys):
capulet = asys.createActor(Capulet)
juliet = asys.ask(capulet, 'has a daughter?', 2.5)
print ('Juliet is: %s'%str(juliet))
assert juliet is not None
if juliet:
r = asys.ask(juliet, 'what light?')
assert r == 'Ay me!', 0.75
juliet2 = asys.ask(capulet, 'has a daughter?', 1)
assert juliet2 is not None
if juliet2:
r = asys.ask(juliet2, 'what light?', 0.5)
assert r == 'Ay me!'
r = asys.ask(juliet, 'what light?', 0.5)
assert r == 'Ay me!'
    def test14_EntireActWithActorStart(self, asys):
        """Run the whole scripted act, polling the Nurse until 'Fini'."""
        romeo = asys.createActor(Romeo)
        juliet = asys.createActor(Juliet)
        nurse = asys.createActor(Nurse)
        assert asys.ask(nurse, 'done?', 1) == 'not yet'
        asys.tell(nurse, ('begin', romeo, juliet))
        for X in range(50):
            if asys.ask(nurse, 'done?', 1) == 'Fini':
                break
            time.sleep(0.01)  # Allow some time for the entire act
        r = asys.ask(nurse, 'done?', 1)
        assert r == 'Fini'

    def test15_IncompleteActMissingActor(self, asys):
        """The act tolerates a missing Nurse; creating her afterwards and
        restarting the act still runs it to completion."""
        romeo = asys.createActor(Romeo)
        juliet = asys.createActor(Juliet)
        # no nurse actor created
        asys.tell(romeo, JulietAppears(juliet))
        # No error should occur here when Juliet reaches the end and
        # doesn't have a nurse to tell.
        time.sleep(0.05)  # Allow some time for the entire act
        # Now create the nurse and tell her to talk to romeo and
        # juliet, which should cause completion
        nurse = asys.createActor(Nurse)
        r = asys.ask(nurse, 'done?', 1)
        assert r == 'not yet'
        asys.tell(nurse, ('begin', romeo, juliet))
        for X in range(50):
            if asys.ask(nurse, 'done?', 1) == 'Fini':
                break
            time.sleep(0.01)  # Allow some time for the entire act
        r = asys.ask(nurse, 'done?', 1)
        assert r == 'Fini'

    def test16_ActorProperties(self, asys):
        """Distinct actors report distinct, non-None identities."""
        romeo = asys.createActor(Romeo)
        juliet = asys.createActor(Juliet)
        r = asys.ask(romeo, 'who_are_you', 0.25)
        assert r is not None
        r = asys.ask(juliet, 'who_are_you', 0.25)
        assert r is not None
        r1 = asys.ask(romeo, 'who_are_you', 0.25)
        r2 = asys.ask(juliet, 'who_are_you', 0.25)
        assert r1 != r2
| |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import time
from cement.utils.misc import minimal_logger
from ebcli.objects.platform import PlatformVersion
from ebcli.resources.statics import namespaces, option_names
from ebcli.objects.solutionstack import SolutionStack
from ebcli.objects.exceptions import NotFoundError, InvalidStateError, \
AlreadyExistsError
from ebcli.objects.tier import Tier
from ebcli.lib import aws
from ebcli.lib.aws import InvalidParameterValueError
from ebcli.objects.event import Event
from ebcli.objects.environment import Environment
from ebcli.objects.application import Application
from ebcli.resources.strings import strings, responses
LOG = minimal_logger(__name__)
DEFAULT_ROLE_NAME = 'aws-elasticbeanstalk-ec2-role'
def _make_api_call(operation_name, **operation_options):
    """Dispatch `operation_name` to the 'elasticbeanstalk' AWS endpoint."""
    return aws.make_api_call('elasticbeanstalk',
                             operation_name,
                             **operation_options)
def delete_platform(arn):
    """Delete the custom platform version `arn` and its backing resources."""
    LOG.debug('Inside delete_platform api wrapper')
    return _make_api_call('delete_platform_version',
                          PlatformArn=arn,
                          DeleteResources=True)
def _make_equal_filter(filter_type, filter_value):
return { 'Type': filter_type, 'Operator': '=', 'Values': [filter_value] }
def list_platform_versions(platform_name=None, platform_version=None, status=None, owner=None):
    """Return every platform version matching the given filters.

    Pages through the list_platform_versions API, concatenating the
    summaries from each page.  Any of the filter arguments may be None
    to leave that dimension unfiltered.
    """
    kwargs = dict()
    result_filters = []
    if platform_name is not None:
        result_filters.append(_make_equal_filter('PlatformName', platform_name))
    if platform_version is not None:
        result_filters.append(_make_equal_filter('PlatformVersion', platform_version))
    if status:
        result_filters.append(_make_equal_filter('PlatformStatus', status))
    if owner:
        result_filters.append(_make_equal_filter('PlatformOwner', owner))
    kwargs['Filters'] = result_filters
    LOG.debug('Inside list_platform_versions api wrapper')
    platforms, nextToken = _list_platform_versions(kwargs)
    while nextToken is not None:
        # Bug fix: _list_platform_versions already sleeps 100ms before a
        # paged request; the extra sleep here doubled the throttle delay
        # to 200ms per page, so it has been removed.
        next_platforms, nextToken = _list_platform_versions(kwargs, nextToken)
        platforms = platforms + next_platforms
    return platforms
def describe_platform_version(arn):
    """Return the raw describe_platform_version response for `arn`."""
    LOG.debug('Inside describe_platform_version api wrapper')
    return _make_api_call('describe_platform_version',
                          PlatformArn=arn)
def _list_platform_versions(kwargs, nextToken=None):
    """Fetch one page of platform versions.

    Mutates `kwargs` to carry the pagination token and returns a tuple
    of (platform summaries, next page token or None).
    """
    if nextToken is not None:
        # Sleep for 100ms before pulling the next page to avoid throttling.
        time.sleep(0.1)
        kwargs['NextToken'] = nextToken
    response = _make_api_call(
        'list_platform_versions',
        **kwargs
    )
    platforms = response['PlatformSummaryList']
    # dict.get replaces the previous try/except KeyError: the token is
    # simply absent on the last page.
    return platforms, response.get('NextToken')
def create_application(app_name, descrip):
    """Create a new Elastic Beanstalk application.

    Raises AlreadyExistsError when the service reports the name is
    taken; any other invalid-parameter error is re-raised unchanged.
    """
    LOG.debug('Inside create_application api wrapper')
    try:
        result = _make_api_call('create_application',
                                ApplicationName=app_name,
                                Description=descrip)
    except InvalidParameterValueError as e:
        # The service signals "application exists" through a generic
        # InvalidParameterValue error; match on the canned message text.
        string = responses['app.exists'].replace('{app-name}', app_name)
        if e.message == string:
            raise AlreadyExistsError(e)
        else:
            raise e
    return result
def create_platform_version(platform_name, version, s3_bucket, s3_key, instance_profile, key_name, instance_type, vpc = None):
    """Create a custom platform version.

    Assembles option settings for the platform-builder environment
    (instance profile, key pair, instance type and optional VPC wiring),
    always enables enhanced health and attaches the default service
    role, then calls the create_platform_version API.
    """
    kwargs = dict()
    if s3_bucket and s3_key:
        # Platform definition bundle previously uploaded to S3.
        kwargs['PlatformDefinitionBundle'] = {'S3Bucket': s3_bucket, 'S3Key': s3_key}
    option_settings = []
    if instance_profile:
        option_settings.append({
            'Namespace': namespaces.LAUNCH_CONFIGURATION,
            'OptionName': option_names.IAM_INSTANCE_PROFILE,
            'Value': instance_profile
        })
    if key_name:
        option_settings.append({
            'Namespace': namespaces.LAUNCH_CONFIGURATION,
            'OptionName': option_names.EC2_KEY_NAME,
            'Value': key_name
        })
    if instance_type:
        option_settings.append({
            'Namespace': namespaces.LAUNCH_CONFIGURATION,
            'OptionName': option_names.INSTANCE_TYPE,
            'Value': instance_type
        })
    if vpc:
        # NOTE(review): assumes the vpc dict always carries 'id',
        # 'subnets' and 'publicip' keys -- confirm against callers.
        if vpc['id']:
            option_settings.append({
                'Namespace': namespaces.VPC,
                'OptionName': option_names.VPC_ID,
                'Value': vpc['id']
            })
        if vpc['subnets']:
            option_settings.append({
                'Namespace': namespaces.VPC,
                'OptionName': option_names.SUBNETS,
                'Value': vpc['subnets']
            })
        if vpc['publicip']:
            option_settings.append({
                'Namespace': namespaces.VPC,
                'OptionName': option_names.PUBLIC_IP,
                'Value': 'true'
            })
    # Always enable healthd for the Platform Builder environment
    option_settings.append({
        'Namespace': namespaces.HEALTH_SYSTEM,
        'OptionName': option_names.SYSTEM_TYPE,
        'Value': 'enhanced'
    })
    # Attach service role
    option_settings.append({
        'Namespace': namespaces.ENVIRONMENT,
        'OptionName': option_names.SERVICE_ROLE,
        'Value': 'aws-elasticbeanstalk-service-role'
    })
    LOG.debug('Inside create_platform_version api wrapper')
    return _make_api_call('create_platform_version',
                          PlatformName=platform_name,
                          PlatformVersion=version,
                          OptionSettings=option_settings,
                          **kwargs)
def create_application_version(app_name, vers_label, descrip, s3_bucket,
                               s3_key, process=False, repository=None, commit_id=None, build_configuration=None):
    """Create an application version.

    The source is either an S3 bundle (s3_bucket + s3_key) or a
    CodeCommit commit (repository + commit_id); build_configuration
    routes the bundle through CodeBuild.  Returns the raw API response.
    """
    kwargs = dict()
    kwargs['Process'] = process
    if descrip is not None:
        kwargs['Description'] = descrip
    if s3_bucket and s3_key:
        if build_configuration is None:
            kwargs['SourceBundle'] = {'S3Bucket': s3_bucket,
                                      'S3Key': s3_key}
        else:
            # CodeBuild consumes the bundle via SourceBuildInformation.
            kwargs['SourceBuildInformation'] = {'SourceType': 'Zip',
                                                'SourceRepository': 'S3',
                                                'SourceLocation': "{0}/{1}".format(s3_bucket, s3_key)}
    elif repository and commit_id:
        kwargs['SourceBuildInformation'] = {'SourceType': 'Git',
                                            'SourceRepository': 'CodeCommit',
                                            'SourceLocation': "{0}/{1}".format(repository, commit_id)}
        # CodeCommit-sourced versions must always be processed.
        kwargs['Process'] = True
    if build_configuration is not None:
        kwargs['BuildConfiguration'] = {"CodeBuildServiceRole": build_configuration.service_role,
                                        "Image": build_configuration.image,
                                        "ComputeType": build_configuration.compute_type,
                                        "TimeoutInMinutes": build_configuration.timeout}
        # CodeBuild-backed versions are always processed as well.
        kwargs['Process'] = True
    LOG.debug('Inside create_application_version api wrapper')
    return _make_api_call('create_application_version',
                          ApplicationName=app_name,
                          VersionLabel=vers_label,
                          **kwargs)
def create_environment(environment):
    """
    Creates an Elastic Beanstalk environment.

    Returns a tuple of (Environment object, API request id).
    """
    LOG.debug('Inside create_environment api wrapper')
    kwargs = environment.convert_to_kwargs()
    if environment.database:
        # need to know region for database string
        region = aws.get_region_name()
        # Database is a dictionary; inject the RDS template snippet so
        # the environment is created with its database resources.
        kwargs['TemplateSpecification'] = {
            'TemplateSnippets': [
                {'SnippetName': 'RdsExtensionEB',
                 'Order': 10000,
                 'SourceUrl': 'https://s3.amazonaws.com/'
                              'elasticbeanstalk-env-resources-' + region +
                              '/eb_snippets/rds/rds.json'}
            ]
        }
    result = _make_api_call('create_environment', **kwargs)
    # convert to object
    env = _api_to_environment(result)
    request_id = result['ResponseMetadata']['RequestId']
    return env, request_id

def clone_environment(clone):
    """Clone an existing environment; returns (Environment, request id)."""
    LOG.debug('Inside clone_environment api wrapper')
    kwargs = clone.convert_to_kwargs()
    # Template the new environment from the original one.
    kwargs['TemplateSpecification'] = \
        {'TemplateSource': {'EnvironmentName': clone.original_name}}
    result = _make_api_call('create_environment', **kwargs)
    # convert to object
    env = _api_to_environment(result)
    request_id = result['ResponseMetadata']['RequestId']
    return env, request_id
def _api_to_environment(api_dict, want_solution_stack = False):
    """Convert a raw environment dict from the API into an Environment.

    Resolves the platform to a PlatformVersion (or a SolutionStack when
    `want_solution_stack` is set) and the tier to a Tier object.
    """
    # Convert solution_stack and tier to objects
    try:
        if want_solution_stack:
            solution_stack_name = api_dict['SolutionStackName']
            platform = SolutionStack(solution_stack_name)
        else:
            platform_arn = api_dict['PlatformArn']
            platform = PlatformVersion(platform_arn)
    except KeyError:
        # NOTE(review): fallback presumably covers responses that carry
        # no PlatformArn -- confirm against API behavior.
        platform = SolutionStack(api_dict['SolutionStackName'])
    tier = api_dict['Tier']
    tier = Tier(tier['Name'], tier['Type'], tier['Version'])
    env = Environment(
        version_label=api_dict.get('VersionLabel'),
        status=api_dict.get('Status'),
        app_name=api_dict.get('ApplicationName'),
        health=api_dict.get('Health'),
        id=api_dict.get('EnvironmentId'),
        date_updated=api_dict.get('DateUpdated'),
        platform=platform,
        description=api_dict.get('Description'),
        name=api_dict.get('EnvironmentName'),
        date_created=api_dict.get('DateCreated'),
        tier=tier,
        cname=api_dict.get('CNAME', 'UNKNOWN'),
        option_settings=api_dict.get('OptionSettings'),
        is_abortable=api_dict.get('AbortableOperationInProgress', False),
        environment_links=api_dict.get('EnvironmentLinks')
    )
    return env
def delete_application(app_name):
    """Delete an application; returns the API request id."""
    LOG.debug('Inside delete_application api wrapper')
    result = _make_api_call('delete_application',
                            ApplicationName=app_name)
    return result['ResponseMetadata']['RequestId']

def delete_application_version(app_name, version_label):
    """Delete one application version and its S3 source bundle."""
    LOG.debug('Inside delete_application_version api wrapper')
    result = _make_api_call('delete_application_version',
                            ApplicationName=app_name,
                            VersionLabel=version_label,
                            DeleteSourceBundle=True)
    return result['ResponseMetadata']['RequestId']

def delete_application_and_envs(app_name):
    """Delete an application, force-terminating all its environments."""
    LOG.debug('Inside delete_application_and_envs')
    result = _make_api_call('delete_application',
                            ApplicationName=app_name,
                            TerminateEnvByForce=True)
    return result['ResponseMetadata']['RequestId']

def describe_application(app_name):
    """Return the single application dict, or raise NotFoundError."""
    LOG.debug('Inside describe_application api wrapper')
    result = _make_api_call('describe_applications',
                            ApplicationNames=[app_name])
    apps = result['Applications']
    if len(apps) != 1:
        raise NotFoundError('Application "' + app_name + '" not found.')
    return apps[0]

def is_cname_available(cname):
    """True when the CNAME prefix is still free to claim."""
    LOG.debug('Inside is_cname_available api wrapper')
    result = _make_api_call('check_dns_availability',
                            CNAMEPrefix=cname)
    return result['Available']

def swap_environment_cnames(source_env, dest_env):
    """Swap the CNAMEs of two environments; returns the request id."""
    LOG.debug('Inside swap_environment_cnames api wrapper')
    result = _make_api_call('swap_environment_cnames',
                            SourceEnvironmentName=source_env,
                            DestinationEnvironmentName=dest_env)
    return result['ResponseMetadata']['RequestId']

def describe_applications():
    """Return the raw list of application dicts."""
    LOG.debug('Inside describe_applications api wrapper')
    result = _make_api_call('describe_applications')
    return result['Applications']

def application_exist(app_name):
    """True when `app_name` exists; swallows only NotFoundError."""
    try:
        describe_application(app_name)
    except NotFoundError:
        return False
    return True
def describe_configuration_settings(app_name, env_name):
    """Return the first ConfigurationSettings entry for an environment."""
    LOG.debug('Inside describe_configuration_settings api wrapper')
    result = _make_api_call('describe_configuration_settings',
                            ApplicationName=app_name,
                            EnvironmentName=env_name)
    return result['ConfigurationSettings'][0]

def get_option_setting_from_environment(app_name, env_name, namespace, option):
    """Look up one option value from a live environment, or None."""
    env = describe_configuration_settings(app_name, env_name)
    try:
        option_settings = env['OptionSettings']
        return get_option_setting(option_settings, namespace, option)
    except KeyError:
        # Environment carried no OptionSettings at all.
        return None
def get_option_setting(option_settings, namespace, option):
    """Return the 'Value' of the first setting matching namespace+option.

    Returns None when no setting matches, or when the matching setting
    has no 'Value' key (dict.get replaces the old try/except KeyError).
    """
    for setting in option_settings:
        if setting['Namespace'] == namespace and \
                setting['OptionName'] == option:
            return setting.get('Value')
    return None
def create_option_setting(namespace, option, value):
    """Build one option-setting dict in the shape the EB API expects."""
    return dict(Namespace=namespace, OptionName=option, Value=value)
def get_specific_configuration(env_config, namespace, option):
    """Fetch one option value out of an already-retrieved config dict."""
    return get_option_setting(env_config['OptionSettings'], namespace, option)

def get_specific_configuration_for_env(app_name, env_name, namespace, option):
    """Fetch one option value for an environment by querying the API."""
    env_config = describe_configuration_settings(app_name, env_name)
    return get_specific_configuration(env_config, namespace, option)
def get_available_solution_stacks(fail_on_empty_response=True):
    """Return the available solution stacks as SolutionStack objects.

    Raises NotFoundError on an empty result unless
    `fail_on_empty_response` is False.
    """
    LOG.debug('Inside get_available_solution_stacks api wrapper')
    api_result = _make_api_call('list_available_solution_stacks')
    stack_strings = api_result['SolutionStacks']
    LOG.debug('Solution Stack result size = ' + str(len(stack_strings)))
    if fail_on_empty_response and not stack_strings:
        raise NotFoundError(strings['sstacks.notfound'])
    return [SolutionStack(stack) for stack in stack_strings]
def get_application_versions(app_name, version_labels=None, max_records=None, next_token=None):
    """Describe application versions, optionally filtered/paged.

    Returns the raw API response (includes NextToken when more pages
    remain).
    """
    LOG.debug('Inside get_application_versions api wrapper')
    kwargs = {}
    if version_labels:
        kwargs['VersionLabels'] = version_labels
    if max_records:
        kwargs['MaxRecords'] = max_records
    if next_token:
        time.sleep(0.1)  # To avoid throttling we sleep for 100ms before requesting the next page
        kwargs['NextToken'] = next_token
    result = _make_api_call('describe_application_versions',
                            ApplicationName=app_name,
                            **kwargs)
    return result
def get_all_applications():
    """Return every application in the account as Application objects."""
    LOG.debug('Inside get_all_applications api wrapper')
    result = _make_api_call('describe_applications')
    app_list = []
    for app in result['Applications']:
        app_list.append(
            Application(
                name=app['ApplicationName'],
                date_created=app['DateCreated'],
                date_updated=app['DateUpdated'],
                # dict.get replaces the previous try/except KeyError --
                # Description and Versions are optional API fields.
                description=app.get('Description'),
                versions=app.get('Versions'),
                templates=app['ConfigurationTemplates'],
            )
        )
    return app_list
def get_raw_app_environments(app_name, include_deleted=False, deleted_back_to=None):
    """Return an application's environments as raw API dicts."""
    # Bug fix: the debug message previously said 'get_app_environments'
    # (copy-paste), which made traces misleading.
    LOG.debug('Inside get_raw_app_environments api wrapper')
    kwargs = {}
    if include_deleted and deleted_back_to is not None:
        kwargs['IncludedDeletedBackTo'] = deleted_back_to
    result = _make_api_call('describe_environments',
                            ApplicationName=app_name,
                            IncludeDeleted=include_deleted,
                            **kwargs)
    return result['Environments']
def get_app_environments(app_name, include_deleted=False, deleted_back_to=None):
    """Return an application's environments as Environment objects."""
    LOG.debug('Inside get_app_environments api wrapper')
    kwargs = {}
    if include_deleted and deleted_back_to is not None:
        kwargs['IncludedDeletedBackTo'] = deleted_back_to
    result = _make_api_call('describe_environments',
                            ApplicationName=app_name,
                            IncludeDeleted=include_deleted,
                            **kwargs)
    # convert to objects
    envs = [_api_to_environment(env) for env in result['Environments']]
    return envs

def get_all_environments():
    """Return every non-deleted environment as Environment objects."""
    LOG.debug('Inside get_all_environments api wrapper')
    result = _make_api_call('describe_environments',
                            IncludeDeleted=False)
    # convert to object
    envs = []
    for env in result['Environments']:
        envs.append(_api_to_environment(env))
    return envs
def get_environment(app_name, env_name, env_id=None, include_deleted=False, deleted_back_to=None, want_solution_stack=False):
    """Return a single Environment matched by name and/or id.

    Raises NotFoundError when no environment matches.
    """
    LOG.debug('Inside get_environment api wrapper')
    kwargs = {}
    if app_name is not None:
        kwargs['ApplicationName'] = app_name
    if env_name is not None:
        kwargs['EnvironmentNames'] = [env_name]
    if env_id is not None:
        kwargs['EnvironmentIds'] = [env_id]
    if include_deleted and deleted_back_to is not None:
        kwargs['IncludedDeletedBackTo'] = deleted_back_to
    result = _make_api_call('describe_environments',
                            IncludeDeleted=include_deleted,
                            **kwargs)
    envs = result['Environments']
    if len(envs) < 1:
        # Prefer the name in the error message; fall back to the id.
        env_str = env_id if env_name is None else env_name
        raise NotFoundError('Environment "' + env_str + '" not Found.')
    else:
        return _api_to_environment(envs[0], want_solution_stack)
def get_environments(env_names=None):
    """Return Environment objects for the named environments.

    Raises NotFoundError when nothing matches.  The default was changed
    from a mutable `[]` to None (behavior-equivalent for callers).
    """
    LOG.debug('Inside get_environments api wrapper')
    if env_names is None:
        env_names = []
    result = _make_api_call('describe_environments',
                            EnvironmentNames=env_names,
                            IncludeDeleted=False)
    envs = result['Environments']
    if len(envs) < 1:
        # Bug fix: env_names is a list; concatenating it into the error
        # string raised TypeError on the not-found path.  Join it.
        raise NotFoundError('Could not find any environments '
                            'from the list: [' + ', '.join(env_names) + ']')
    return [_api_to_environment(env) for env in envs]
def get_environment_settings(app_name, env_name):
    """Return an environment's configuration as an Environment object."""
    LOG.debug('Inside get_environment_settings api wrapper')
    result = _make_api_call('describe_configuration_settings',
                            ApplicationName=app_name,
                            EnvironmentName=env_name)
    return _api_to_environment(result['ConfigurationSettings'][0])

def get_environment_resources(env_name):
    """Return the raw describe_environment_resources response."""
    LOG.debug('Inside get_environment_resources api wrapper')
    result = _make_api_call('describe_environment_resources',
                            EnvironmentName=env_name)
    return result
def get_new_events(app_name, env_name, request_id,
                   last_event_time=None, version_label=None, platform_arn=None):
    """Return Event objects newer than `last_event_time`.

    All of app_name / env_name / request_id / version_label /
    platform_arn narrow the query when non-empty.
    """
    LOG.debug('Inside get_new_events api wrapper')
    if last_event_time is not None:
        # Nudge the timestamp forward 1ms so we only receive events
        # strictly after the last one already seen.
        # Bug fix: the original bound this to a local named `time`,
        # shadowing the imported time module.
        new_time = last_event_time + datetime.timedelta(0, 0, 1000)
    else:
        new_time = None
    kwargs = {}
    if app_name:
        kwargs['ApplicationName'] = app_name
    if version_label:
        kwargs['VersionLabel'] = version_label
    if env_name:
        kwargs['EnvironmentName'] = env_name
    if request_id:
        kwargs['RequestId'] = request_id
    if new_time:
        kwargs['StartTime'] = str(new_time)
    if platform_arn:
        kwargs['PlatformArn'] = platform_arn
    result = _make_api_call('describe_events',
                            **kwargs)
    # Convert to Event objects.  dict.get replaces the per-field
    # try/except KeyError (and no longer rebinds the function's own
    # app_name / version_label parameters inside the loop).
    events = []
    for event in result['Events']:
        events.append(
            Event(message=event['Message'],
                  event_date=event['EventDate'],
                  version_label=event.get('VersionLabel'),
                  app_name=event.get('ApplicationName'),
                  environment_name=event.get('EnvironmentName'),
                  severity=event['Severity'],
                  platform=platform_arn
                  )
        )
    return events
def get_storage_location():
    """Return the account's EB S3 bucket (created if necessary)."""
    LOG.debug('Inside get_storage_location api wrapper')
    response = _make_api_call('create_storage_location')
    return response['S3Bucket']
def update_environment(env_name, options, remove=None,
                       template=None, template_body=None,
                       solution_stack_name=None,
                       platform_arn=None):
    """Update an environment's options, template, or platform.

    Returns the request id; raises InvalidStateError when the service
    reports the environment cannot be updated right now.
    """
    LOG.debug('Inside update_environment api wrapper')
    if remove is None:
        remove = []
    kwargs = {
        'EnvironmentName': env_name,
    }
    if options:
        kwargs['OptionSettings'] = options
    if remove:
        kwargs['OptionsToRemove'] = remove
    if template:
        kwargs['TemplateName'] = template
    if template_body:
        kwargs['TemplateSpecification'] = \
            {'TemplateSource':
                {'SourceContents': template_body}}
    if solution_stack_name:
        kwargs['SolutionStackName'] = solution_stack_name
    if platform_arn:
        kwargs['PlatformArn'] = platform_arn
    try:
        response = _make_api_call('update_environment',
                                  **kwargs)
    except aws.InvalidParameterValueError as e:
        # "Invalid state" arrives as a generic error; match on the
        # canned message to raise the more specific exception.
        if e.message == responses['env.invalidstate'].replace('{env-name}',
                                                              env_name):
            raise InvalidStateError(e)
        else:
            raise
    return response['ResponseMetadata']['RequestId']
def abort_environment_update(env_name):
    """Abort an in-progress environment update; returns the request id."""
    LOG.debug('Inside abort_environment_update')
    result = _make_api_call('abort_environment_update',
                            EnvironmentName=env_name)
    return result['ResponseMetadata']['RequestId']

def update_application_resource_lifecycle(app_name, resource_config):
    """Update an application's resource lifecycle configuration."""
    LOG.debug('Inside update_application_resource_lifecycle api wrapper')
    response = _make_api_call('update_application_resource_lifecycle',
                              ApplicationName=app_name,
                              ResourceLifecycleConfig=resource_config)
    return response
def update_env_application_version(env_name,
                                   version_label,
                                   group_name):
    """Deploy `version_label` to an environment; returns the request id.

    `group_name` is forwarded only when truthy.
    """
    LOG.debug('Inside update_env_application_version api wrapper')
    call_args = {
        'EnvironmentName': env_name,
        'VersionLabel': version_label,
    }
    if group_name:
        call_args['GroupName'] = group_name
    response = _make_api_call('update_environment', **call_args)
    return response['ResponseMetadata']['RequestId']
def request_environment_info(env_name, info_type):
    """Ask the environment to gather info of `info_type` (e.g. logs)."""
    result = _make_api_call('request_environment_info',
                            EnvironmentName=env_name,
                            InfoType=info_type)
    return result

def retrieve_environment_info(env_name, info_type):
    """Retrieve info previously gathered by request_environment_info."""
    result = _make_api_call('retrieve_environment_info',
                            EnvironmentName=env_name,
                            InfoType=info_type)
    return result

def terminate_environment(env_name, force_terminate=False):
    """Terminate an environment; returns the request id."""
    result = _make_api_call('terminate_environment',
                            EnvironmentName=env_name,
                            ForceTerminate=force_terminate)
    return result['ResponseMetadata']['RequestId']
def create_configuration_template(app_name, env_name, template_name,
                                  description):
    """Save an environment's current configuration as a named template.

    Raises AlreadyExistsError when a template of that name exists.
    """
    kwargs = {
        'TemplateName': template_name,
        'ApplicationName': app_name,
        'Description': description,
        'TemplateSpecification':
            {'TemplateSource':
                {'EnvironmentName': env_name}},
    }
    try:
        result = _make_api_call('create_configuration_template', **kwargs)
    except InvalidParameterValueError as e:
        # Name collisions come back as a generic invalid-parameter error;
        # match the canned message text.
        if e.message == responses['cfg.nameexists'].replace('{name}',
                                                            template_name):
            raise AlreadyExistsError(e.message)
        else:
            raise
    return result
def delete_configuration_template(app_name, template_name):
    """Delete a saved configuration template."""
    _make_api_call('delete_configuration_template',
                   ApplicationName=app_name,
                   TemplateName=template_name)

def validate_template(app_name, template_name, platform=None):
    """Validate a configuration template, optionally against a platform.

    `platform` may be either a platform ARN or a solution stack name.
    """
    kwargs = {}
    if platform:
        if PlatformVersion.is_valid_arn(platform):
            kwargs['TemplateSpecification'] = \
                {'TemplateSource':
                    {'PlatformArn': platform}}
        else:
            kwargs['TemplateSpecification'] = \
                {'TemplateSource':
                    {'SolutionStackName': platform}}
    result = _make_api_call('validate_configuration_settings',
                            ApplicationName=app_name,
                            TemplateName=template_name,
                            **kwargs)
    return result

def describe_template(app_name, template_name):
    """Return the first ConfigurationSettings entry for a template."""
    LOG.debug('Inside describe_template api wrapper')
    result = _make_api_call('describe_configuration_settings',
                            ApplicationName=app_name,
                            TemplateName=template_name)
    return result['ConfigurationSettings'][0]
def get_environment_health(env_name, attributes=None):
    """Return enhanced-health data for an environment.

    `attributes` defaults to the full attribute set.
    """
    if attributes is None:
        attributes = [
            "HealthStatus",
            "Status",
            "Color",
            "Causes",
            "ApplicationMetrics",
            "InstancesHealth",
            "RefreshedAt",
        ]
    result = _make_api_call('describe_environment_health',
                            EnvironmentName=env_name,
                            AttributeNames=attributes)
    return result

def get_instance_health(env_name, next_token=None, attributes=None):
    """Return one page of per-instance health data for an environment."""
    if attributes is None:
        attributes = [
            "HealthStatus",
            "Color",
            "Causes",
            "ApplicationMetrics",
            "RefreshedAt",
            "LaunchedAt",
            "System",
            "Deployment",
            "AvailabilityZone",
            "InstanceType",
        ]
    kwargs = {}
    if next_token:
        time.sleep(0.1)  # To avoid throttling we sleep for 100ms before requesting the next page
        kwargs['NextToken'] = next_token
    result = _make_api_call('describe_instances_health',
                            EnvironmentName=env_name,
                            AttributeNames=attributes,
                            **kwargs)
    return result
def compose_environments(application_name, version_labels_list, group_name=None):
    """Compose environments for the given version labels; returns request id."""
    extra = {'GroupName': group_name} if group_name is not None else {}
    response = _make_api_call('compose_environments',
                              ApplicationName=application_name,
                              VersionLabels=version_labels_list,
                              **extra)
    return response['ResponseMetadata']['RequestId']

def rebuild_environment(env_id=None, env_name=None):
    """Rebuild an environment selected by id and/or name; returns request id."""
    selector = {}
    if env_name is not None:
        selector['EnvironmentName'] = env_name
    if env_id is not None:
        selector['EnvironmentId'] = env_id
    response = _make_api_call('rebuild_environment', **selector)
    return response['ResponseMetadata']['RequestId']
| |
# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.tests.watchers.test_datastore_utils
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Mike Grima <mgrima@netflix.com>
"""
import json
from collections import defaultdict
from security_monkey.datastore import Account, Technology, AccountType, ItemAudit
from security_monkey.tests import SecurityMonkeyTestCase, db
from security_monkey.watcher import ChangeItem
from security_monkey import ARN_PREFIX
# Canonical "active" IAM role configuration used throughout these tests:
# carries an Arn plus a policy payload, which the test suite expects
# is_active() to report as live.
ACTIVE_CONF = {
    "account_number": "012345678910",
    "technology": "iamrole",
    "region": "universal",
    "name": "SomeRole",
    "policy": {
        "Statement": [
            {
                "Effect": "Allow",
                "Action": "*",
                "Resource": "*"
            }
        ]
    },
    "Arn": ARN_PREFIX + ":iam::012345678910:role/SomeRole"
}
class SomeTestItem(ChangeItem):
    """Minimal ChangeItem subclass used as a stand-in watcher item."""
    def __init__(self, account=None, name=None, arn=None, config=None):
        super(SomeTestItem, self).__init__(
            index="iamrole",
            region='universal',
            account=account,
            name=name,
            arn=arn,
            new_config=config or {})

    @classmethod
    def from_slurp(cls, role, **kwargs):
        # Build an item from a slurped role dict (see ACTIVE_CONF shape).
        return cls(
            account=kwargs['account_name'],
            name=role['name'],
            config=role,
            arn=role['Arn'])
class SomeWatcher:
    """Stub watcher exposing only the ephemeral_paths attribute."""
    def __init__(self):
        self.ephemeral_paths = []
class DatabaseUtilsTestCase(SecurityMonkeyTestCase):
    def tearDown(self):
        # Reset the global auditor registry so registrations made by one
        # test cannot leak into the next.
        import security_monkey.auditor
        security_monkey.auditor.auditor_registry = defaultdict(list)
        super(DatabaseUtilsTestCase, self).tearDown()

    def setup_db(self):
        """Seed the test DB with one AWS account and the iamrole technology."""
        account_type_result = AccountType(name='AWS')
        db.session.add(account_type_result)
        db.session.commit()
        self.account = Account(identifier="012345678910", name="testing",
                               account_type_id=account_type_result.id)
        self.technology = Technology(name="iamrole")
        db.session.add(self.account)
        db.session.add(self.technology)
        db.session.commit()

    def test_is_active(self):
        """is_active() is False for deleted-looking configs, True for live ones."""
        from security_monkey.datastore_utils import is_active
        # Only an Arn, nothing else: expected inactive.
        not_active = {"Arn": ARN_PREFIX + ":iam::012345678910:role/someDeletedRole"}
        assert not is_active(not_active)
        # Identifying fields but no config payload: also expected inactive.
        still_not_active = {
            "account_number": "012345678910",
            "technology": "iamrole",
            "region": "universal",
            "name": "somethingThatWasDeleted"
        }
        assert not is_active(still_not_active)
        assert is_active(ACTIVE_CONF)
    def test_create_revision(self):
        """create_revision() yields an active revision tied to the DB item."""
        from security_monkey.datastore_utils import create_revision
        from security_monkey.datastore import Item
        self.setup_db()
        db_item = Item(region="universal",
                       name="SomeRole",
                       arn=ARN_PREFIX + ":iam::012345678910:role/SomeRole",
                       tech_id=self.technology.id,
                       account_id=self.account.id
                       )
        db.session.add(db_item)
        db.session.commit()
        revision = create_revision(ACTIVE_CONF, db_item)
        assert revision
        assert revision.active
        assert json.dumps(revision.config) == json.dumps(ACTIVE_CONF)
        assert revision.item_id == db_item.id

    def test_create_item_aws(self):
        """create_item_aws() maps a ChangeItem onto a datastore Item."""
        from security_monkey.datastore_utils import create_item_aws
        self.setup_db()
        sti = SomeTestItem.from_slurp(ACTIVE_CONF, account_name=self.account.name)
        item = create_item_aws(sti, self.technology, self.account)
        assert item.region == "universal"
        assert item.name == "SomeRole"
        assert item.arn == ARN_PREFIX + ":iam::012345678910:role/SomeRole"
        assert item.tech_id == self.technology.id
        assert item.account_id == self.account.id
    def test_hash_item(self):
        """hash_item() returns (complete, durable) hashes: the durable hash
        ignores ephemeral paths while the complete hash covers everything."""
        from security_monkey.datastore_utils import hash_item
        test_config = {
            "SomeDurableProp": "is some value",
            "ephemeralPath": "some thing that changes",
            "some_area": {
                "some_nested_place": {
                    "Durable": True
                },
                "ephemeral": True
            }
        }
        ephemeral_paths = [
            "ephemeralPath",
            "some_area*$ephemeral"
        ]
        # Ran the first time -- verified that this is correct:
        original_complete_hash = "85b8874a7ca98d7f5f4587d80d310bc5"
        durable_hash = "1d1d718ea820b14f620f5262ae6d06fb"
        assert hash_item(test_config, ephemeral_paths) == (original_complete_hash, durable_hash)
        # Change a durable value:
        test_config["SomeDurableProp"] = "is some OTHER value"
        assert hash_item(test_config, ephemeral_paths) != (original_complete_hash, durable_hash)
        # Go back:
        test_config["SomeDurableProp"] = "is some value"
        assert hash_item(test_config, ephemeral_paths) == (original_complete_hash, durable_hash)
        # Change ephemeral values:
        test_config["ephemeralPath"] = "askldjfpwojf0239f32"
        test_ephemeral = hash_item(test_config, ephemeral_paths)
        # Only the complete hash moves; the durable hash is unaffected.
        assert test_ephemeral[0] != original_complete_hash
        assert test_ephemeral[1] == durable_hash
    def test_result_from_item(self):
        """result_from_item() finds the DB Item matching a ChangeItem."""
        from security_monkey.datastore_utils import result_from_item
        from security_monkey.datastore import Item
        self.setup_db()
        item = Item(region="universal",
                    name="SomeRole",
                    arn=ARN_PREFIX + ":iam::012345678910:role/SomeRole",
                    tech_id=self.technology.id,
                    account_id=self.account.id
                    )
        # This is actually what is passed into result_from_item:
        sti = SomeTestItem().from_slurp(ACTIVE_CONF, account_name=self.account.name)
        # Item not committed yet => no result.
        assert not result_from_item(sti, self.account, self.technology)
        db.session.add(item)
        db.session.commit()
        assert result_from_item(sti, self.account, self.technology).id == item.id

    def test_detect_change(self):
        """detect_change() classifies created / durable / ephemeral / no-op."""
        from security_monkey.datastore_utils import detect_change, hash_item
        from security_monkey.datastore import Item
        self.setup_db()
        item = Item(region="universal",
                    name="SomeRole",
                    arn=ARN_PREFIX + ":iam::012345678910:role/SomeRole",
                    tech_id=self.technology.id,
                    account_id=self.account.id,
                    )
        sti = SomeTestItem().from_slurp(ACTIVE_CONF, account_name=self.account.name)
        # Get the hash:
        complete_hash, durable_hash = hash_item(sti.config, [])
        # Item does not exist in the DB yet:
        assert (True, 'durable', None, 'created') == detect_change(sti, self.account, self.technology, complete_hash,
                                                                   durable_hash)
        # Add the item to the DB:
        db.session.add(item)
        db.session.commit()
        # Durable change (nothing hashed in DB yet)
        assert (True, 'durable', item, 'changed') == detect_change(sti, self.account, self.technology, complete_hash,
                                                                   durable_hash)
        # No change:
        item.latest_revision_complete_hash = complete_hash
        item.latest_revision_durable_hash = durable_hash
        db.session.add(item)
        db.session.commit()
        assert (False, None, item, None) == detect_change(sti, self.account, self.technology, complete_hash,
                                                          durable_hash)
        # Ephemeral change:
        mod_conf = dict(ACTIVE_CONF)
        mod_conf["IGNORE_ME"] = "I am ephemeral!"
        complete_hash, durable_hash = hash_item(mod_conf, ["IGNORE_ME"])
        assert (True, 'ephemeral', item, None) == detect_change(sti, self.account, self.technology, complete_hash,
                                                                durable_hash)
    def test_persist_item(self):
        """persist_item() stores revisions and updates the item's hashes."""
        from security_monkey.datastore_utils import persist_item, hash_item, result_from_item
        self.setup_db()
        sti = SomeTestItem().from_slurp(ACTIVE_CONF, account_name=self.account.name)
        # Get the hash:
        complete_hash, durable_hash = hash_item(sti.config, [])
        # Persist a durable change:
        persist_item(sti, None, self.technology, self.account, complete_hash, durable_hash, True)
        db_item = result_from_item(sti, self.account, self.technology)
        assert db_item
        assert db_item.revisions.count() == 1
        assert db_item.latest_revision_durable_hash == durable_hash == complete_hash
        assert db_item.latest_revision_complete_hash == complete_hash == durable_hash
        # No changes:
        persist_item(sti, db_item, self.technology, self.account, complete_hash, durable_hash, True)
        db_item = result_from_item(sti, self.account, self.technology)
        assert db_item
        assert db_item.revisions.count() == 1
        assert db_item.latest_revision_durable_hash == complete_hash == durable_hash
        assert db_item.latest_revision_complete_hash == complete_hash == durable_hash
        # Ephemeral change:
        mod_conf = dict(ACTIVE_CONF)
        mod_conf["IGNORE_ME"] = "I am ephemeral!"
        new_complete_hash, new_durable_hash = hash_item(mod_conf, ["IGNORE_ME"])
        sti = SomeTestItem().from_slurp(mod_conf, account_name=self.account.name)
        # Ephemeral persists create no new revision; only the complete
        # hash is expected to move.
        persist_item(sti, db_item, self.technology, self.account, new_complete_hash, new_durable_hash, False)
        db_item = result_from_item(sti, self.account, self.technology)
        assert db_item
        assert db_item.revisions.count() == 1
        assert db_item.latest_revision_durable_hash == new_durable_hash == durable_hash
        assert db_item.latest_revision_complete_hash == new_complete_hash != complete_hash
def test_inactivate_old_revisions(self):
    """Verify inactivate_old_revisions() deactivates items absent from the ARN list.

    Creates SomeRole0..SomeRole2 (each with two issues), then reports only
    SomeRole0 (plus a non-existent ARN) as still present: the latest
    revisions of the other two items must be marked inactive while
    SomeRole0 stays active and keeps its issues.
    """
    from security_monkey.datastore_utils import inactivate_old_revisions, hash_item, persist_item, result_from_item
    from security_monkey.datastore import ItemRevision, Item
    self.setup_db()
    # Need to create 3 items first before we can test deletions:
    for x in range(0, 3):
        modConf = dict(ACTIVE_CONF)
        modConf["name"] = "SomeRole{}".format(x)
        modConf["Arn"] = ARN_PREFIX + ":iam::012345678910:role/SomeRole{}".format(x)
        sti = SomeTestItem().from_slurp(modConf, account_name=self.account.name)
        # Get the hash:
        complete_hash, durable_hash = hash_item(sti.config, [])
        # persist:
        persist_item(sti, None, self.technology, self.account, complete_hash, durable_hash, True)
        db_item = result_from_item(sti, self.account, self.technology)
        # Add issues for these items: (just add two for testing purposes)
        db.session.add(ItemAudit(score=10,
                                 issue="IAM Role has full admin permissions.",
                                 notes=json.dumps(sti.config),
                                 item_id=db_item.id))
        db.session.add(ItemAudit(score=9001, issue="Some test issue", notes="{}", item_id=db_item.id))
        db.session.commit()
    # Now, actually test for deleted revisions:
    arns = [
        ARN_PREFIX + ":iam::012345678910:role/SomeRole",  # <-- Does not exist in the list
        ARN_PREFIX + ":iam::012345678910:role/SomeRole0",  # <-- Does exist -- should not get deleted
    ]
    inactivate_old_revisions(SomeWatcher(), arns, self.account, self.technology)
    # Check that SomeRole1 and SomeRole2 are marked as inactive:
    for x in range(1, 3):
        item_revision = ItemRevision.query.join((Item, ItemRevision.id == Item.latest_revision_id)).filter(
            Item.arn == ARN_PREFIX + ":iam::012345678910:role/SomeRole{}".format(x),
        ).one()
        assert not item_revision.active
    # Check that the SomeRole0 is still OK:
    item_revision = ItemRevision.query.join((Item, ItemRevision.id == Item.latest_revision_id)).filter(
        Item.arn == ARN_PREFIX + ":iam::012345678910:role/SomeRole0").one()
    # Its two issues must survive and the revision must remain active.
    assert len(ItemAudit.query.filter(ItemAudit.item_id == item_revision.item_id).all()) == 2
    assert item_revision.active
| |
"""
Connect to a MySensors gateway via pymysensors API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mysensors/
"""
import logging
import socket
import voluptuous as vol
from homeassistant.bootstrap import setup_component
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (ATTR_BATTERY_LEVEL, CONF_OPTIMISTIC,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP, STATE_OFF, STATE_ON)
from homeassistant.helpers import discovery
from homeassistant.loader import get_component
_LOGGER = logging.getLogger(__name__)

# State-attribute names exposed on mysensors entities.
ATTR_NODE_ID = 'node_id'
ATTR_CHILD_ID = 'child_id'
ATTR_DESCRIPTION = 'description'
ATTR_DEVICE = 'device'

# Configuration keys.
CONF_BAUD_RATE = 'baud_rate'
CONF_DEVICE = 'device'
CONF_DEBUG = 'debug'
CONF_GATEWAYS = 'gateways'
CONF_PERSISTENCE = 'persistence'
CONF_PERSISTENCE_FILE = 'persistence_file'
CONF_TCP_PORT = 'tcp_port'
CONF_TOPIC_IN_PREFIX = 'topic_in_prefix'
CONF_TOPIC_OUT_PREFIX = 'topic_out_prefix'
CONF_RETAIN = 'retain'
CONF_VERSION = 'version'

# Defaults for optional gateway settings.
DEFAULT_VERSION = 1.4
DEFAULT_BAUD_RATE = 115200
DEFAULT_TCP_PORT = 5003

DOMAIN = 'mysensors'
# Mapping of device id -> gateway wrapper; populated by setup().
GATEWAYS = None
MQTT_COMPONENT = 'mqtt'
REQUIREMENTS = [
    'https://github.com/theolind/pymysensors/archive/'
    '0b705119389be58332f17753c53167f551254b6c.zip#pymysensors==0.8']

# Validated configuration: a list of gateways plus component-wide options.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_GATEWAYS): vol.All(cv.ensure_list, [
            {
                vol.Required(CONF_DEVICE): cv.string,
                vol.Optional(CONF_PERSISTENCE_FILE): cv.string,
                vol.Optional(
                    CONF_BAUD_RATE,
                    default=DEFAULT_BAUD_RATE): cv.positive_int,
                vol.Optional(
                    CONF_TCP_PORT,
                    default=DEFAULT_TCP_PORT): cv.port,
                vol.Optional(CONF_TOPIC_IN_PREFIX, default=''): cv.string,
                vol.Optional(CONF_TOPIC_OUT_PREFIX, default=''): cv.string,
            },
        ]),
        vol.Optional(CONF_DEBUG, default=False): cv.boolean,
        vol.Optional(CONF_OPTIMISTIC, default=False): cv.boolean,
        vol.Optional(CONF_PERSISTENCE, default=True): cv.boolean,
        vol.Optional(CONF_RETAIN, default=True): cv.boolean,
        vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.Coerce(float),
    })
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):  # pylint: disable=too-many-locals
    """Setup the MySensors component.

    Builds one gateway per configured device (MQTT, TCP or serial), stores
    them in the module-level GATEWAYS dict and loads the entity platforms.
    Returns False when no gateway could be set up, True otherwise.
    """
    import mysensors.mysensors as mysensors

    version = config[DOMAIN].get(CONF_VERSION)
    persistence = config[DOMAIN].get(CONF_PERSISTENCE)

    def setup_gateway(device, persistence_file, baud_rate, tcp_port, in_prefix,
                      out_prefix):
        """Return gateway after setup of the gateway."""
        # pylint: disable=too-many-arguments
        if device == MQTT_COMPONENT:
            # MQTT gateway: the mqtt component must be set up first.
            if not setup_component(hass, MQTT_COMPONENT, config):
                return
            mqtt = get_component(MQTT_COMPONENT)
            retain = config[DOMAIN].get(CONF_RETAIN)

            def pub_callback(topic, payload, qos, retain):
                """Call mqtt publish function."""
                mqtt.publish(hass, topic, payload, qos, retain)

            def sub_callback(topic, callback, qos):
                """Call mqtt subscribe function."""
                mqtt.subscribe(hass, topic, callback, qos)

            gateway = mysensors.MQTTGateway(
                pub_callback, sub_callback,
                event_callback=None, persistence=persistence,
                persistence_file=persistence_file,
                protocol_version=version, in_prefix=in_prefix,
                out_prefix=out_prefix, retain=retain)
        else:
            # Distinguish TCP from serial by whether `device` parses as an
            # IPv4 address.
            try:
                socket.inet_aton(device)
                # valid ip address
                gateway = mysensors.TCPGateway(
                    device, event_callback=None, persistence=persistence,
                    persistence_file=persistence_file,
                    protocol_version=version, port=tcp_port)
            except OSError:
                # invalid ip address
                gateway = mysensors.SerialGateway(
                    device, event_callback=None, persistence=persistence,
                    persistence_file=persistence_file,
                    protocol_version=version, baud=baud_rate)
        gateway.metric = hass.config.units.is_metric
        gateway.debug = config[DOMAIN].get(CONF_DEBUG)
        optimistic = config[DOMAIN].get(CONF_OPTIMISTIC)
        # Wrap the raw pymysensors gateway so platforms get the extra
        # attributes (optimistic, device, platform_callbacks).
        gateway = GatewayWrapper(gateway, optimistic, device)
        # pylint: disable=attribute-defined-outside-init
        gateway.event_callback = gateway.callback_factory()

        def gw_start(event):
            """Callback to trigger start of gateway and any persistence."""
            gateway.start()
            # Make sure the gateway thread is stopped on HA shutdown.
            hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP,
                                 lambda event: gateway.stop())
            if persistence:
                # Replay persisted nodes so entities are re-created.
                for node_id in gateway.sensors:
                    gateway.event_callback('persistence', node_id)

        # Defer starting the gateway until HA itself has started.
        hass.bus.listen_once(EVENT_HOMEASSISTANT_START, gw_start)

        return gateway

    # Setup all devices from config
    global GATEWAYS
    GATEWAYS = {}
    conf_gateways = config[DOMAIN][CONF_GATEWAYS]

    for index, gway in enumerate(conf_gateways):
        device = gway[CONF_DEVICE]
        # Default persistence file is numbered per gateway (1-based).
        persistence_file = gway.get(
            CONF_PERSISTENCE_FILE,
            hass.config.path('mysensors{}.pickle'.format(index + 1)))
        baud_rate = gway.get(CONF_BAUD_RATE)
        tcp_port = gway.get(CONF_TCP_PORT)
        in_prefix = gway.get(CONF_TOPIC_IN_PREFIX)
        out_prefix = gway.get(CONF_TOPIC_OUT_PREFIX)
        GATEWAYS[device] = setup_gateway(
            device, persistence_file, baud_rate, tcp_port, in_prefix,
            out_prefix)
        if GATEWAYS[device] is None:
            # setup_gateway returns None e.g. when mqtt failed to set up.
            GATEWAYS.pop(device)

    if not GATEWAYS:
        _LOGGER.error(
            'No devices could be setup as gateways, check your configuration')
        return False

    for component in ['sensor', 'switch', 'light', 'binary_sensor', 'climate',
                      'cover']:
        discovery.load_platform(hass, component, DOMAIN, {}, config)

    return True
def pf_callback_factory(map_sv_types, devices, add_devices, entity_class):
    """Return a new callback for the platform.

    The returned callback walks a node's children and, for every
    (node, child, value_type) combination the platform supports, either
    refreshes the existing entity or creates and registers a new one.
    """
    def mysensors_callback(gateway, node_id):
        """Callback for mysensors platform."""
        node = gateway.sensors[node_id]
        if node.sketch_name is None:
            _LOGGER.info('No sketch_name: node %s', node_id)
            return
        for child in node.children.values():
            for value_type in child.values:
                key = node_id, child.id, value_type
                # Skip combinations this platform does not handle.
                if child.type not in map_sv_types:
                    continue
                if value_type not in map_sv_types[child.type]:
                    continue
                if key in devices:
                    # Known entity: just push the new state.
                    devices[key].update_ha_state(True)
                    continue
                name = '{} {} {}'.format(
                    node.sketch_name, node_id, child.id)
                # entity_class may map child types to classes, or be one
                # class used for everything.
                if isinstance(entity_class, dict):
                    device_class = entity_class[child.type]
                else:
                    device_class = entity_class
                devices[key] = device_class(
                    gateway, node_id, child.id, name, value_type, child.type)
                _LOGGER.info('Adding new devices: %s', devices[key])
                add_devices([devices[key]])
                if key in devices:
                    devices[key].update_ha_state(True)
    return mysensors_callback
class GatewayWrapper(object):
    """Gateway wrapper class."""

    # pylint: disable=too-few-public-methods
    def __init__(self, gateway, optimistic, device):
        """Setup class attributes on instantiation.

        Args:
        gateway (mysensors.SerialGateway): Gateway to wrap.
        optimistic (bool): Send values to actuators without feedback state.
        device (str): Path to serial port, IP address or mqtt.

        Attributes:
        _wrapped_gateway (mysensors.SerialGateway): Wrapped gateway.
        platform_callbacks (list): Callback functions, one per platform.
        optimistic (bool): Send values to actuators without feedback state.
        device (str): Device configured as gateway.
        __initialised (bool): True if GatewayWrapper is initialised.
        """
        self._wrapped_gateway = gateway
        self.platform_callbacks = []
        self.optimistic = optimistic
        self.device = device
        # Must be set last: __setattr__ routes writes to the wrapped
        # gateway once this flag exists (see __setattr__ below).
        self.__initialised = True

    def __getattr__(self, name):
        """See if this object has attribute name."""
        # Do not use hasattr, it goes into infinite recursion
        if name in self.__dict__:
            # This object has the attribute.
            return getattr(self, name)
        # The wrapped object has the attribute.
        return getattr(self._wrapped_gateway, name)

    def __setattr__(self, name, value):
        """See if this object has attribute name then set to value."""
        # Before __init__ finishes (flag absent), set on the wrapper itself.
        if '_GatewayWrapper__initialised' not in self.__dict__:
            return object.__setattr__(self, name, value)
        elif name in self.__dict__:
            # Attribute belongs to the wrapper; keep it here.
            object.__setattr__(self, name, value)
        else:
            # Unknown attribute: forward the write to the wrapped gateway.
            object.__setattr__(self._wrapped_gateway, name, value)

    def callback_factory(self):
        """Return a new callback function."""
        def node_update(update_type, node_id):
            """Callback for node updates from the MySensors gateway."""
            _LOGGER.debug('Update %s: node %s', update_type, node_id)
            # Fan the update out to every registered platform callback.
            for callback in self.platform_callbacks:
                callback(self, node_id)
        return node_update
class MySensorsDeviceEntity(object):
    """Represent a MySensors entity."""

    # pylint: disable=too-many-arguments
    def __init__(
            self, gateway, node_id, child_id, name, value_type, child_type):
        """
        Setup class attributes on instantiation.

        Args:
        gateway (GatewayWrapper): Gateway object.
        node_id (str): Id of node.
        child_id (str): Id of child.
        name (str): Entity name.
        value_type (str): Value type of child. Value is entity state.
        child_type (str): Child type of child.

        Attributes:
        gateway (GatewayWrapper): Gateway object.
        node_id (str): Id of node.
        child_id (str): Id of child.
        _name (str): Entity name.
        value_type (str): Value type of child. Value is entity state.
        child_type (str): Child type of child.
        _values (dict): Child values. Non state values set as state attributes.
        """
        self.gateway = gateway
        self.node_id = node_id
        self.child_id = child_id
        self._name = name
        self.value_type = value_type
        self.child_type = child_type
        self._values = {}

    @property
    def should_poll(self):
        """Mysensor gateway pushes its state to HA."""
        return False

    @property
    def name(self):
        """The name of this entity."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        node = self.gateway.sensors[self.node_id]
        child = node.children[self.child_id]
        attr = {
            ATTR_BATTERY_LEVEL: node.battery_level,
            ATTR_CHILD_ID: self.child_id,
            ATTR_DESCRIPTION: child.description,
            ATTR_DEVICE: self.gateway.device,
            ATTR_NODE_ID: self.node_id,
        }
        set_req = self.gateway.const.SetReq
        # Expose every cached child value under its SetReq name.
        for value_type, value in self._values.items():
            try:
                attr[set_req(value_type).name] = value
            except ValueError:
                # value_type unknown to this protocol version; skip it.
                _LOGGER.error('Value_type %s is not valid for mysensors '
                              'version %s', value_type,
                              self.gateway.protocol_version)
        return attr

    @property
    def available(self):
        """Return True if entity is available."""
        # Available once the state value type has been seen at least once.
        return self.value_type in self._values

    def update(self):
        """Update the controller with the latest value from a sensor."""
        node = self.gateway.sensors[self.node_id]
        child = node.children[self.child_id]
        set_req = self.gateway.const.SetReq
        for value_type, value in child.values.items():
            _LOGGER.debug(
                "%s: value_type %s, value = %s", self._name, value_type, value)
            # Binary value types are normalized to HA's STATE_ON/STATE_OFF.
            if value_type in (set_req.V_ARMED, set_req.V_LIGHT,
                              set_req.V_LOCK_STATUS, set_req.V_TRIPPED):
                self._values[value_type] = (
                    STATE_ON if int(value) == 1 else STATE_OFF)
            elif value_type == set_req.V_DIMMER:
                self._values[value_type] = int(value)
            else:
                self._values[value_type] = value
| |
from .estimator_base import *
class H2ORandomForestEstimator(H2OEstimator):
    """Builds a Random Forest model on an H2OFrame."""

    def __init__(self, model_id=None, mtries=None, sample_rate=None, build_tree_one_node=None,
                 ntrees=None, max_depth=None, min_rows=None, nbins=None, nbins_cats=None,
                 binomial_double_trees=None, balance_classes=None, max_after_balance_size=None,
                 seed=None, nfolds=None, fold_assignment=None,
                 stopping_rounds=None, stopping_metric=None, stopping_tolerance=None,
                 score_each_iteration=None, keep_cross_validation_predictions=None, checkpoint=None):
        """Builds a Random Forest Model on an H2OFrame

        Parameters
        ----------
        model_id : str, optional
          The unique id assigned to the resulting model. If none is given, an id will
          automatically be generated.
        mtries : int
          Number of variables randomly sampled as candidates at each split. If set to -1,
          defaults to sqrt{p} for classification, and p/3 for regression, where p is the
          number of predictors.
        sample_rate : float
          Sample rate, from 0 to 1.0.
        build_tree_one_node : bool
          Run on one node only; no network overhead but fewer CPUs used.
          Suitable for small datasets.
        ntrees : int
          A non-negative integer that determines the number of trees to grow.
        max_depth : int
          Maximum depth to grow the tree.
        min_rows : int
          Minimum number of rows to assign to terminal nodes.
        nbins : int
          For numerical columns (real/int), build a histogram of (at least) this many bins,
          then split at the best point.
        nbins_cats : int
          For categorical columns (factors), build a histogram of this many bins, then split
          at the best point. Higher values can lead to more overfitting.
        binomial_double_trees : bool
          For binary classification: Build 2x as many trees (one per class) - can lead to
          higher accuracy.
        balance_classes : bool
          logical, indicates whether or not to balance training data class counts via
          over/under-sampling (for imbalanced data)
        max_after_balance_size : float
          Maximum relative size of the training data after balancing class counts
          (can be less than 1.0). Ignored if balance_classes is False,
          which is the default behavior.
        seed : int
          Seed for random numbers (affects sampling) - Note: only reproducible when
          running single threaded
        nfolds : int, optional
          Number of folds for cross-validation. If nfolds >= 2, then validation must
          remain empty.
        fold_assignment : str
          Cross-validation fold assignment scheme, if fold_column is not specified
          Must be "AUTO", "Random" or "Modulo"
        keep_cross_validation_predictions : bool
          Whether to keep the predictions of the cross-validation models
        score_each_iteration : bool
          Attempts to score each tree.
        stopping_rounds : int
          Early stopping based on convergence of stopping_metric.
          Stop if simple moving average of length k of the stopping_metric does not improve
          (by stopping_tolerance) for k=stopping_rounds scoring events.
          Can only trigger after at least 2k scoring events. Use 0 to disable.
        stopping_metric : str
          Metric to use for convergence checking, only for _stopping_rounds > 0
          Can be one of "AUTO", "deviance", "logloss", "MSE", "AUC", "r2", "misclassification".
        stopping_tolerance : float
          Relative tolerance for metric-based stopping criterion (stop if relative improvement
          is not at least this much)
        checkpoint : str, optional
          Key of a model to resume training from, if any.
        """
        super(H2ORandomForestEstimator, self).__init__()
        # Snapshot the constructor arguments as the model parameters.
        # Use .items() instead of the Python-2-only .iteritems() so this
        # also works on Python 3, and drop `self` from the mapping.
        parms = locals()
        self._parms = {k: v for k, v in parms.items() if k != "self"}

    # Simple accessors: each property reads/writes its entry in _parms.
    @property
    def mtries(self):
        return self._parms["mtries"]

    @mtries.setter
    def mtries(self, value):
        self._parms["mtries"] = value

    @property
    def sample_rate(self):
        return self._parms["sample_rate"]

    @sample_rate.setter
    def sample_rate(self, value):
        self._parms["sample_rate"] = value

    @property
    def build_tree_one_node(self):
        return self._parms["build_tree_one_node"]

    @build_tree_one_node.setter
    def build_tree_one_node(self, value):
        self._parms["build_tree_one_node"] = value

    @property
    def ntrees(self):
        return self._parms["ntrees"]

    @ntrees.setter
    def ntrees(self, value):
        self._parms["ntrees"] = value

    @property
    def max_depth(self):
        return self._parms["max_depth"]

    @max_depth.setter
    def max_depth(self, value):
        self._parms["max_depth"] = value

    @property
    def min_rows(self):
        return self._parms["min_rows"]

    @min_rows.setter
    def min_rows(self, value):
        self._parms["min_rows"] = value

    @property
    def nbins(self):
        return self._parms["nbins"]

    @nbins.setter
    def nbins(self, value):
        self._parms["nbins"] = value

    @property
    def nbins_cats(self):
        return self._parms["nbins_cats"]

    @nbins_cats.setter
    def nbins_cats(self, value):
        self._parms["nbins_cats"] = value

    @property
    def binomial_double_trees(self):
        return self._parms["binomial_double_trees"]

    @binomial_double_trees.setter
    def binomial_double_trees(self, value):
        self._parms["binomial_double_trees"] = value

    @property
    def balance_classes(self):
        return self._parms["balance_classes"]

    @balance_classes.setter
    def balance_classes(self, value):
        self._parms["balance_classes"] = value

    @property
    def max_after_balance_size(self):
        return self._parms["max_after_balance_size"]

    @max_after_balance_size.setter
    def max_after_balance_size(self, value):
        self._parms["max_after_balance_size"] = value

    @property
    def seed(self):
        return self._parms["seed"]

    @seed.setter
    def seed(self, value):
        self._parms["seed"] = value

    @property
    def nfolds(self):
        return self._parms["nfolds"]

    @nfolds.setter
    def nfolds(self, value):
        self._parms["nfolds"] = value

    @property
    def fold_assignment(self):
        return self._parms["fold_assignment"]

    @fold_assignment.setter
    def fold_assignment(self, value):
        self._parms["fold_assignment"] = value

    @property
    def keep_cross_validation_predictions(self):
        return self._parms["keep_cross_validation_predictions"]

    @keep_cross_validation_predictions.setter
    def keep_cross_validation_predictions(self, value):
        self._parms["keep_cross_validation_predictions"] = value

    @property
    def score_each_iteration(self):
        return self._parms["score_each_iteration"]

    @score_each_iteration.setter
    def score_each_iteration(self, value):
        self._parms["score_each_iteration"] = value

    @property
    def stopping_rounds(self):
        return self._parms["stopping_rounds"]

    @stopping_rounds.setter
    def stopping_rounds(self, value):
        self._parms["stopping_rounds"] = value

    @property
    def stopping_metric(self):
        return self._parms["stopping_metric"]

    @stopping_metric.setter
    def stopping_metric(self, value):
        self._parms["stopping_metric"] = value

    @property
    def stopping_tolerance(self):
        return self._parms["stopping_tolerance"]

    @stopping_tolerance.setter
    def stopping_tolerance(self, value):
        self._parms["stopping_tolerance"] = value

    @property
    def checkpoint(self):
        return self._parms["checkpoint"]

    @checkpoint.setter
    def checkpoint(self, value):
        self._parms["checkpoint"] = value
| |
import re
from django import forms
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.forms import formsets, ValidationError
from django.views.generic import TemplateView
from django.utils.datastructures import SortedDict
from django.utils.decorators import classonlymethod
from django.utils.translation import ugettext as _
from django.utils import six
from django.contrib.formtools.wizard.storage import get_storage
from django.contrib.formtools.wizard.storage.exceptions import NoFileStorageConfigured
from django.contrib.formtools.wizard.forms import ManagementForm
def normalize_name(name):
    """
    Convert a camel-case style name into lower-case words joined by
    underscores. Example::

        >>> normalize_name('oneTwoThree')
        'one_two_three'
        >>> normalize_name('FourFiveSix')
        'four_five_six'
    """
    # A boundary is a capital preceded by a lower-case letter, or a capital
    # that is not part of a run of capitals (i.e. not followed by another
    # capital or the end of the string).
    camel_boundary = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
    underscored = camel_boundary.sub('_\\1', name)
    return underscored.lower().strip('_')
class StepsHelper(object):
    """Expose step metadata of a wizard as convenient properties."""

    def __init__(self, wizard):
        self._wizard = wizard

    def __dir__(self):
        return self.all

    def __len__(self):
        return self.count

    def __repr__(self):
        return '<StepsHelper for %s (steps: %s)>' % (self._wizard, self.all)

    @property
    def all(self):
        """Return the names of all steps/forms of the wizard."""
        return list(self._wizard.get_form_list())

    @property
    def count(self):
        """Return how many steps/forms this wizard has."""
        return len(self.all)

    @property
    def current(self):
        """
        Return the current step; falls back to the first step when the
        storage backend holds no current step.
        """
        return self._wizard.storage.current_step or self.first

    @property
    def first(self):
        """Return the name of the first step."""
        steps = self.all
        return steps[0]

    @property
    def last(self):
        """Return the name of the last step."""
        steps = self.all
        return steps[-1]

    @property
    def next(self):
        """Return the step following the current one."""
        return self._wizard.get_next_step()

    @property
    def prev(self):
        """Return the step preceding the current one."""
        return self._wizard.get_prev_step()

    @property
    def index(self):
        """Return the index of the current step."""
        return self._wizard.get_step_index()

    @property
    def step0(self):
        """Zero-based index of the current step (template friendly)."""
        return int(self.index)

    @property
    def step1(self):
        """One-based index of the current step (template friendly)."""
        return self.step0 + 1
class WizardView(TemplateView):
    """
    The WizardView is used to create multi-page forms and handles all the
    storage and validation stuff. The wizard is based on Django's generic
    class based views.
    """
    # Dotted path of the storage backend; resolved via get_storage().
    storage_name = None
    # Ordered mapping of step name -> form class (built by get_initkwargs).
    form_list = None
    # Per-step initial data dictionaries.
    initial_dict = None
    # Per-step model instances / querysets for ModelForm(Set) steps.
    instance_dict = None
    # Per-step booleans or callables deciding whether a step is shown.
    condition_dict = None
    template_name = 'formtools/wizard/wizard_form.html'
def __repr__(self):
    """Debug representation listing the wizard's configured forms."""
    cls_name = self.__class__.__name__
    return '<%s: forms: %s>' % (cls_name, self.form_list)
@classonlymethod
def as_view(cls, *args, **kwargs):
    """
    This method is used within urls.py to create unique wizardview
    instances for every request. We need to override this method because
    we add some kwargs which are needed to make the wizardview usable.
    """
    # Normalize form_list/initial/instance/condition kwargs first, then
    # hand the cleaned kwargs to the regular class-based-view machinery.
    initkwargs = cls.get_initkwargs(*args, **kwargs)
    return super(WizardView, cls).as_view(**initkwargs)
@classmethod
def get_initkwargs(cls, form_list=None, initial_dict=None,
                   instance_dict=None, condition_dict=None, *args, **kwargs):
    """
    Creates a dict with all needed parameters for the form wizard instances.

    * `form_list` - is a list of forms. The list entries can be single form
      classes or tuples of (`step_name`, `form_class`). If you pass a list
      of forms, the wizardview will convert the class list to
      (`zero_based_counter`, `form_class`). This is needed to access the
      form for a specific step.
    * `initial_dict` - contains a dictionary of initial data dictionaries.
      The key should be equal to the `step_name` in the `form_list` (or
      the str of the zero based counter - if no step_names added in the
      `form_list`)
    * `instance_dict` - contains a dictionary whose values are model
      instances if the step is based on a ``ModelForm`` and querysets if
      the step is based on a ``ModelFormSet``. The key should be equal to
      the `step_name` in the `form_list`. Same rules as for `initial_dict`
      apply.
    * `condition_dict` - contains a dictionary of boolean values or
      callables. If the value of for a specific `step_name` is callable it
      will be called with the wizardview instance as the only argument.
      If the return value is true, the step's form will be used.
    """
    # Precedence for each dict: explicit argument, then a kwargs entry,
    # then a class attribute, then an empty dict.
    kwargs.update({
        'initial_dict': initial_dict or kwargs.pop('initial_dict',
            getattr(cls, 'initial_dict', None)) or {},
        'instance_dict': instance_dict or kwargs.pop('instance_dict',
            getattr(cls, 'instance_dict', None)) or {},
        'condition_dict': condition_dict or kwargs.pop('condition_dict',
            getattr(cls, 'condition_dict', None)) or {}
    })
    form_list = form_list or kwargs.pop('form_list',
        getattr(cls, 'form_list', None)) or []
    computed_form_list = SortedDict()

    assert len(form_list) > 0, 'at least one form is needed'

    # walk through the passed form list
    for i, form in enumerate(form_list):
        if isinstance(form, (list, tuple)):
            # if the element is a tuple, add the tuple to the new created
            # sorted dictionary.
            computed_form_list[six.text_type(form[0])] = form[1]
        else:
            # if not, add the form with a zero based counter as unicode
            computed_form_list[six.text_type(i)] = form

    # walk through the new created list of forms
    for form in six.itervalues(computed_form_list):
        if issubclass(form, formsets.BaseFormSet):
            # if the element is based on BaseFormSet (FormSet/ModelFormSet)
            # we need to override the form variable.
            form = form.form
        # check if any form contains a FileField, if yes, we need a
        # file_storage added to the wizardview (by subclassing).
        for field in six.itervalues(form.base_fields):
            if (isinstance(field, forms.FileField) and
                    not hasattr(cls, 'file_storage')):
                raise NoFileStorageConfigured(
                    "You need to define 'file_storage' in your "
                    "wizard view in order to handle file uploads.")

    # build the kwargs for the wizardview instances
    kwargs['form_list'] = computed_form_list
    return kwargs
def get_prefix(self, *args, **kwargs):
    """Return the storage prefix for this wizard instance."""
    # TODO: Add some kind of unique id to prefix
    return normalize_name(type(self).__name__)
def get_form_list(self):
    """
    Build the effective form list for this run of the wizard.

    Starts from the configured `form_list` and drops every step whose
    entry in `condition_dict` is (or evaluates to) a false value.
    Callable conditions receive the wizard instance as their only
    argument. The list is recomputed on every call because conditions may
    depend on data from earlier steps.
    """
    filtered_forms = SortedDict()
    for step_name, step_form in six.iteritems(self.form_list):
        # Steps without a condition entry are always included.
        include = self.condition_dict.get(step_name, True)
        if callable(include):
            include = include(self)
        if include:
            filtered_forms[step_name] = step_form
    return filtered_forms
def dispatch(self, request, *args, **kwargs):
    """
    This method gets called by the routing engine. The first argument is
    `request` which contains a `HttpRequest` instance.
    The request is stored in `self.request` for later use. The storage
    instance is stored in `self.storage`.

    After processing the request using the `dispatch` method, the
    response gets updated by the storage engine (for example add cookies).
    """
    # add the storage engine to the current wizardview instance
    self.prefix = self.get_prefix(*args, **kwargs)
    self.storage = get_storage(self.storage_name, self.prefix, request,
        getattr(self, 'file_storage', None))
    self.steps = StepsHelper(self)
    response = super(WizardView, self).dispatch(request, *args, **kwargs)

    # update the response (e.g. adding cookies)
    self.storage.update_response(response)
    return response
def get(self, request, *args, **kwargs):
    """
    This method handles GET requests.

    If a GET request reaches this point, the wizard assumes that the user
    just starts at the first step or wants to restart the process.
    The data of the wizard will be reset before rendering the first step.
    """
    self.storage.reset()

    # reset the current step to the first step.
    self.storage.current_step = self.steps.first
    return self.render(self.get_form())
def post(self, *args, **kwargs):
    """
    This method handles POST requests.

    The wizard will render either the current step (if form validation
    wasn't successful), the next step (if the current step was stored
    successful) or the done view (if no more steps are available)
    """
    # Look for a wizard_goto_step element in the posted data which
    # contains a valid step name. If one was found, render the requested
    # form. (This makes stepping back a lot easier).
    wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
    if wizard_goto_step and wizard_goto_step in self.get_form_list():
        return self.render_goto_step(wizard_goto_step)

    # Check if form was refreshed
    management_form = ManagementForm(self.request.POST, prefix=self.prefix)
    if not management_form.is_valid():
        raise ValidationError(
            _('ManagementForm data is missing or has been tampered.'),
            code='missing_management_form',
        )

    form_current_step = management_form.cleaned_data['current_step']
    if (form_current_step != self.steps.current and
            self.storage.current_step is not None):
        # form refreshed, change current step
        self.storage.current_step = form_current_step

    # get the form for the current step
    form = self.get_form(data=self.request.POST, files=self.request.FILES)

    # and try to validate
    if form.is_valid():
        # if the form is valid, store the cleaned data and files.
        self.storage.set_step_data(self.steps.current, self.process_step(form))
        self.storage.set_step_files(self.steps.current, self.process_step_files(form))

        # check if the current step is the last step
        if self.steps.current == self.steps.last:
            # no more steps, render done view
            return self.render_done(form, **kwargs)
        else:
            # proceed to the next step
            return self.render_next_step(form)
    # invalid form: re-render the current step with its errors.
    return self.render(form)
def render_next_step(self, form, **kwargs):
    """
    Render the following step's form; `form` is the just-validated
    current form.
    """
    # Re-instantiate the upcoming form from any data/files already held
    # in the storage backend.
    upcoming = self.steps.next
    stored_data = self.storage.get_step_data(upcoming)
    stored_files = self.storage.get_step_files(upcoming)
    upcoming_form = self.get_form(upcoming, data=stored_data,
                                  files=stored_files)

    # Advance the stored current step before rendering.
    self.storage.current_step = upcoming
    return self.render(upcoming_form, **kwargs)
def render_goto_step(self, goto_step, **kwargs):
    """
    Jump to `goto_step` and render its form, restored from any stored
    data and files for that step.
    """
    self.storage.current_step = goto_step
    step = self.steps.current
    form = self.get_form(
        data=self.storage.get_step_data(step),
        files=self.storage.get_step_files(step))
    return self.render(form)
    def render_done(self, form, **kwargs):
        """
        Called when every step has been submitted and validated.

        Re-validates all stored step data to prevent manipulation
        between requests. If any form fails to validate,
        `render_revalidation_failure` is returned for that step.
        Otherwise `done()` is called with the full list of validated
        forms and the wizard storage is reset afterwards.
        """
        final_form_list = []
        # walk through the form list and try to validate the data again.
        for form_key in self.get_form_list():
            # Rebind each step's form to the data/files captured earlier.
            form_obj = self.get_form(step=form_key,
                data=self.storage.get_step_data(form_key),
                files=self.storage.get_step_files(form_key))
            if not form_obj.is_valid():
                return self.render_revalidation_failure(form_key, form_obj, **kwargs)
            final_form_list.append(form_obj)
        # render the done view and reset the wizard before returning the
        # response. This is needed to prevent from rendering done with the
        # same data twice.
        done_response = self.done(final_form_list, **kwargs)
        self.storage.reset()
        return done_response
def get_form_prefix(self, step=None, form=None):
"""
Returns the prefix which will be used when calling the actual form for
the given step. `step` contains the step-name, `form` the form which
will be called with the returned prefix.
If no step is given, the form_prefix will determine the current step
automatically.
"""
if step is None:
step = self.steps.current
return str(step)
def get_form_initial(self, step):
"""
Returns a dictionary which will be passed to the form for `step`
as `initial`. If no initial data was provied while initializing the
form wizard, a empty dictionary will be returned.
"""
return self.initial_dict.get(step, {})
def get_form_instance(self, step):
"""
Returns a object which will be passed to the form for `step`
as `instance`. If no instance object was provied while initializing
the form wizard, None will be returned.
"""
return self.instance_dict.get(step, None)
def get_form_kwargs(self, step=None):
"""
Returns the keyword arguments for instantiating the form
(or formset) on the given step.
"""
return {}
    def get_form(self, step=None, data=None, files=None):
        """
        Construct the form instance for `step`.

        If `step` is None, the current step is used. `data` and `files`
        are passed through to bind the form. For `ModelForm` subclasses
        an `instance` kwarg is supplied from `get_form_instance`, and
        for model formsets a `queryset` kwarg — unless the caller has
        already provided one via `get_form_kwargs`.
        """
        if step is None:
            step = self.steps.current
        # prepare the kwargs for the form instance.
        kwargs = self.get_form_kwargs(step)
        kwargs.update({
            'data': data,
            'files': files,
            'prefix': self.get_form_prefix(step, self.form_list[step]),
            'initial': self.get_form_initial(step),
        })
        if issubclass(self.form_list[step], forms.ModelForm):
            # If the form is based on ModelForm, add instance if available
            # and not previously set.
            kwargs.setdefault('instance', self.get_form_instance(step))
        elif issubclass(self.form_list[step], forms.models.BaseModelFormSet):
            # If the form is based on ModelFormSet, add queryset if available
            # and not previously set.
            kwargs.setdefault('queryset', self.get_form_instance(step))
        return self.form_list[step](**kwargs)
def process_step(self, form):
"""
This method is used to postprocess the form data. By default, it
returns the raw `form.data` dictionary.
"""
return self.get_form_step_data(form)
def process_step_files(self, form):
"""
This method is used to postprocess the form files. By default, it
returns the raw `form.files` dictionary.
"""
return self.get_form_step_files(form)
    def render_revalidation_failure(self, step, form, **kwargs):
        """
        Called when a form fails to validate while rendering the done
        view. Rewinds the wizard to the failing step and re-renders its
        form so the user can correct the input.
        """
        self.storage.current_step = step
        return self.render(form, **kwargs)
def get_form_step_data(self, form):
"""
Is used to return the raw form data. You may use this method to
manipulate the data.
"""
return form.data
def get_form_step_files(self, form):
"""
Is used to return the raw form files. You may use this method to
manipulate the data.
"""
return form.files
def get_all_cleaned_data(self):
"""
Returns a merged dictionary of all step cleaned_data dictionaries.
If a step contains a `FormSet`, the key will be prefixed with
'formset-' and contain a list of the formset cleaned_data dictionaries.
"""
cleaned_data = {}
for form_key in self.get_form_list():
form_obj = self.get_form(
step=form_key,
data=self.storage.get_step_data(form_key),
files=self.storage.get_step_files(form_key)
)
if form_obj.is_valid():
if isinstance(form_obj.cleaned_data, (tuple, list)):
cleaned_data.update({
'formset-%s' % form_key: form_obj.cleaned_data
})
else:
cleaned_data.update(form_obj.cleaned_data)
return cleaned_data
def get_cleaned_data_for_step(self, step):
"""
Returns the cleaned data for a given `step`. Before returning the
cleaned data, the stored values are revalidated through the form.
If the data doesn't validate, None will be returned.
"""
if step in self.form_list:
form_obj = self.get_form(step=step,
data=self.storage.get_step_data(step),
files=self.storage.get_step_files(step))
if form_obj.is_valid():
return form_obj.cleaned_data
return None
def get_next_step(self, step=None):
"""
Returns the next step after the given `step`. If no more steps are
available, None will be returned. If the `step` argument is None, the
current step will be determined automatically.
"""
if step is None:
step = self.steps.current
form_list = self.get_form_list()
key = form_list.keyOrder.index(step) + 1
if len(form_list.keyOrder) > key:
return form_list.keyOrder[key]
return None
def get_prev_step(self, step=None):
"""
Returns the previous step before the given `step`. If there are no
steps available, None will be returned. If the `step` argument is
None, the current step will be determined automatically.
"""
if step is None:
step = self.steps.current
form_list = self.get_form_list()
key = form_list.keyOrder.index(step) - 1
if key >= 0:
return form_list.keyOrder[key]
return None
def get_step_index(self, step=None):
"""
Returns the index for the given `step` name. If no step is given,
the current step will be used to get the index.
"""
if step is None:
step = self.steps.current
return self.get_form_list().keyOrder.index(step)
    def get_context_data(self, form, **kwargs):
        """
        Returns the template context for a step. You can overwrite this method
        to add more data for all or some steps. This method returns a
        dictionary containing the rendered form step. Available template
        context variables are:
        * all extra data stored in the storage backend
        * `form` - form instance of the current step
        * `wizard` - the wizard instance itself
        Example:
        .. code-block:: python
            class MyWizard(WizardView):
                def get_context_data(self, form, **kwargs):
                    context = super(MyWizard, self).get_context_data(form=form, **kwargs)
                    if self.steps.current == 'my_step_name':
                        context.update({'another_var': True})
                    return context
        """
        context = super(WizardView, self).get_context_data(form=form, **kwargs)
        # Expose any extra data the storage backend has accumulated.
        context.update(self.storage.extra_data)
        context['wizard'] = {
            'form': form,
            'steps': self.steps,
            # Hidden management form tracking the wizard's current step.
            'management_form': ManagementForm(prefix=self.prefix, initial={
                'current_step': self.steps.current,
            }),
        }
        return context
def render(self, form=None, **kwargs):
"""
Returns a ``HttpResponse`` containing all needed context data.
"""
form = form or self.get_form()
context = self.get_context_data(form=form, **kwargs)
return self.render_to_response(context)
def done(self, form_list, **kwargs):
"""
This method must be overridden by a subclass to process to form data
after processing all steps.
"""
raise NotImplementedError("Your %s class has not defined a done() "
"method, which is required." % self.__class__.__name__)
class SessionWizardView(WizardView):
    """
    A WizardView with pre-configured SessionStorage backend.
    """
    # Wizard state is kept server-side in the Django session.
    storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieWizardView(WizardView):
    """
    A WizardView with pre-configured CookieStorage backend.
    """
    # Wizard state is kept client-side in a (signed) cookie.
    storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
class NamedUrlWizardView(WizardView):
    """
    A WizardView with URL named steps support.

    Each step is addressed by its own URL (reversed from `url_name`
    with a ``step`` kwarg), so step changes are performed via HTTP
    redirects instead of re-rendering under a single URL.
    """
    # Name of the URL pattern used to reverse per-step URLs; required.
    url_name = None
    # Step name reserved for the "done" view (defaults to 'done').
    done_step_name = None
    @classmethod
    def get_initkwargs(cls, *args, **kwargs):
        """
        We require a url_name to reverse URLs later. Additionally users can
        pass a done_step_name to change the URL name of the "done" view.
        """
        assert 'url_name' in kwargs, 'URL name is needed to resolve correct wizard URLs'
        extra_kwargs = {
            'done_step_name': kwargs.pop('done_step_name', 'done'),
            'url_name': kwargs.pop('url_name'),
        }
        initkwargs = super(NamedUrlWizardView, cls).get_initkwargs(*args, **kwargs)
        initkwargs.update(extra_kwargs)
        # The "done" pseudo-step must not clash with a real form step.
        assert initkwargs['done_step_name'] not in initkwargs['form_list'], \
            'step name "%s" is reserved for "done" view' % initkwargs['done_step_name']
        return initkwargs
    def get_step_url(self, step):
        """Reverse the URL for the given `step` name."""
        return reverse(self.url_name, kwargs={'step': step})
    def get(self, *args, **kwargs):
        """
        This renders the form or, if needed, does the http redirects.
        """
        step_url = kwargs.get('step', None)
        if step_url is None:
            # No step in the URL: optionally reset, then redirect to the
            # current step's URL (preserving the query string).
            if 'reset' in self.request.GET:
                self.storage.reset()
                self.storage.current_step = self.steps.first
            if self.request.GET:
                query_string = "?%s" % self.request.GET.urlencode()
            else:
                query_string = ""
            return redirect(self.get_step_url(self.steps.current)
                            + query_string)
        # is the current step the "done" name/view?
        elif step_url == self.done_step_name:
            last_step = self.steps.last
            return self.render_done(self.get_form(step=last_step,
                data=self.storage.get_step_data(last_step),
                files=self.storage.get_step_files(last_step)
            ), **kwargs)
        # is the url step name not equal to the step in the storage?
        # if yes, change the step in the storage (if name exists)
        elif step_url == self.steps.current:
            # URL step name and storage step name are equal, render!
            return self.render(self.get_form(
                data=self.storage.current_step_data,
                files=self.storage.current_step_files,
            ), **kwargs)
        elif step_url in self.get_form_list():
            # Valid step name differing from storage: sync storage first.
            self.storage.current_step = step_url
            return self.render(self.get_form(
                data=self.storage.current_step_data,
                files=self.storage.current_step_files,
            ), **kwargs)
        # invalid step name, reset to first and redirect.
        else:
            self.storage.current_step = self.steps.first
            return redirect(self.get_step_url(self.steps.first))
    def post(self, *args, **kwargs):
        """
        Do a redirect if user presses the prev. step button. The rest of this
        is super'd from WizardView.
        """
        wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
        if wizard_goto_step and wizard_goto_step in self.get_form_list():
            return self.render_goto_step(wizard_goto_step)
        return super(NamedUrlWizardView, self).post(*args, **kwargs)
    def get_context_data(self, form, **kwargs):
        """
        NamedUrlWizardView provides the url_name of this wizard in the context
        dict `wizard`.
        """
        context = super(NamedUrlWizardView, self).get_context_data(form=form, **kwargs)
        context['wizard']['url_name'] = self.url_name
        return context
    def render_next_step(self, form, **kwargs):
        """
        When using the NamedUrlWizardView, we have to redirect to update the
        browser's URL to match the shown step.
        """
        next_step = self.get_next_step()
        self.storage.current_step = next_step
        return redirect(self.get_step_url(next_step))
    def render_goto_step(self, goto_step, **kwargs):
        """
        This method gets called when the current step has to be changed.
        `goto_step` contains the requested step to go to. Redirects to
        that step's URL.
        """
        self.storage.current_step = goto_step
        return redirect(self.get_step_url(goto_step))
    def render_revalidation_failure(self, failed_step, form, **kwargs):
        """
        When a step fails, we have to redirect the user to the first failing
        step.
        """
        self.storage.current_step = failed_step
        return redirect(self.get_step_url(failed_step))
    def render_done(self, form, **kwargs):
        """
        When rendering the done view, we have to redirect first (if the URL
        name doesn't fit).
        """
        if kwargs.get('step', None) != self.done_step_name:
            return redirect(self.get_step_url(self.done_step_name))
        return super(NamedUrlWizardView, self).render_done(form, **kwargs)
class NamedUrlSessionWizardView(NamedUrlWizardView):
    """
    A NamedUrlWizardView with pre-configured SessionStorage backend.
    """
    # Wizard state is kept server-side in the Django session.
    storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class NamedUrlCookieWizardView(NamedUrlWizardView):
    """
    A NamedUrlWizardView with pre-configured CookieStorage backend.
    """
    # Wizard state is kept client-side in a (signed) cookie.
    storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Short alias for the private block-LSTM op used throughout the tests.
block_lstm = lstm_ops._block_lstm  # pylint: disable=protected-access
class LSTMBlockCellTest(test.TestCase):
  """Tests for LSTMBlockCell / block_lstm / LSTMBlockFusedCell.

  The block and fused implementations are checked against the reference
  BasicLSTMCell / LSTMCell results (outputs, states and gradients).
  """
  def testNoneDimsWithDynamicRNN(self):
    """dynamic_rnn over a placeholder with unknown batch/time dims runs."""
    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
      batch_size = 4
      num_steps = 5
      input_dim = 6
      cell_size = 7
      cell = lstm_ops.LSTMBlockCell(cell_size)
      x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_dim))
      output, _ = rnn.dynamic_rnn(
          cell, x, time_major=True, dtype=dtypes.float32)
      sess.run(variables.global_variables_initializer())
      feed = {}
      feed[x] = np.random.randn(num_steps, batch_size, input_dim)
      sess.run(output, feed)
  def testLSTMBlockCell(self):
    """Two stacked LSTMBlockCells reproduce the reference output values."""
    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        m0 = array_ops.zeros([1, 2])
        m1 = array_ops.zeros([1, 2])
        m2 = array_ops.zeros([1, 2])
        m3 = array_ops.zeros([1, 2])
        g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
            [lstm_ops.LSTMBlockCell(2)
             for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
        sess.run([variables.global_variables_initializer()])
        res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
            x.name: np.array([[1., 1.]]),
            m0.name: 0.1 * np.ones([1, 2]),
            m1.name: 0.1 * np.ones([1, 2]),
            m2.name: 0.1 * np.ones([1, 2]),
            m3.name: 0.1 * np.ones([1, 2])
        })
        self.assertEqual(len(res), 5)
        self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
        # These numbers are from testBasicLSTMCell and only test c/h.
        self.assertAllClose(res[1], [[0.68967271, 0.68967271]])
        self.assertAllClose(res[2], [[0.44848421, 0.44848421]])
        self.assertAllClose(res[3], [[0.39897051, 0.39897051]])
        self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
  def testCompatibleNames(self):
    """Variable names/shapes agree across basic, block and fused cells."""
    with self.test_session(use_gpu=True, graph=ops.Graph()):
      cell = rnn_cell.LSTMCell(10)
      pcell = rnn_cell.LSTMCell(10, use_peepholes=True)
      inputs = [array_ops.zeros([4, 5])] * 6
      rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
      rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
      basic_names = {
          v.name: v.get_shape()
          for v in variables.trainable_variables()
      }
    with self.test_session(use_gpu=True, graph=ops.Graph()):
      cell = lstm_ops.LSTMBlockCell(10)
      pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True)
      inputs = [array_ops.zeros([4, 5])] * 6
      rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
      rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
      block_names = {
          v.name: v.get_shape()
          for v in variables.trainable_variables()
      }
    with self.test_session(use_gpu=True, graph=ops.Graph()):
      cell = lstm_ops.LSTMBlockFusedCell(10)
      pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True)
      inputs = [array_ops.zeros([4, 5])] * 6
      cell(inputs, dtype=dtypes.float32, scope="basic/lstm_cell")
      pcell(inputs, dtype=dtypes.float32, scope="peephole/lstm_cell")
      fused_names = {
          v.name: v.get_shape()
          for v in variables.trainable_variables()
      }
    self.assertEqual(basic_names, block_names)
    self.assertEqual(basic_names, fused_names)
  def testLSTMBasicToBlockCell(self):
    """LSTMBlockCell matches BasicLSTMCell outputs for a single step."""
    with self.test_session(use_gpu=True) as sess:
      x = array_ops.zeros([1, 2])
      x_values = np.random.randn(1, 2)
      m0_val = 0.1 * np.ones([1, 2])
      m1_val = -0.1 * np.ones([1, 2])
      m2_val = -0.2 * np.ones([1, 2])
      m3_val = 0.2 * np.ones([1, 2])
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=19890212)
      with variable_scope.variable_scope("basic", initializer=initializer):
        m0 = array_ops.zeros([1, 2])
        m1 = array_ops.zeros([1, 2])
        m2 = array_ops.zeros([1, 2])
        m3 = array_ops.zeros([1, 2])
        g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
            [rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)],
            state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
        sess.run([variables.global_variables_initializer()])
        basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
            x.name: x_values,
            m0.name: m0_val,
            m1.name: m1_val,
            m2.name: m2_val,
            m3.name: m3_val
        })
      with variable_scope.variable_scope("block", initializer=initializer):
        m0 = array_ops.zeros([1, 2])
        m1 = array_ops.zeros([1, 2])
        m2 = array_ops.zeros([1, 2])
        m3 = array_ops.zeros([1, 2])
        g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
            [lstm_ops.LSTMBlockCell(2)
             for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
        sess.run([variables.global_variables_initializer()])
        block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
            x.name: x_values,
            m0.name: m0_val,
            m1.name: m1_val,
            m2.name: m2_val,
            m3.name: m3_val
        })
      self.assertEqual(len(basic_res), len(block_res))
      for basic, block in zip(basic_res, block_res):
        self.assertAllClose(basic, block)
  def testLSTMBasicToBlockCellPeeping(self):
    """Peephole LSTMBlockCell matches peephole LSTMCell for one step."""
    with self.test_session(use_gpu=True) as sess:
      x = array_ops.zeros([1, 2])
      x_values = np.random.randn(1, 2)
      m0_val = 0.1 * np.ones([1, 2])
      m1_val = -0.1 * np.ones([1, 2])
      m2_val = -0.2 * np.ones([1, 2])
      m3_val = 0.2 * np.ones([1, 2])
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=19890212)
      with variable_scope.variable_scope("basic", initializer=initializer):
        m0 = array_ops.zeros([1, 2])
        m1 = array_ops.zeros([1, 2])
        m2 = array_ops.zeros([1, 2])
        m3 = array_ops.zeros([1, 2])
        g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
            [
                rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True)
                for _ in range(2)
            ],
            state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
        sess.run([variables.global_variables_initializer()])
        basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
            x.name: x_values,
            m0.name: m0_val,
            m1.name: m1_val,
            m2.name: m2_val,
            m3.name: m3_val
        })
      with variable_scope.variable_scope("block", initializer=initializer):
        m0 = array_ops.zeros([1, 2])
        m1 = array_ops.zeros([1, 2])
        m2 = array_ops.zeros([1, 2])
        m3 = array_ops.zeros([1, 2])
        g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
            [lstm_ops.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],
            state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
        sess.run([variables.global_variables_initializer()])
        block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
            x.name: x_values,
            m0.name: m0_val,
            m1.name: m1_val,
            m2.name: m2_val,
            m3.name: m3_val
        })
      self.assertEqual(len(basic_res), len(block_res))
      for basic, block in zip(basic_res, block_res):
        self.assertAllClose(basic, block)
  def testLSTMBasicToBlock(self):
    """block_lstm and LSTMBlockFusedCell match BasicLSTMCell on a sequence."""
    with self.test_session(use_gpu=True) as sess:
      batch_size = 2
      input_size = 3
      cell_size = 4
      sequence_length = 5
      inputs = []
      for _ in range(sequence_length):
        inp = ops.convert_to_tensor(
            np.random.randn(batch_size, input_size), dtype=dtypes.float32)
        inputs.append(inp)
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=19890212)
      with variable_scope.variable_scope("basic", initializer=initializer):
        cell = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
        outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        basic_outputs, basic_state = sess.run([outputs, state[0]])
        basic_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        basic_wgrads = sess.run(
            gradients_impl.gradients(outputs, variables.trainable_variables()))
      with variable_scope.variable_scope("block", initializer=initializer):
        w = variable_scope.get_variable(
            "w",
            shape=[input_size + cell_size, cell_size * 4],
            dtype=dtypes.float32)
        b = variable_scope.get_variable(
            "b",
            shape=[cell_size * 4],
            dtype=dtypes.float32,
            initializer=init_ops.zeros_initializer())
        _, _, _, _, _, _, outputs = block_lstm(
            ops.convert_to_tensor(
                sequence_length, dtype=dtypes.int64),
            inputs,
            w,
            b,
            cell_clip=0)
        sess.run([variables.global_variables_initializer()])
        block_outputs = sess.run(outputs)
        block_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        block_wgrads = sess.run(gradients_impl.gradients(outputs, [w, b]))
      self.assertAllClose(basic_outputs, block_outputs)
      self.assertAllClose(basic_grads, block_grads)
      for basic, block in zip(basic_wgrads, block_wgrads):
        self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
      with variable_scope.variable_scope("fused", initializer=initializer):
        cell = lstm_ops.LSTMBlockFusedCell(
            cell_size, cell_clip=0, use_peephole=False)
        outputs, state = cell(inputs, dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        fused_outputs, fused_state = sess.run([outputs, state[0]])
        fused_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        fused_vars = [
            v for v in variables.trainable_variables()
            if v.name.startswith("fused/")
        ]
        fused_wgrads = sess.run(gradients_impl.gradients(outputs, fused_vars))
      self.assertAllClose(basic_outputs, fused_outputs)
      self.assertAllClose(basic_state, fused_state)
      self.assertAllClose(basic_grads, fused_grads)
      for basic, fused in zip(basic_wgrads, fused_wgrads):
        self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
  def testLSTMBasicToBlockPeeping(self):
    """Peephole block/fused LSTM matches peephole LSTMCell on a sequence."""
    with self.test_session(use_gpu=True) as sess:
      batch_size = 2
      input_size = 3
      cell_size = 4
      sequence_length = 5
      inputs = []
      for _ in range(sequence_length):
        inp = ops.convert_to_tensor(
            np.random.randn(batch_size, input_size), dtype=dtypes.float32)
        inputs.append(inp)
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=19890212)
      with variable_scope.variable_scope("basic", initializer=initializer):
        cell = rnn_cell.LSTMCell(
            cell_size, use_peepholes=True, state_is_tuple=True)
        outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        basic_outputs, basic_state = sess.run([outputs, state[0]])
        basic_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        basic_wgrads = sess.run(
            gradients_impl.gradients(outputs, variables.trainable_variables()))
      with variable_scope.variable_scope("block", initializer=initializer):
        w = variable_scope.get_variable(
            "w",
            shape=[input_size + cell_size, cell_size * 4],
            dtype=dtypes.float32)
        b = variable_scope.get_variable(
            "b",
            shape=[cell_size * 4],
            dtype=dtypes.float32,
            initializer=init_ops.zeros_initializer())
        # Peephole weights for the input, forget and output gates.
        wci = variable_scope.get_variable(
            "wci", shape=[cell_size], dtype=dtypes.float32)
        wcf = variable_scope.get_variable(
            "wcf", shape=[cell_size], dtype=dtypes.float32)
        wco = variable_scope.get_variable(
            "wco", shape=[cell_size], dtype=dtypes.float32)
        _, _, _, _, _, _, outputs = block_lstm(
            ops.convert_to_tensor(
                sequence_length, dtype=dtypes.int64),
            inputs,
            w,
            b,
            wci=wci,
            wcf=wcf,
            wco=wco,
            cell_clip=0,
            use_peephole=True)
        sess.run([variables.global_variables_initializer()])
        block_outputs = sess.run(outputs)
        block_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        block_wgrads = sess.run(
            gradients_impl.gradients(outputs, [w, b, wci, wcf, wco]))
      self.assertAllClose(basic_outputs, block_outputs)
      self.assertAllClose(basic_grads, block_grads)
      for basic, block in zip(basic_wgrads, block_wgrads):
        self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
      with variable_scope.variable_scope("fused", initializer=initializer):
        cell = lstm_ops.LSTMBlockFusedCell(
            cell_size, cell_clip=0, use_peephole=True)
        outputs, state = cell(inputs, dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        fused_outputs, fused_state = sess.run([outputs, state[0]])
        fused_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        fused_vars = [
            v for v in variables.trainable_variables()
            if v.name.startswith("fused/")
        ]
        fused_wgrads = sess.run(gradients_impl.gradients(outputs, fused_vars))
      self.assertAllClose(basic_outputs, fused_outputs)
      self.assertAllClose(basic_state, fused_state)
      self.assertAllClose(basic_grads, fused_grads)
      for basic, fused in zip(basic_wgrads, fused_wgrads):
        self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
  def testLSTMFusedSequenceLengths(self):
    """Verify proper support for sequence lengths in LSTMBlockFusedCell."""
    with self.test_session(use_gpu=True) as sess:
      batch_size = 3
      input_size = 4
      cell_size = 5
      max_sequence_length = 6
      inputs = []
      for _ in range(max_sequence_length):
        inp = ops.convert_to_tensor(
            np.random.randn(batch_size, input_size), dtype=dtypes.float32)
        inputs.append(inp)
      seq_lengths = constant_op.constant([3, 4, 5])
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=19890213)
      with variable_scope.variable_scope("basic", initializer=initializer):
        cell = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
        outputs, state = rnn.static_rnn(
            cell, inputs, dtype=dtypes.float32, sequence_length=seq_lengths)
        sess.run([variables.global_variables_initializer()])
        basic_outputs, basic_state = sess.run([outputs, state[0]])
        basic_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        basic_wgrads = sess.run(
            gradients_impl.gradients(outputs, variables.trainable_variables()))
      with variable_scope.variable_scope("fused", initializer=initializer):
        cell = lstm_ops.LSTMBlockFusedCell(
            cell_size, cell_clip=0, use_peephole=False)
        outputs, state = cell(
            inputs, dtype=dtypes.float32, sequence_length=seq_lengths)
        sess.run([variables.global_variables_initializer()])
        fused_outputs, fused_state = sess.run([outputs, state[0]])
        fused_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        fused_vars = [
            v for v in variables.trainable_variables()
            if v.name.startswith("fused/")
        ]
        fused_wgrads = sess.run(gradients_impl.gradients(outputs, fused_vars))
      self.assertAllClose(basic_outputs, fused_outputs)
      self.assertAllClose(basic_state, fused_state)
      self.assertAllClose(basic_grads, fused_grads)
      for basic, fused in zip(basic_wgrads, fused_wgrads):
        self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
      # Verify that state propagation works if we turn our sequence into
      # tiny (single-time) subsequences, i.e. unfuse the cell
      with variable_scope.variable_scope(
          "unfused", initializer=initializer) as vs:
        cell = lstm_ops.LSTMBlockFusedCell(
            cell_size, cell_clip=0, use_peephole=False)
        outputs = []
        state = None
        for i, inp in enumerate(inputs):
          # Per-example length mask for this single time step.
          lengths = [int(i < l) for l in seq_lengths.eval()]
          output, state = cell(
              [inp],
              initial_state=state,
              dtype=dtypes.float32,
              sequence_length=lengths)
          vs.reuse_variables()
          outputs.append(output[0])
        outputs = array_ops.stack(outputs)
        sess.run([variables.global_variables_initializer()])
        unfused_outputs, unfused_state = sess.run([outputs, state[0]])
        unfused_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        unfused_vars = [
            v for v in variables.trainable_variables()
            if v.name.startswith("unfused/")
        ]
        unfused_wgrads = sess.run(
            gradients_impl.gradients(outputs, unfused_vars))
      self.assertAllClose(basic_outputs, unfused_outputs)
      self.assertAllClose(basic_state, unfused_state)
      self.assertAllClose(basic_grads, unfused_grads)
      for basic, unfused in zip(basic_wgrads, unfused_wgrads):
        self.assertAllClose(basic, unfused, rtol=1e-2, atol=1e-2)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| |
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from nailgun.db.sqlalchemy import models
from nailgun.network.manager import NetworkManager
from nailgun import objects
from nailgun.orchestrator.deployment_graph import AstuteGraph
from nailgun.orchestrator.deployment_serializers import \
get_serializer_for_cluster
from nailgun.test.integration.test_orchestrator_serializer import \
BaseDeploymentSerializer
class TestDeploymentAttributesSerialization70(BaseDeploymentSerializer):
    @mock.patch.object(models.Release, 'environment_version',
                       new_callable=mock.PropertyMock(return_value='7.0'))
    def setUp(self, *args):
        """Build an ha_compact cluster and serialize it for Astute.

        `Release.environment_version` is patched to '7.0' so the cluster
        behaves as a 7.0 environment when the serializer is selected.
        """
        super(TestDeploymentAttributesSerialization70, self).setUp()
        self.cluster = self.create_env('ha_compact')
        # NOTE: 'prepare_for_deployment' is going to be changed for 7.0
        objects.NodeCollection.prepare_for_deployment(self.env.nodes, 'vlan')
        cluster_db = self.db.query(models.Cluster).get(self.cluster['id'])
        serializer = get_serializer_for_cluster(cluster_db)
        self.serialized_for_astute = serializer(
            AstuteGraph(cluster_db)).serialize(cluster_db, cluster_db.nodes)
def create_env(self, mode):
return self.env.create(
cluster_kwargs={
'mode': mode,
'net_provider': 'neutron',
'net_segment_type': 'vlan'},
nodes_kwargs=[
{'roles': ['controller'],
'pending_addition': True,
'name': self.node_name,
}
])
    def test_network_scheme(self):
        """Every serialized node maps each network role to its bridge."""
        for node in self.serialized_for_astute:
            roles = node['network_scheme']['roles']
            # Full expected role -> bridge mapping for the 7.0 scheme.
            expected_roles = {
                'admin/pxe': 'br-fw-admin',
                'keystone/api': 'br-mgmt',
                'neutron/api': 'br-mgmt',
                'swift/api': 'br-mgmt',
                'sahara/api': 'br-mgmt',
                'ceilometer/api': 'br-mgmt',
                'cinder/api': 'br-mgmt',
                'glance/api': 'br-mgmt',
                'heat/api': 'br-mgmt',
                'nova/api': 'br-mgmt',
                'murano/api': 'br-mgmt',
                'horizon': 'br-mgmt',
                'mgmt/api': 'br-mgmt',
                'mgmt/database': 'br-mgmt',
                'mgmt/messaging': 'br-mgmt',
                'mgmt/corosync': 'br-mgmt',
                'mgmt/memcache': 'br-mgmt',
                'mgmt/vip': 'br-mgmt',
                'public/vip': 'br-ex',
                'neutron/private': 'br-prv',
                'neutron/mesh': 'br-mgmt',
                'neutron/floating': 'br-floating',
                'swift/replication': 'br-storage',
                'ceph/public': 'br-mgmt',
                'ceph/radosgw': 'br-ex',
                'ceph/replication': 'br-storage',
                'cinder/iscsi': 'br-storage',
                'mongo/db': 'br-mgmt',
                # deprecated
                'fw-admin': 'br-fw-admin',
                'management': 'br-mgmt',
                'ex': 'br-ex',
                'storage': 'br-storage',
            }
            self.assertEqual(roles, expected_roles)
    def test_offloading_modes_serialize(self):
        """NIC offloading modes appear in each interface's ethtool block."""
        meta = self.env.default_metadata()
        # Expected per-interface modes, as NetworkManager normalizes them.
        changed_offloading_modes = {}
        for interface in meta['interfaces']:
            changed_offloading_modes[interface['name']] = \
                NetworkManager._get_modified_offloading_modes(
                    interface.get('offloading_modes')
                )
        for node in self.serialized_for_astute:
            interfaces = node['network_scheme']['interfaces']
            for iface_name in interfaces:
                ethtool_blk = interfaces[iface_name].get('ethtool', None)
                self.assertIsNotNone(
                    ethtool_blk,
                    "There is no 'ethtool' block in deployment data")
                offload_blk = ethtool_blk.get('offload', None)
                self.assertIsNotNone(
                    offload_blk,
                    "There is no 'offload' block in deployment data")
                self.assertDictEqual(offload_blk,
                                     changed_offloading_modes[iface_name])
def test_network_metadata(self):
    """The serialized network_metadata must describe the node (uid, fqdn,
    names, swift zone) and resolve every network role to the node's IP on
    the corresponding network (or None for L2-only roles).
    """
    nm = objects.Cluster.get_network_manager(self.env.clusters[0])
    # Collect this node's bare IP (CIDR suffix stripped) per network.
    ip_by_net = {
        'fuelweb_admin': None,
        'storage': None,
        'management': None,
        'public': None
    }
    node = self.env.nodes[0]
    for net in ip_by_net:
        netgroup = nm.get_node_network_by_netname(node, net)
        if netgroup.get('ip'):
            ip_by_net[net] = netgroup['ip'].split('/')[0]
    for node_data in self.serialized_for_astute:
        self.assertItemsEqual(
            node_data['network_metadata'], ['nodes', 'vips'])
        for k, v in six.iteritems(node_data['network_metadata']['nodes']):
            self.assertItemsEqual(
                v,
                ['uid', 'fqdn', 'name', 'user_node_name',
                 'swift_zone', 'node_roles', 'network_roles']
            )
            # The dict key is the slave name; 'name' echoes it.
            self.assertEqual(objects.Node.make_slave_name(node), k)
            self.assertEqual(v['uid'], node.uid)
            self.assertEqual(v['fqdn'], node.fqdn)
            self.assertEqual(v['name'], k)
            self.assertEqual(v['user_node_name'], node.name)
            self.assertEqual(v['swift_zone'], node.uid)
            # Role -> IP mapping; roles with no L3 address map to None.
            network_roles = {
                'admin/pxe': ip_by_net['fuelweb_admin'],
                'fw-admin': ip_by_net['fuelweb_admin'],

                'keystone/api': ip_by_net['management'],
                'neutron/api': ip_by_net['management'],
                'swift/api': ip_by_net['management'],
                'sahara/api': ip_by_net['management'],
                'ceilometer/api': ip_by_net['management'],
                'cinder/api': ip_by_net['management'],
                'glance/api': ip_by_net['management'],
                'heat/api': ip_by_net['management'],
                'nova/api': ip_by_net['management'],
                'murano/api': ip_by_net['management'],
                'horizon': ip_by_net['management'],
                'management': ip_by_net['management'],
                'mgmt/api': ip_by_net['management'],
                'mgmt/database': ip_by_net['management'],
                'mgmt/messaging': ip_by_net['management'],
                'mgmt/corosync': ip_by_net['management'],
                'mgmt/memcache': ip_by_net['management'],
                'mgmt/vip': ip_by_net['management'],
                'mongo/db': ip_by_net['management'],
                'neutron/mesh': ip_by_net['management'],
                'ceph/public': ip_by_net['management'],

                'neutron/private': None,
                'neutron/floating': None,

                'storage': ip_by_net['storage'],
                'ceph/replication': ip_by_net['storage'],
                'swift/replication': ip_by_net['storage'],
                'cinder/iscsi': ip_by_net['storage'],

                'ex': ip_by_net['public'],
                'public/vip': ip_by_net['public'],
                'ceph/radosgw': ip_by_net['public'],
            }
            self.assertEqual(
                v['network_roles'],
                network_roles
            )
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
def testHandleDtypeShapeMatch(self):
  """Assigning through a handle must match its dtype and shape: a float
  value or a rank-1 value is rejected at graph-build time (ValueError);
  a matching int32 scalar succeeds.
  """
  with self.test_session():
    handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
    # Wrong dtype: float32 value into an int32 handle.
    with self.assertRaises(ValueError):
      resource_variable_ops.assign_variable_op(
          handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
    # Wrong shape: rank-1 value into a scalar handle.
    with self.assertRaises(ValueError):
      resource_variable_ops.assign_variable_op(handle,
                                               constant_op.constant(
                                                   [0],
                                                   dtype=dtypes.int32)).run()
    # Matching dtype and shape: succeeds.
    resource_variable_ops.assign_variable_op(handle,
                                             constant_op.constant(
                                                 0,
                                                 dtype=dtypes.int32)).run()
def testReadVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to read variable with wrong dtype. "
"Expected float got int32."):
_ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
def testAssignVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1]))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to assign variable with wrong "
"dtype. Expected int32 got float."):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1.], dtype=dtypes.float32))
def testUnprintableHandle(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertIn("<unprintable>", str(handle))
self.assertIn("<unprintable>", repr(handle))
@test_util.run_in_graph_and_eager_modes()
def testDtypeSurvivesIdentity(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
self.evaluate(resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)))
@test_util.run_in_graph_and_eager_modes()
def testCreateRead(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
value = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertAllEqual(1, value)
@test_util.run_in_graph_and_eager_modes()
def testManyAssigns(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = self.evaluate([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
@test_util.run_in_graph_and_eager_modes()
def testAssignAdd(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
self.evaluate(resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
read = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertEqual(read, 2)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testScatterAdd(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
# TODO(alive): get this to work in Eager mode.
def testGPU(self):
with self.test_session(use_gpu=True):
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(
resource_variable_ops.var_is_initialized_op(abc.handle)),
True)
@test_util.run_in_graph_and_eager_modes()
def testConstraintArg(self):
constraint = lambda x: x
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var0")
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var1")
# TODO(alive): how should this work in Eager mode?
def testInitFn(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
@test_util.run_in_graph_and_eager_modes()
def testInitFnDtype(self):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32)
self.assertEqual(dtypes.float32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes()
def testInitFnNoDtype(self):
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="var2")
self.assertEqual(dtypes.int32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes()
def testInitializeAllVariables(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes()
def testOperatorOverload(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, self.evaluate(v + v))
@test_util.run_in_graph_and_eager_modes()
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign(2.0))
self.assertEqual(2.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes()
def testLoad(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
v.load(2.0)
self.assertEqual(2.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes()
def testSparseRead(self):
with self.test_session():
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value = self.evaluate(v.sparse_read([0, 3, 1, 2]))
self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
def testToFromProto(self):
  """Round-trip a ResourceVariable through to_proto()/from_proto() and
  check the reconstructed variable is usable as an op input.
  """
  with self.test_session():
    v = resource_variable_ops.ResourceVariable(1.0)
    variables.global_variables_initializer().run()
    w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual(2, math_ops.add(w, 1).eval())
@test_util.run_in_graph_and_eager_modes()
def testAssignAddMethod(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_add(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes()
def testAssignSubMethod(self):
v = resource_variable_ops.ResourceVariable(3.0)
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_sub(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes()
def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3.0, self.evaluate(v.value()))
self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
with self.assertRaises(errors.NotFoundError):
self.evaluate(v.value())
# Handle to a resource not actually created.
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
self.evaluate(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
def testAssignDifferentShapes(self):
with self.test_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run(
[assign],
feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
def testAssignDifferentShapesEager(self):
with context.eager_mode():
with variable_scope.variable_scope("foo"):
var = variable_scope.get_variable("x", shape=[1, 1],
dtype=dtypes.float32)
assign = var.assign(np.zeros(shape=[2, 2]))
self.evaluate(assign)
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
# TODO(alive): get caching to work in eager mode.
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
_ = w.value().op.get_attr("_class")
@test_util.run_in_graph_and_eager_modes()
def testSharedName(self):
v = resource_variable_ops.ResourceVariable(300.0, name="var4")
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var4")
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
x = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var5")
with self.assertRaisesOpError("Resource .*/var5/.* does not exist"):
x_read = resource_variable_ops.read_variable_op(x, v.dtype.base_dtype)
self.evaluate(x_read)
@test_util.run_in_graph_and_eager_modes()
def testSharedNameWithNamescope(self):
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(300.0, name="var6")
self.assertEqual("foo/var6", v._shared_name) # pylint: disable=protected-access
self.assertEqual("foo/var6:0", v.name)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="foo/var6")
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
@test_util.run_in_graph_and_eager_modes()
def testShape(self):
v = resource_variable_ops.ResourceVariable(
name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
self.assertEqual("(10, 20, 35)", str(v.shape))
self.assertEqual("(10, 20, 35)", str(v.get_shape()))
self.assertEqual("(10, 20, 35)", str(v.value().shape))
self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
if context.in_graph_mode():
self.assertEqual(
"<unknown>",
str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
def testSetInitialValue(self):
with self.test_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = resource_variable_ops.ResourceVariable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
def testVariableEager(self):
  """End-to-end exercise of ResourceVariable under eager execution:
  construction (tensor and callable initial values), properties,
  assign/assign_add/assign_sub/load, tensor conversion and operators.
  """
  with context.eager_mode():
    init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
    constraint = lambda x: x
    with ops.name_scope("foo"):
      v = resource_variable_ops.ResourceVariable(
          name="var7",
          initial_value=init,
          caching_device="cpu:0",
          constraint=constraint)
    # Test properties
    self.assertEqual(dtypes.int32, v.dtype)
    self.assertEqual("foo/var7:0", v.name)
    self.assertAllEqual([10, 20, 35], v.shape.as_list())
    self.assertEqual(context.get_default_context().device_name, v.device)
    self.assertTrue(isinstance(v.handle, ops.EagerTensor))
    self.assertEqual(constraint, v.constraint)
    self.assertAllEqual(init.numpy(), v.read_value().numpy())
    self.assertAllEqual(init.numpy(), v.value().numpy())

    # Callable init.
    callable_init = lambda: init * 2
    v2 = resource_variable_ops.ResourceVariable(
        initial_value=callable_init, name="var7")
    self.assertEqual("var7:0", v2.name)
    self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())

    # Test assign_add.
    new_v2_val = v2.assign_add(v.read_value())
    self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())

    # Test assign_sub.
    new_v2_val = v2.assign_sub(v.read_value())
    self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())

    # Test assign.
    v2.assign(v.read_value())
    self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())

    # Test load
    v2.load(2 * v.read_value())
    self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())

    # Test convert_to_tensor
    t = ops.convert_to_tensor(v)
    self.assertAllEqual(t.numpy(), v.read_value().numpy())

    # Test operations
    self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| |
from __future__ import division, with_statement
import gzip
import mmap
import os
import sys
import tempfile
import warnings
import zipfile
import numpy as np
from numpy import memmap as Memmap
from .extern.six import b, string_types
from .extern.six.moves import urllib, reduce
from .util import (isreadable, iswritable, isfile, fileobj_open, fileobj_name,
fileobj_closed, fileobj_mode, _array_from_file,
_array_to_file, _write_string, encode_ascii)
# Maps PyFITS-specific file mode names to the appropriate file modes to use
# for the underlying raw files
PYFITS_MODES = {
'readonly': 'rb',
'copyonwrite': 'rb',
'update': 'rb+',
'append': 'ab+',
'ostream': 'wb',
'denywrite': 'rb'}
# This is the old name of the PYFITS_MODES dict; it is maintained here for
# backwards compatibility and should be removed no sooner than PyFITS 3.4
PYTHON_MODES = PYFITS_MODES
# Maps OS-level file modes to the appropriate PyFITS specific mode to use
# when given file objects but no mode specified; obviously in PYFITS_MODES
# there are overlaps; for example 'readonly' and 'denywrite' both require
# the file to be opened in 'rb' mode. But 'readonly' is the default
# behavior for such files if not otherwise specified.
# Note: 'ab' is only supported for 'ostream' which is output-only.
FILE_MODES = {
'rb': 'readonly', 'rb+': 'update',
'wb': 'ostream', 'wb+': 'update',
'ab': 'ostream', 'ab+': 'append'}
# readonly actually uses copyonwrite for mmap so that readonly without mmap and
# with mmap still have to same behavior with regard to updating the array. To
# get a truly readonly mmap use denywrite
# the name 'denywrite' comes from a deprecated flag to mmap() on Linux--it
# should be clarified that 'denywrite' mode is not directly analogous to the
# use of that flag; it was just taken, for lack of anything better, as a name
# that means something like "read only" but isn't readonly.
MEMMAP_MODES = {'readonly': 'c', 'copyonwrite': 'c', 'update': 'r+',
'append': 'c', 'denywrite': 'r'}
# TODO: Eventually raise a warning, and maybe even later disable the use of
# 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however,
# that would generate too many warnings for too many users. If nothing else,
# wait until the new logging system is in place.
GZIP_MAGIC = b('\x1f\x8b\x08')
PKZIP_MAGIC = b('\x50\x4b\x03\x04')
class _File(object):
"""
Represents a FITS file on disk (or in some other file-like object).
"""
# See self._test_mmap
_mmap_available = None
def __init__(self, fileobj=None, mode=None, memmap=False, clobber=False):
    """Wrap ``fileobj`` (a filename, URL, file object, GzipFile, ZipFile
    or other file-like object) for FITS I/O.

    With ``fileobj=None`` a "simulate only" stub is created that performs
    no I/O.  ``mode`` is a PYFITS_MODES key; when omitted it is derived
    from the file object's raw mode (defaulting to 'readonly').
    ``clobber`` allows overwriting an existing file in 'ostream' mode.
    """
    if fileobj is None:
        # No backing file at all: record the requested settings and
        # mark the instance simulate-only.
        self.__file = None
        self.closed = False
        self.binary = True
        self.mode = mode
        self.memmap = memmap
        self.compression = None
        self.readonly = False
        self.writeonly = False
        self.simulateonly = True
        return
    else:
        self.simulateonly = False

    if mode is None:
        if _is_random_access_file_backed(fileobj):
            fmode = fileobj_mode(fileobj)
            # If the mode is unsupported just leave it as None; we'll
            # catch this case below
            mode = FILE_MODES.get(fmode)
        else:
            mode = 'readonly'  # The default

    if mode not in PYFITS_MODES:
        raise ValueError("Mode '%s' not recognized" % mode)

    if (isinstance(fileobj, string_types) and
            mode not in ('ostream', 'append') and
            not os.path.exists(fileobj)):
        # Not writing file and file does not exist on local machine and
        # name does not begin with a drive letter (Windows), try to get it
        # over the web.
        try:
            if not os.path.splitdrive(fileobj)[0]:
                # Basically if the filename (on Windows anyways) doesn't
                # have a drive letter try to open it as a URL
                self.name, _ = urllib.request.urlretrieve(fileobj)
            else:
                # Otherwise the file was already not found so just raise
                # a ValueError
                raise ValueError("File not found")
        except (TypeError, ValueError, IOError):
            # A couple different exceptions can occur here when passing a
            # filename into urlretrieve in Python 3
            raise IOError('File does not exist: %r' % fileobj)
    else:
        self.name = fileobj_name(fileobj)

    self.closed = False
    self.binary = True
    self.mode = mode
    self.memmap = memmap

    # Underlying fileobj is a file-like object, but an actual file object
    self.file_like = False

    # More defaults to be adjusted below as necessary
    self.compression = None
    self.readonly = False
    self.writeonly = False

    # Initialize the internal self.__file object; dispatch on the kind
    # of object we were handed.
    if _is_random_access_file_backed(fileobj):
        self._open_fileobj(fileobj, mode, clobber)
    elif isinstance(fileobj, string_types):
        self._open_filename(fileobj, mode, clobber)
    else:
        self._open_filelike(fileobj, mode, clobber)

    if isinstance(fileobj, gzip.GzipFile):
        self.compression = 'gzip'
    elif isinstance(fileobj, zipfile.ZipFile):
        # Reading from zip files is supported but not writing (yet)
        self.compression = 'zip'

    # Compressed files cannot be updated in place, hence the extra
    # readonly/writeonly coercion for compressed 'update'/'append'.
    if (mode in ('readonly', 'copyonwrite', 'denywrite') or
            (self.compression and mode == 'update')):
        self.readonly = True
    elif (mode == 'ostream' or
            (self.compression and mode == 'append')):
        self.writeonly = True

    # For 'ab+' mode, the pointer is at the end after the open in
    # Linux, but is at the beginning in Solaris.
    if (mode == 'ostream' or self.compression or
            not hasattr(self.__file, 'seek')):
        # For output stream start with a truncated file.
        # For compressed files we can't really guess at the size
        self.size = 0
    else:
        pos = self.__file.tell()
        self.__file.seek(0, 2)
        self.size = self.__file.tell()
        self.__file.seek(pos)

    if self.memmap:
        if not isfile(self.__file):
            # mmap requires a real OS-level file
            self.memmap = False
        elif not self.readonly and not self._test_mmap():
            # Test mmap.flush--see
            # https://github.com/astropy/astropy/issues/968
            self.memmap = False
def __repr__(self):
return '<%s.%s %s>' % (self.__module__, self.__class__.__name__,
self.__file)
# Support the 'with' statement
def __enter__(self):
    # The _File object is its own context manager target.
    return self
def __exit__(self, type, value, traceback):
    # Always close on exit; exceptions are not suppressed.
    self.close()
def readable(self):
if self.writeonly:
return False
return isreadable(self.__file)
def read(self, size=None):
    """Read up to ``size`` bytes (all remaining if None); raises EOFError
    if the underlying object is not readable at all.
    """
    if not hasattr(self.__file, 'read'):
        raise EOFError
    try:
        return self.__file.read(size)
    except IOError:
        # On some versions of Python, it appears, GzipFile will raise an
        # IOError if you try to read past its end (as opposed to just
        # returning '')
        if self.compression == 'gzip':
            return ''
        raise
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
"""
Similar to file.read(), but returns the contents of the underlying
file as a numpy array (or mmap'd array if memmap=True) rather than a
string.
Usually it's best not to use the `size` argument with this method, but
it's provided for compatibility.
"""
if not hasattr(self.__file, 'read'):
raise EOFError
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if size and size % dtype.itemsize != 0:
raise ValueError('size %d not a multiple of %s' % (size, dtype))
if isinstance(shape, int):
shape = (shape,)
if size and shape:
actualsize = sum(dim * dtype.itemsize for dim in shape)
if actualsize < size:
raise ValueError('size %d is too few bytes for a %s array of '
'%s' % (size, shape, dtype))
if actualsize < size:
raise ValueError('size %d is too many bytes for a %s array of '
'%s' % (size, shape, dtype))
if size and not shape:
shape = (size // dtype.itemsize,)
if not (size or shape):
warnings.warn('No size or shape given to readarray(); assuming a '
'shape of (1,)')
shape = (1,)
if self.memmap:
return Memmap(self.__file, offset=offset,
mode=MEMMAP_MODES[self.mode], dtype=dtype,
shape=shape).view(np.ndarray)
else:
count = reduce(lambda x, y: x * y, shape)
pos = self.__file.tell()
self.__file.seek(offset)
data = _array_from_file(self.__file, dtype, count, '')
data.shape = shape
self.__file.seek(pos)
return data
def writable(self):
if self.readonly:
return False
return iswritable(self.__file)
def write(self, string):
    # Silently a no-op when the underlying object is not writable
    # (e.g. the simulate-only stub where self.__file is None).
    if hasattr(self.__file, 'write'):
        _write_string(self.__file, string)
def writearray(self, array):
    """
    Similar to file.write(), but writes a numpy array instead of a string.

    Also like file.write(), a flush() or close() may be needed before
    the file on disk reflects the data written.  No-op if the underlying
    object has no write() method.
    """
    if hasattr(self.__file, 'write'):
        _array_to_file(array, self.__file)
def flush(self):
    # No-op for file-like objects without flush().
    if hasattr(self.__file, 'flush'):
        self.__file.flush()
def seek(self, offset, whence=0):
    """Seek like file.seek(), with hand-rolled ``whence`` support for
    GzipFile; warns if the position ends up past the recorded size
    (a sign the file was truncated on disk).
    """
    # In newer Python versions, GzipFiles support the whence argument, but
    # I don't think it was added until 2.6; instead of assuming it's
    # present, we implement our own support for it here
    if not hasattr(self.__file, 'seek'):
        return
    if isinstance(self.__file, gzip.GzipFile):
        if whence:
            if whence == 1:
                # Relative seek: translate into an absolute offset using
                # the GzipFile's current uncompressed offset.
                offset = self.__file.offset + offset
            else:
                raise ValueError('Seek from end not supported')
        self.__file.seek(offset)
    else:
        self.__file.seek(offset, whence)

    pos = self.__file.tell()
    if self.size and pos > self.size:
        warnings.warn('File may have been truncated: actual file length '
                      '(%i) is smaller than the expected size (%i)' %
                      (self.size, pos))
def tell(self):
if not hasattr(self.__file, 'tell'):
raise EOFError
return self.__file.tell()
def truncate(self, size=None):
    # No-op for file-like objects without truncate().
    if hasattr(self.__file, 'truncate'):
        self.__file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
if hasattr(self.__file, 'close'):
self.__file.close()
self.closed = True
def _overwrite_existing(self, clobber, fileobj, closed):
    """Overwrite an existing file if ``clobber`` is ``True``, otherwise
    raise an IOError.  The exact behavior of this method depends on the
    _File object state and is only meant for use within the ``_open_*``
    internal methods.
    """
    # Only act when there is actually existing content: a non-empty
    # file-like object (with a 'len' attribute) or a non-empty file on
    # disk.
    if ((self.file_like and
            (hasattr(fileobj, 'len') and fileobj.len > 0)) or
            (os.path.exists(self.name) and
             os.path.getsize(self.name) != 0)):
        if clobber:
            warnings.warn("Overwriting existing file %r." % self.name)
            if self.file_like and hasattr(fileobj, 'truncate'):
                fileobj.truncate(0)
            else:
                # On-disk file: close it (if open) and remove it so it
                # can be recreated.
                if not closed:
                    fileobj.close()
                os.remove(self.name)
        else:
            raise IOError("File %r already exists." % self.name)
def _open_fileobj(self, fileobj, mode, clobber):
    """Open a FITS file from a file object or a GzipFile object."""
    closed = fileobj_closed(fileobj)
    fmode = fileobj_mode(fileobj) or PYFITS_MODES[mode]

    if mode == 'ostream':
        self._overwrite_existing(clobber, fileobj, closed)

    if not closed:
        # Although we have a specific mapping in PYFITS_MODES from our
        # custom file modes to raw file object modes, many of the latter
        # can be used appropriately for the former.  So determine whether
        # the modes match up appropriately
        if ((mode in ('readonly', 'denywrite', 'copyonwrite') and
                not ('r' in fmode or '+' in fmode)) or
                (mode == 'append' and fmode not in ('ab+', 'rb+')) or
                (mode == 'ostream' and
                 not ('w' in fmode or 'a' in fmode or '+' in fmode)) or
                (mode == 'update' and fmode not in ('rb+', 'wb+'))):
            raise ValueError(
                "Mode argument '%s' does not match mode of the input "
                "file (%s)." % (mode, fmode))
        self.__file = fileobj
    elif isfile(fileobj):
        # Closed real file: reopen it by name with the right raw mode.
        self.__file = fileobj_open(self.name, PYFITS_MODES[mode])
    else:
        # Closed GzipFile: reopen it by name as gzip.
        self.__file = gzip.open(self.name, PYFITS_MODES[mode])

    if fmode == 'ab+':
        # Return to the beginning of the file--in Python 3 when opening in
        # append mode the file pointer is at the end of the file
        self.__file.seek(0)
def _open_filelike(self, fileobj, mode, clobber):
    """Open a FITS file from a file-like object, i.e. one that has
    read and/or write methods.
    """
    self.file_like = True
    self.__file = fileobj

    if fileobj_closed(fileobj):
        raise IOError("Cannot read from/write to a closed file-like "
                      "object (%r)." % fileobj)

    if isinstance(fileobj, zipfile.ZipFile):
        self._open_zipfile(fileobj, mode)
        self.__file.seek(0)
        # We can bypass any additional checks at this point since now
        # self.__file points to the temp file extracted from the zip
        return

    # If there are no seek or tell methods then set the mode to
    # output streaming.
    if (not hasattr(self.__file, 'seek') or
            not hasattr(self.__file, 'tell')):
        self.mode = mode = 'ostream'

    if mode == 'ostream':
        self._overwrite_existing(clobber, fileobj, False)

    # Any "writeable" mode requires a write() method on the file object
    if (self.mode in ('update', 'append', 'ostream') and
            not hasattr(self.__file, 'write')):
        raise IOError("File-like object does not have a 'write' "
                      "method, required for mode '%s'."
                      % self.mode)

    # Any mode except for 'ostream' requires readability
    if self.mode != 'ostream' and not hasattr(self.__file, 'read'):
        raise IOError("File-like object does not have a 'read' "
                      "method, required for mode %r."
                      % self.mode)
def _open_filename(self, filename, mode, clobber):
    """Open a FITS file from a filename string, sniffing gzip/zip
    compression from the extension or the file's magic bytes.
    """
    if mode == 'ostream':
        self._overwrite_existing(clobber, None, True)

    if os.path.exists(self.name):
        with fileobj_open(self.name, 'rb') as f:
            # First four bytes are enough for both magic signatures.
            magic = f.read(4)
    else:
        magic = b('')

    ext = os.path.splitext(self.name)[1]
    if ext == '.gz' or magic.startswith(GZIP_MAGIC):
        # Handle gzip files
        self.__file = gzip.open(self.name, PYFITS_MODES[mode])
        self.compression = 'gzip'
    elif ext == '.zip' or magic.startswith(PKZIP_MAGIC):
        # Handle zip files
        self._open_zipfile(self.name, mode)
    else:
        self.__file = fileobj_open(self.name, PYFITS_MODES[mode])
        # Make certain we're back at the beginning of the file
        self.__file.seek(0)
def _open_zipfile(self, fileobj, mode):
    """Limited support for zipfile.ZipFile objects containing a single
    file.  Allows reading only for now by extracting the file to a
    tempfile.
    """
    if mode in ('update', 'append'):
        raise IOError(
            "Writing to zipped fits files is not currently "
            "supported")

    # Accept either an open ZipFile or anything ZipFile() can open
    # (filename or file object); only close what we opened ourselves.
    if not isinstance(fileobj, zipfile.ZipFile):
        zfile = zipfile.ZipFile(fileobj)
        close = True
    else:
        zfile = fileobj
        close = False

    namelist = zfile.namelist()
    if len(namelist) != 1:
        raise IOError(
            "Zip files with multiple members are not supported.")
    # Extract the single member into a temp file that self.__file then
    # wraps; the temp file is deleted when it is closed.
    self.__file = tempfile.NamedTemporaryFile(suffix='.fits')
    self.__file.write(zfile.read(namelist[0]))

    if close:
        zfile.close()
    self.compression = 'zip'
def _test_mmap(self):
    """Tests that mmap, and specifically mmap.flush works.  This may
    be the case on some uncommon platforms (see
    https://github.com/astropy/astropy/issues/968).

    Returns False (and issues a warning) if mmap or mmap.flush is
    unusable; the caller is responsible for disabling ``self.memmap``.
    The result is cached on the class in ``_mmap_available``.
    """
    if self._mmap_available is not None:
        return self._mmap_available

    tmpfd, tmpname = tempfile.mkstemp()
    try:
        # Windows does not allow mappings on empty files
        os.write(tmpfd, encode_ascii(' '))
        os.fsync(tmpfd)
        try:
            mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
        except mmap.error:
            exc = sys.exc_info()[1]
            warnings.warn('Failed to create mmap: %s; mmap use will be '
                          'disabled' % exc)
            _File._mmap_available = False
            del exc
            return False
        try:
            mm.flush()
        except mmap.error:
            warnings.warn('mmap.flush is unavailable on this platform; '
                          'using mmap in writeable mode will be disabled')
            _File._mmap_available = False
            return False
        finally:
            mm.close()
    finally:
        os.close(tmpfd)
        os.remove(tmpname)

    _File._mmap_available = True
    return True
def _is_random_access_file_backed(fileobj):
    """Return `True` if *fileobj* supports random access: a real `file`
    or `io.FileIO` object, or a `gzip.GzipFile`.

    Zip archives can be read, but not randomly accessed, and already
    opened `zipfile.ZipFile` objects are not yet supported for direct
    reading, so both are excluded here.
    """
    if isfile(fileobj):
        return True
    return isinstance(fileobj, gzip.GzipFile)
# ---- file boundary: the code below is an unrelated module (OpenStack Nova EC2 API) ----
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""
import base64
import time
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api import validator
from nova import block_device
from nova import compute
from nova.compute import instance_types
from nova.compute import vm_states
from nova import db
from nova import exception
from nova import flags
from nova.image import s3
from nova import network
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova import utils
from nova import volume
# Module-level handles: the global flag registry, this module's logger,
# and the shared quota engine used by the EC2 API layer.
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def validate_ec2_id(val):
    """Raise InvalidInstanceIDMalformed unless *val* is a well-formed
    EC2 identifier string."""
    malformed = exception.InvalidInstanceIDMalformed
    if not validator.validate_str()(val):
        raise malformed(val)
    try:
        ec2utils.ec2_id_to_id(val)
    except exception.InvalidEc2Id:
        raise malformed(val)
# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
# ApiReference-ItemType-InstanceStateType.html
# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 |
# stopped 80
# Translation from nova's internal vm_states to the EC2 state names
# enumerated in the comment above; any vm_state missing from this table
# is passed through unchanged by _state_description() below.
_STATE_DESCRIPTION_MAP = {
    None: inst_state.PENDING,
    vm_states.ACTIVE: inst_state.RUNNING,
    vm_states.BUILDING: inst_state.PENDING,
    vm_states.DELETED: inst_state.TERMINATED,
    vm_states.SOFT_DELETED: inst_state.TERMINATED,
    vm_states.STOPPED: inst_state.STOPPED,
    vm_states.PAUSED: inst_state.PAUSE,
    vm_states.SUSPENDED: inst_state.SUSPEND,
    vm_states.RESCUED: inst_state.RESCUE,
    vm_states.RESIZED: inst_state.RESIZE,
}
def _state_description(vm_state, _shutdown_terminate):
    """Map a nova vm_state to an EC2 ``{'code', 'name'}`` state struct."""
    # Note(maoy): EC2's shutdown_terminate behavior is deliberately not
    # emulated, so the flag is accepted but ignored.
    name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
    return dict(code=inst_state.name_to_code(name), name=name)
def _parse_block_device_mapping(bdm):
"""Parse BlockDeviceMappingItemType into flat hash
BlockDevicedMapping.<N>.DeviceName
BlockDevicedMapping.<N>.Ebs.SnapshotId
BlockDevicedMapping.<N>.Ebs.VolumeSize
BlockDevicedMapping.<N>.Ebs.DeleteOnTermination
BlockDevicedMapping.<N>.Ebs.NoDevice
BlockDevicedMapping.<N>.VirtualName
=> remove .Ebs and allow volume id in SnapshotId
"""
ebs = bdm.pop('ebs', None)
if ebs:
ec2_id = ebs.pop('snapshot_id', None)
if ec2_id:
id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
if ec2_id.startswith('snap-'):
bdm['snapshot_id'] = id
elif ec2_id.startswith('vol-'):
bdm['volume_id'] = id
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
return bdm
def _properties_get_mappings(properties):
    # Normalize the image's 'mappings' property; the helper presumably
    # prefixes bare device names with '/dev/' -- see nova.block_device.
    return block_device.mappings_prepend_dev(properties.get('mappings', []))
def _format_block_device_mapping(bdm):
    """Construct BlockDeviceMappingItemType
    {'device_name': '...', 'snapshot_id': , ...}
    => BlockDeviceMappingItemType
    """
    keys = (('deviceName', 'device_name'),
            ('virtualName', 'virtual_name'))
    item = {}
    for name, k in keys:
        if k in bdm:
            item[name] = bdm[k]
    if bdm.get('no_device'):
        item['noDevice'] = True
    if ('snapshot_id' in bdm) or ('volume_id' in bdm):
        ebs_keys = (('snapshotId', 'snapshot_id'),
                    ('snapshotId', 'volume_id'),  # snapshotId is abused
                    ('volumeSize', 'volume_size'),
                    ('deleteOnTermination', 'delete_on_termination'))
        ebs = {}
        for name, k in ebs_keys:
            if k in bdm:
                # Both snapshot and volume ids land in 'snapshotId', each
                # converted to its own ec2 id form.
                if k == 'snapshot_id':
                    ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
                elif k == 'volume_id':
                    ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
                else:
                    ebs[name] = bdm[k]
        # NOTE(review): bare assert is stripped under python -O; confirm
        # this invariant is enforced elsewhere before relying on it.
        assert 'snapshotId' in ebs
        item['ebs'] = ebs
    return item
def _format_mappings(properties, result):
    """Format multiple BlockDeviceMappingItemType entries into *result*
    (mutated in place; only sets 'blockDeviceMapping' when non-empty)."""
    # Start with the image's swap/ephemeral virtual-name mappings only.
    mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
                for m in _properties_get_mappings(properties)
                if block_device.is_swap_or_ephemeral(m['virtual'])]

    block_device_mapping = [_format_block_device_mapping(bdm) for bdm in
                            properties.get('block_device_mapping', [])]

    # NOTE(yamahata): overwrite mappings with block_device_mapping
    for bdm in block_device_mapping:
        for i in range(len(mappings)):
            if bdm['deviceName'] == mappings[i]['deviceName']:
                # Drop the duplicate, then stop scanning (index-based
                # deletion is safe because of the immediate break).
                del mappings[i]
                break
        mappings.append(bdm)

    # NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
    mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]
    if mappings:
        result['blockDeviceMapping'] = mappings
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
    def __init__(self):
        # Service facades this controller dispatches to.  The compute API
        # is handed the same network/volume/security-group API instances
        # so every EC2 call goes through one consistent set of services.
        self.image_service = s3.S3ImageService()
        self.network_api = network.API()
        self.volume_api = volume.API()
        # NOTE(review): CloudSecurityGroupAPI is defined elsewhere in
        # this module.
        self.security_group_api = CloudSecurityGroupAPI()
        self.compute_api = compute.API(network_api=self.network_api,
                                   volume_api=self.volume_api,
                                   security_group_api=self.security_group_api)
        self.keypair_api = compute.api.KeypairAPI()
    def __str__(self):
        # Short fixed identifier used when the controller is printed.
        return 'CloudController'
def _get_image_state(self, image):
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
if state == 'active':
state = 'available'
return image['properties'].get('image_state', state)
def describe_availability_zones(self, context, **kwargs):
if ('zone_name' in kwargs and
'verbose' in kwargs['zone_name'] and
context.is_admin):
return self._describe_availability_zones_verbose(context,
**kwargs)
else:
return self._describe_availability_zones(context, **kwargs)
def _get_zones(self, context):
"""Return available and unavailable zones."""
enabled_services = db.service_get_all(context, False)
disabled_services = db.service_get_all(context, True)
available_zones = []
for zone in [service.availability_zone for service
in enabled_services]:
if not zone in available_zones:
available_zones.append(zone)
not_available_zones = []
for zone in [service.availability_zone for service in disabled_services
if not service['availability_zone'] in available_zones]:
if not zone in not_available_zones:
not_available_zones.append(zone)
return (available_zones, not_available_zones)
def _describe_availability_zones(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = self._get_zones(ctxt)
result = []
for zone in available_zones:
result.append({'zoneName': zone,
'zoneState': "available"})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
    def _describe_availability_zones_verbose(self, context, **kwargs):
        """Admin zone listing that additionally renders, as extra
        pseudo-zone rows, each host in a zone and each service on that
        host with its enabled/disabled state, liveness marker and last
        update time."""
        ctxt = context.elevated()
        available_zones, not_available_zones = self._get_zones(ctxt)

        # Index the enabled services by zone and by host for the tree.
        enabled_services = db.service_get_all(context, False)
        zone_hosts = {}
        host_services = {}
        for service in enabled_services:
            zone_hosts.setdefault(service.availability_zone, [])
            if not service.host in zone_hosts[service.availability_zone]:
                zone_hosts[service.availability_zone].append(service.host)

            host_services.setdefault(service.host, [])
            host_services[service.host].append(service)

        result = []
        for zone in available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "available"})
            for host in zone_hosts[zone]:
                # The '|-' prefixes draw an ASCII tree in the zone column.
                result.append({'zoneName': '|- %s' % host,
                               'zoneState': ''})

                for service in host_services[host]:
                    alive = utils.service_is_up(service)
                    # ':-)' marks a live service, 'XXX' a dead one.
                    art = (alive and ":-)") or "XXX"
                    active = 'enabled'
                    if service['disabled']:
                        active = 'disabled'
                    result.append({'zoneName': '| |- %s' % service['binary'],
                                   'zoneState': ('%s %s %s'
                                                 % (active, art,
                                                    service['updated_at']))})

        for zone in not_available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "not available"})
        return {'availabilityZoneInfo': result}
def describe_regions(self, context, region_name=None, **kwargs):
if FLAGS.region_list:
regions = []
for region in FLAGS.region_list:
name, _sep, host = region.partition('=')
endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme,
host,
FLAGS.ec2_port,
FLAGS.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
FLAGS.ec2_host,
FLAGS.ec2_port,
FLAGS.ec2_path)}]
return {'regionInfo': regions}
def describe_snapshots(self,
context,
snapshot_id=None,
owner=None,
restorable_by=None,
**kwargs):
if snapshot_id:
snapshots = []
for ec2_id in snapshot_id:
internal_id = ec2utils.ec2_snap_id_to_uuid(ec2_id)
snapshot = self.volume_api.get_snapshot(
context,
snapshot_id=internal_id)
snapshots.append(snapshot)
else:
snapshots = self.volume_api.get_all_snapshots(context)
snapshots = [self._format_snapshot(context, s) for s in snapshots]
return {'snapshotSet': snapshots}
def _format_snapshot(self, context, snapshot):
s = {}
s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id'])
s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id'])
s['status'] = snapshot['status']
s['startTime'] = snapshot['created_at']
s['progress'] = snapshot['progress']
s['ownerId'] = snapshot['project_id']
s['volumeSize'] = snapshot['volume_size']
s['description'] = snapshot['display_description']
return s
    def create_snapshot(self, context, volume_id, **kwargs):
        """EC2 CreateSnapshot: snapshot the given ec2 volume id.

        Optional kwargs: name, description, and force (which routes to
        the force-snapshot API instead).
        """
        validate_ec2_id(volume_id)
        LOG.audit(_("Create snapshot of volume %s"), volume_id,
                  context=context)
        volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
        volume = self.volume_api.get(context, volume_id)
        args = (context, volume, kwargs.get('name'), kwargs.get('description'))
        if kwargs.get('force', False):
            snapshot = self.volume_api.create_snapshot_force(*args)
        else:
            snapshot = self.volume_api.create_snapshot(*args)

        # Register an ec2 id mapping for the new snapshot before
        # formatting the response.
        db.ec2_snapshot_create(context, snapshot['id'])
        return self._format_snapshot(context, snapshot)
def delete_snapshot(self, context, snapshot_id, **kwargs):
snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id)
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
self.volume_api.delete_snapshot(context, snapshot)
return True
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = self.keypair_api.get_key_pairs(context, context.user_id)
if not key_name is None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
#If looking for non existent key pair
if key_name is not None and not key_pairs:
msg = _('Could not find key pair(s): %s') % ','.join(key_name)
raise exception.EC2APIError(msg)
result = []
for key_pair in key_pairs:
# filter out the vpn keys
suffix = FLAGS.vpn_key_suffix
if context.is_admin or not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
'keyFingerprint': key_pair['fingerprint'],
})
return {'keySet': result}
    def create_key_pair(self, context, key_name, **kwargs):
        """EC2 CreateKeyPair: generate a keypair for the caller and
        return its name, fingerprint and private key material.

        :raises: EC2APIError on quota or validation failure;
            KeyPairExists for a duplicate name.
        """
        LOG.audit(_("Create key pair %s"), key_name, context=context)
        try:
            keypair = self.keypair_api.create_key_pair(context,
                                                       context.user_id,
                                                       key_name)
        except exception.KeypairLimitExceeded:
            msg = _("Quota exceeded, too many key pairs.")
            raise exception.EC2APIError(msg)
        except exception.InvalidKeypair:
            msg = _("Keypair data is invalid")
            raise exception.EC2APIError(msg)
        except exception.KeyPairExists:
            # NOTE(review): unlike import_key_pair below, a duplicate name
            # here re-raises KeyPairExists rather than EC2APIError --
            # confirm callers depend on this before unifying.
            msg = _("Key pair '%s' already exists.") % key_name
            raise exception.KeyPairExists(msg)
        return {'keyName': key_name,
                'keyFingerprint': keypair['fingerprint'],
                'keyMaterial': keypair['private_key']}
    # TODO(vish): when context is no longer an object, pass it here
    def import_key_pair(self, context, key_name, public_key_material,
                        **kwargs):
        """EC2 ImportKeyPair: register a base64-encoded public key under
        *key_name* for the calling user.

        :raises: EC2APIError on quota, validation, or duplicate-name
            failures.
        """
        LOG.audit(_("Import key %s"), key_name, context=context)
        public_key = base64.b64decode(public_key_material)
        try:
            keypair = self.keypair_api.import_key_pair(context,
                                                       context.user_id,
                                                       key_name,
                                                       public_key)
        except exception.KeypairLimitExceeded:
            msg = _("Quota exceeded, too many key pairs.")
            raise exception.EC2APIError(msg)
        except exception.InvalidKeypair:
            msg = _("Keypair data is invalid")
            raise exception.EC2APIError(msg)
        except exception.KeyPairExists:
            msg = _("Key pair '%s' already exists.") % key_name
            raise exception.EC2APIError(msg)
        return {'keyName': key_name,
                'keyFingerprint': keypair['fingerprint']}
    def delete_key_pair(self, context, key_name, **kwargs):
        """EC2 DeleteKeyPair; always returns True."""
        LOG.audit(_("Delete key pair %s"), key_name, context=context)
        try:
            self.keypair_api.delete_key_pair(context, context.user_id,
                                             key_name)
        except exception.NotFound:
            # aws returns true even if the key doesn't exist
            pass
        return True
def describe_security_groups(self, context, group_name=None, group_id=None,
**kwargs):
search_opts = ec2utils.search_opts_from_filters(kwargs.get('filter'))
raw_groups = self.security_group_api.list(context,
group_name,
group_id,
context.project_id,
search_opts=search_opts)
groups = [self._format_security_group(context, g) for g in raw_groups]
return {'securityGroupInfo':
list(sorted(groups,
key=lambda k: (k['ownerId'], k['groupName'])))}
    def _format_security_group(self, context, group):
        """Build an EC2 security-group structure for one group.

        Group-sourced rules with no protocol are expanded into explicit
        icmp/tcp/udp entries spanning all ports.
        """
        g = {}
        g['groupDescription'] = group.description
        g['groupName'] = group.name
        g['ownerId'] = group.project_id
        g['ipPermissions'] = []
        for rule in group.rules:
            r = {}
            r['groups'] = []
            r['ipRanges'] = []
            if rule.group_id:
                # Rule sourced from another security group.
                source_group = db.security_group_get(context, rule.group_id)
                r['groups'] += [{'groupName': source_group.name,
                                 'userId': source_group.project_id}]
                if rule.protocol:
                    r['ipProtocol'] = rule.protocol
                    r['fromPort'] = rule.from_port
                    r['toPort'] = rule.to_port
                    g['ipPermissions'] += [dict(r)]
                else:
                    # No protocol recorded: emit one entry per protocol,
                    # covering the full port range (ICMP uses -1/-1).
                    for protocol, min_port, max_port in (('icmp', -1, -1),
                                                         ('tcp', 1, 65535),
                                                         ('udp', 1, 65535)):
                        r['ipProtocol'] = protocol
                        r['fromPort'] = min_port
                        r['toPort'] = max_port
                        # dict(r) copies r so later mutations don't alias.
                        g['ipPermissions'] += [dict(r)]
            else:
                # CIDR-sourced rule.
                r['ipProtocol'] = rule.protocol
                r['fromPort'] = rule.from_port
                r['toPort'] = rule.to_port
                r['ipRanges'] += [{'cidrIp': rule.cidr}]
                g['ipPermissions'] += [r]
        return g
def _rule_args_to_dict(self, context, kwargs):
rules = []
if not 'groups' in kwargs and not 'ip_ranges' in kwargs:
rule = self._rule_dict_last_step(context, **kwargs)
if rule:
rules.append(rule)
return rules
if 'ip_ranges' in kwargs:
rules = self._cidr_args_split(kwargs)
else:
rules = [kwargs]
finalset = []
for rule in rules:
if 'groups' in rule:
groups_values = self._groups_args_split(rule)
for groups_value in groups_values:
final = self._rule_dict_last_step(context, **groups_value)
finalset.append(final)
else:
final = self._rule_dict_last_step(context, **rule)
finalset.append(final)
return finalset
def _cidr_args_split(self, kwargs):
cidr_args_split = []
cidrs = kwargs['ip_ranges']
for key, cidr in cidrs.iteritems():
mykwargs = kwargs.copy()
del mykwargs['ip_ranges']
mykwargs['cidr_ip'] = cidr['cidr_ip']
cidr_args_split.append(mykwargs)
return cidr_args_split
def _groups_args_split(self, kwargs):
groups_args_split = []
groups = kwargs['groups']
for key, group in groups.iteritems():
mykwargs = kwargs.copy()
del mykwargs['groups']
if 'group_name' in group:
mykwargs['source_security_group_name'] = group['group_name']
if 'user_id' in group:
mykwargs['source_security_group_owner_id'] = group['user_id']
if 'group_id' in group:
mykwargs['source_security_group_id'] = group['group_id']
groups_args_split.append(mykwargs)
return groups_args_split
    def _rule_dict_last_step(self, context, to_port=None, from_port=None,
                             ip_protocol=None, cidr_ip=None, user_id=None,
                             source_security_group_name=None,
                             source_security_group_owner_id=None):
        """Build a single ingress-rule dict.

        Group-sourced rules (source_security_group_name set) resolve the
        source group in the db with an elevated context; otherwise a CIDR
        rule is built from cidr_ip.  user_id is accepted for EC2 API
        compatibility but unused here.
        """
        if source_security_group_name:
            source_project_id = self._get_source_project_id(context,
                source_security_group_owner_id)

            source_security_group = db.security_group_get_by_name(
                    context.elevated(),
                    source_project_id,
                    source_security_group_name)
            notfound = exception.SecurityGroupNotFound
            if not source_security_group:
                raise notfound(security_group_id=source_security_group_name)
            group_id = source_security_group['id']
            return self.security_group_api.new_group_ingress_rule(
                                    group_id, ip_protocol, from_port, to_port)
        else:
            cidr = self.security_group_api.parse_cidr(cidr_ip)
            return self.security_group_api.new_cidr_ingress_rule(
                                        cidr, ip_protocol, from_port, to_port)
def _validate_group_identifier(self, group_name, group_id):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
def _validate_rulevalues(self, rulesvalues):
if not rulesvalues:
err = _("%s Not enough parameters to build a valid rule")
raise exception.EC2APIError(err % rulesvalues)
    def revoke_security_group_ingress(self, context, group_name=None,
                                      group_id=None, **kwargs):
        """EC2 RevokeSecurityGroupIngress: remove the matching existing
        rules; raises EC2APIError if nothing matched."""
        self._validate_group_identifier(group_name, group_id)

        security_group = self.security_group_api.get(context, group_name,
                                                     group_id)

        # A request either carries an ip_permissions list or is itself a
        # single flat rule.
        prevalues = kwargs.get('ip_permissions', [kwargs])

        rule_ids = []
        for values in prevalues:
            rulesvalues = self._rule_args_to_dict(context, values)
            self._validate_rulevalues(rulesvalues)
            for values_for_rule in rulesvalues:
                values_for_rule['parent_group_id'] = security_group.id
                # rule_exists returns the matching rule id (or a falsy
                # value when absent).
                rule_ids.append(self.security_group_api.rule_exists(
                        security_group, values_for_rule))

        # Keep only the rules that actually exist.
        rule_ids = [id for id in rule_ids if id]

        if rule_ids:
            self.security_group_api.remove_rules(context, security_group,
                                                 rule_ids)
            return True

        raise exception.EC2APIError(_("No rule for the specified parameters."))
# TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
    def authorize_security_group_ingress(self, context, group_name=None,
                                         group_id=None, **kwargs):
        """EC2 AuthorizeSecurityGroupIngress: add new ingress rules;
        raises EC2APIError on a duplicate rule or if nothing was built."""
        self._validate_group_identifier(group_name, group_id)

        security_group = self.security_group_api.get(context, group_name,
                                                     group_id)

        # A request either carries an ip_permissions list or is itself a
        # single flat rule.
        prevalues = kwargs.get('ip_permissions', [kwargs])
        postvalues = []
        for values in prevalues:
            rulesvalues = self._rule_args_to_dict(context, values)
            self._validate_rulevalues(rulesvalues)
            for values_for_rule in rulesvalues:
                values_for_rule['parent_group_id'] = security_group.id
                if self.security_group_api.rule_exists(security_group,
                                                       values_for_rule):
                    err = _('%s - This rule already exists in group')
                    raise exception.EC2APIError(err % values_for_rule)
                postvalues.append(values_for_rule)

        if postvalues:
            self.security_group_api.add_rules(context, security_group['id'],
                                           security_group['name'], postvalues)
            return True

        raise exception.EC2APIError(_("No rule for the specified parameters."))
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
source_parts = source_security_group_owner_id.split(':')
# If no project name specified, assume it's same as user name.
# Since we're looking up by project name, the user name is not
# used here. It's only read for EC2 API compatibility.
if len(source_parts) == 2:
source_project_id = source_parts[1]
else:
source_project_id = source_parts[0]
else:
source_project_id = context.project_id
return source_project_id
    def create_security_group(self, context, group_name, group_description):
        """EC2 CreateSecurityGroup.

        Validates the name (and, in strict mode, also the description)
        against the allowed character set before creating the group.
        """
        if isinstance(group_name, unicode):
            # Python 2: normalize to a utf-8 byte string.
            group_name = group_name.encode('utf-8')
        if FLAGS.ec2_strict_validation:
            # EC2 specification gives constraints for name and description:
            # Accepts alphanumeric characters, spaces, dashes, and underscores
            allowed = '^[a-zA-Z0-9_\- ]+$'
            self.security_group_api.validate_property(group_name, 'name',
                                                      allowed)
            self.security_group_api.validate_property(group_description,
                                                      'description', allowed)
        else:
            # Amazon accepts more symbols.
            # So, allow POSIX [:print:] characters.
            allowed = r'^[\x20-\x7E]+$'
            self.security_group_api.validate_property(group_name, 'name',
                                                      allowed)
            # NOTE(review): the description is not validated in this
            # branch -- confirm that is intentional.

        group_ref = self.security_group_api.create(context, group_name,
                                                   group_description)

        return {'securityGroupSet': [self._format_security_group(context,
                                                                 group_ref)]}
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
security_group = self.security_group_api.get(context, group_name,
group_id)
self.security_group_api.destroy(context, security_group)
return True
    def get_console_output(self, context, instance_id, **kwargs):
        """EC2 GetConsoleOutput; the output is returned base64-encoded."""
        LOG.audit(_("Get console output for instance %s"), instance_id,
                  context=context)
        # instance_id may be passed in as a list of instances
        if isinstance(instance_id, list):
            # Only the first requested instance is honored.
            ec2_id = instance_id[0]
        else:
            ec2_id = instance_id
        validate_ec2_id(ec2_id)
        instance_id = ec2utils.ec2_id_to_id(ec2_id)
        instance = self.compute_api.get(context, instance_id)
        output = self.compute_api.get_console_output(context, instance)
        now = timeutils.utcnow()
        # NOTE(review): 'InstanceId'/'Timestamp' capitalization differs
        # from the lowerCamelCase used elsewhere in this file -- confirm
        # the response serializer expects these exact keys.
        return {"InstanceId": ec2_id,
                "Timestamp": now,
                "output": base64.b64encode(output)}
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
validate_ec2_id(ec2_id)
internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
volume = self.volume_api.get(context, internal_id)
volumes.append(volume)
else:
volumes = self.volume_api.get_all(context)
volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
    def _format_volume(self, context, volume):
        """Build an EC2 volume structure from a nova volume dict."""
        instance_ec2_id = None
        instance_data = None
        if volume.get('instance_uuid', None):
            # Resolve the attached instance for the attachment info below.
            instance_uuid = volume['instance_uuid']
            instance = db.instance_get_by_uuid(context.elevated(),
                    instance_uuid)

            instance_ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
            instance_data = '%s[%s]' % (instance_ec2_id,
                                        instance['host'])
        v = {}
        v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id'])
        v['status'] = volume['status']
        v['size'] = volume['size']
        v['availabilityZone'] = volume['availability_zone']
        v['createTime'] = volume['created_at']
        if context.is_admin:
            # Admins get extra diagnostics folded into the status string.
            # NOTE(dprince): project_id and host_id are unset w/ Cinder
            v['status'] = '%s (%s, %s, %s, %s)' % (
                volume['status'],
                volume.get('project_id', ''),
                volume.get('host', ''),
                instance_data,
                volume['mountpoint'])
        if volume['attach_status'] == 'attached':
            v['attachmentSet'] = [{'attachTime': volume['attach_time'],
                                   'deleteOnTermination': False,
                                   'device': volume['mountpoint'],
                                   'instanceId': instance_ec2_id,
                                   'status': 'attached',
                                   'volumeId': v['volumeId']}]
        else:
            # Unattached volumes still carry one empty attachment entry.
            v['attachmentSet'] = [{}]
        if volume.get('snapshot_id') is not None:
            v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id'])
        else:
            v['snapshotId'] = None

        return v
    def create_volume(self, context, **kwargs):
        """EC2 CreateVolume, either empty (by size) or from a snapshot.

        Recognized kwargs: size, snapshot_id (ec2 id), name, description,
        volume_type, metadata, availability_zone.
        """
        snapshot_ec2id = kwargs.get('snapshot_id', None)
        if snapshot_ec2id is not None:
            snapshot_id = ec2utils.ec2_snap_id_to_uuid(kwargs['snapshot_id'])
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            LOG.audit(_("Create volume from snapshot %s"), snapshot_ec2id,
                      context=context)
        else:
            snapshot = None
            LOG.audit(_("Create volume of %s GB"),
                        kwargs.get('size'),
                        context=context)

        create_kwargs = dict(snapshot=snapshot,
                             volume_type=kwargs.get('volume_type'),
                             metadata=kwargs.get('metadata'),
                             availability_zone=kwargs.get('availability_zone'))

        volume = self.volume_api.create(context,
                                        kwargs.get('size'),
                                        kwargs.get('name'),
                                        kwargs.get('description'),
                                        **create_kwargs)

        # Register an ec2 id mapping for the new volume.
        db.ec2_volume_create(context, volume['id'])
        # TODO(vish): Instance should be None at db layer instead of
        #             trying to lazy load, but for now we turn it into
        #             a dict to avoid an error.
        return self._format_volume(context, dict(volume))
def delete_volume(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.delete(context, volume)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Delete Failed'))
return True
    def attach_volume(self, context,
                      volume_id,
                      instance_id,
                      device, **kwargs):
        """EC2 AttachVolume: attach an ec2 volume to an ec2 instance at
        the given device path."""
        validate_ec2_id(instance_id)
        validate_ec2_id(volume_id)
        volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        instance = self.compute_api.get(context, instance_id)
        # The message interpolates from locals(): the volume_id,
        # instance_id and device locals must keep these exact names.
        msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
                " at %(device)s") % locals()
        LOG.audit(msg, context=context)
        try:
            self.compute_api.attach_volume(context, instance,
                                           volume_id, device)
        except exception.InvalidVolume:
            raise exception.EC2APIError(_('Attach Failed.'))

        # Re-fetch so the response reflects the post-attach state.
        volume = self.volume_api.get(context, volume_id)
        return {'attachTime': volume['attach_time'],
                'device': volume['mountpoint'],
                'instanceId': ec2utils.id_to_ec2_inst_id(instance_id),
                'requestId': context.request_id,
                'status': volume['attach_status'],
                'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
    def detach_volume(self, context, volume_id, **kwargs):
        """EC2 DetachVolume."""
        validate_ec2_id(volume_id)
        volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
        LOG.audit(_("Detach volume %s"), volume_id, context=context)
        # Fetched before the detach so the response can still report the
        # attachment's device and instance.
        volume = self.volume_api.get(context, volume_id)
        try:
            self.compute_api.detach_volume(context, volume_id=volume_id)
        except exception.InvalidVolume:
            raise exception.EC2APIError(_('Detach Volume Failed.'))

        return {'attachTime': volume['attach_time'],
                'device': volume['mountpoint'],
                'instanceId': ec2utils.id_to_ec2_inst_id(
                        volume['instance_uuid']),
                'requestId': context.request_id,
                'status': volume['attach_status'],
                'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _format_kernel_id(self, context, instance_ref, result, key):
kernel_uuid = instance_ref['kernel_id']
if kernel_uuid is None or kernel_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki')
def _format_ramdisk_id(self, context, instance_ref, result, key):
ramdisk_uuid = instance_ref['ramdisk_id']
if ramdisk_uuid is None or ramdisk_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid,
'ari')
    def describe_instance_attribute(self, context, instance_id, attribute,
                                    **kwargs):
        """EC2 DescribeInstanceAttribute for a single instance.

        Supported attributes are the keys of attribute_formatter below;
        anything else (including sourceDestCheck) raises EC2APIError.
        """
        # One small formatter closure per EC2 attribute name; each fills
        # `result` in place from the fetched instance.
        def _unsupported_attribute(instance, result):
            raise exception.EC2APIError(_('attribute not supported: %s') %
                                        attribute)

        def _format_attr_block_device_mapping(instance, result):
            tmp = {}
            self._format_instance_root_device_name(instance, tmp)
            self._format_instance_bdm(context, instance['uuid'],
                                      tmp['rootDeviceName'], result)

        def _format_attr_disable_api_termination(instance, result):
            result['disableApiTermination'] = instance['disable_terminate']

        def _format_attr_group_set(instance, result):
            CloudController._format_group_set(instance, result)

        def _format_attr_instance_initiated_shutdown_behavior(instance,
                                                              result):
            if instance['shutdown_terminate']:
                result['instanceInitiatedShutdownBehavior'] = 'terminate'
            else:
                result['instanceInitiatedShutdownBehavior'] = 'stop'

        def _format_attr_instance_type(instance, result):
            self._format_instance_type(instance, result)

        def _format_attr_kernel(instance, result):
            self._format_kernel_id(context, instance, result, 'kernel')

        def _format_attr_ramdisk(instance, result):
            self._format_ramdisk_id(context, instance, result, 'ramdisk')

        def _format_attr_root_device_name(instance, result):
            self._format_instance_root_device_name(instance, result)

        def _format_attr_source_dest_check(instance, result):
            _unsupported_attribute(instance, result)

        def _format_attr_user_data(instance, result):
            result['userData'] = base64.b64decode(instance['user_data'])

        attribute_formatter = {
            'blockDeviceMapping': _format_attr_block_device_mapping,
            'disableApiTermination': _format_attr_disable_api_termination,
            'groupSet': _format_attr_group_set,
            'instanceInitiatedShutdownBehavior':
            _format_attr_instance_initiated_shutdown_behavior,
            'instanceType': _format_attr_instance_type,
            'kernel': _format_attr_kernel,
            'ramdisk': _format_attr_ramdisk,
            'rootDeviceName': _format_attr_root_device_name,
            'sourceDestCheck': _format_attr_source_dest_check,
            'userData': _format_attr_user_data,
        }

        fn = attribute_formatter.get(attribute)
        if fn is None:
            raise exception.EC2APIError(
                _('attribute not supported: %s') % attribute)

        # Fetch the instance, then delegate to the chosen formatter.
        ec2_instance_id = instance_id
        validate_ec2_id(instance_id)
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        instance = self.compute_api.get(context, instance_id)
        result = {'instance_id': ec2_instance_id}
        fn(instance, result)
        return result
def describe_instances(self, context, **kwargs):
# Optional DescribeInstances argument
instance_id = kwargs.get('instance_id', None)
return self._format_describe_instances(context,
instance_id=instance_id)
def describe_instances_v6(self, context, **kwargs):
# Optional DescribeInstancesV6 argument
instance_id = kwargs.get('instance_id', None)
return self._format_describe_instances(context,
instance_id=instance_id, use_v6=True)
    def _format_describe_instances(self, context, **kwargs):
        # Wrap the formatted reservations for the Describe* responses.
        return {'reservationSet': self._format_instances(context, **kwargs)}
    def _format_run_instances(self, context, reservation_id):
        # A run request produces exactly one reservation; return it bare.
        i = self._format_instances(context, reservation_id=reservation_id)
        assert len(i) == 1
        return i[0]
    def _format_terminate_instances(self, context, instance_id,
                                    previous_states):
        """Build the TerminateInstances response: previous and current
        state per instance; instances already gone report DELETED."""
        instances_set = []
        for (ec2_id, previous_state) in zip(instance_id, previous_states):
            i = {}
            i['instanceId'] = ec2_id
            i['previousState'] = _state_description(previous_state['vm_state'],
                                        previous_state['shutdown_terminate'])
            try:
                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                instance = self.compute_api.get(context, internal_id)
                i['currentState'] = _state_description(instance['vm_state'],
                                            instance['shutdown_terminate'])
            except exception.NotFound:
                # Already purged from the db: report it as terminated.
                i['currentState'] = _state_description(vm_states.DELETED,
                                                       True)
            instances_set.append(i)
        return {'instancesSet': instances_set}
    def _format_instance_bdm(self, context, instance_uuid, root_device_name,
                             result):
        """Format InstanceBlockDeviceMappingResponseItemType into
        *result* (mutated in place), also setting 'rootDeviceType' to
        'ebs' when the root device is volume/snapshot backed."""
        root_device_type = 'instance-store'
        mapping = []
        for bdm in db.block_device_mapping_get_all_by_instance(context,
                                                               instance_uuid):
            volume_id = bdm['volume_id']
            # Skip no-device entries and mappings without a volume.
            if (volume_id is None or bdm['no_device']):
                continue

            if (bdm['device_name'] == root_device_name and
                (bdm['snapshot_id'] or bdm['volume_id'])):
                assert not bdm['virtual_name']
                root_device_type = 'ebs'

            vol = self.volume_api.get(context, volume_id)
            LOG.debug(_("vol = %s\n"), vol)
            # TODO(yamahata): volume attach time
            ebs = {'volumeId': volume_id,
                   'deleteOnTermination': bdm['delete_on_termination'],
                   'attachTime': vol['attach_time'] or '-',
                   'status': vol['status'], }
            res = {'deviceName': bdm['device_name'],
                   'ebs': ebs, }
            mapping.append(res)

        if mapping:
            result['blockDeviceMapping'] = mapping
        result['rootDeviceType'] = root_device_type
@staticmethod
def _format_instance_root_device_name(instance, result):
result['rootDeviceName'] = (instance.get('root_device_name') or
block_device.DEFAULT_ROOT_DEV_NAME)
@staticmethod
def _format_instance_type(instance, result):
if instance['instance_type']:
result['instanceType'] = instance['instance_type'].get('name')
else:
result['instanceType'] = None
@staticmethod
def _format_group_set(instance, result):
security_group_names = []
if instance.get('security_groups'):
for security_group in instance['security_groups']:
security_group_names.append(security_group['name'])
result['groupSet'] = utils.convert_to_list_dict(
security_group_names, 'groupId')
def _format_instances(self, context, instance_id=None, use_v6=False,
                      **search_opts):
    """Build EC2 reservation structures for the requested instances.

    :param instance_id: optional list of ec2 ids to restrict the result
    :param search_opts: extra filters passed to compute_api.get_all
    :returns: list of reservation dicts, each with an 'instancesSet'
    """
    # TODO(termie): this method is poorly named as its name does not imply
    #               that it will be making a variety of database calls
    #               rather than simply formatting a bunch of instances that
    #               were handed to it
    reservations = {}
    # NOTE(vish): instance_id is an optional list of ids to filter by
    if instance_id:
        instances = []
        for ec2_id in instance_id:
            try:
                instance_uuid = ec2utils.ec2_inst_id_to_uuid(context,
                                                             ec2_id)
                instance = self.compute_api.get(context, instance_uuid)
            except exception.NotFound:
                # Unknown ids are silently dropped from the result.
                continue
            instances.append(instance)
    else:
        try:
            # always filter out deleted instances
            search_opts['deleted'] = False
            instances = self.compute_api.get_all(context,
                                                 search_opts=search_opts,
                                                 sort_dir='asc')
        except exception.NotFound:
            instances = []
    for instance in instances:
        # Hide the cloudpipe/VPN image from non-admin users.
        if not context.is_admin:
            if instance['image_ref'] == str(FLAGS.vpn_image_id):
                continue
        i = {}
        instance_uuid = instance['uuid']
        ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
        i['instanceId'] = ec2_id
        image_uuid = instance['image_ref']
        i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid)
        self._format_kernel_id(context, instance, i, 'kernelId')
        self._format_ramdisk_id(context, instance, i, 'ramdiskId')
        i['instanceState'] = _state_description(
            instance['vm_state'], instance['shutdown_terminate'])

        # Pick the first fixed/floating IP for the EC2 single-address
        # fields; remaining addresses are not representable here.
        fixed_ip = None
        floating_ip = None
        ip_info = ec2utils.get_ip_info_for_instance(context, instance)
        if ip_info['fixed_ips']:
            fixed_ip = ip_info['fixed_ips'][0]
        if ip_info['floating_ips']:
            floating_ip = ip_info['floating_ips'][0]
        if ip_info['fixed_ip6s']:
            i['dnsNameV6'] = ip_info['fixed_ip6s'][0]
        if FLAGS.ec2_private_dns_show_ip:
            i['privateDnsName'] = fixed_ip
        else:
            i['privateDnsName'] = instance['hostname']
        i['privateIpAddress'] = fixed_ip
        i['publicDnsName'] = floating_ip
        i['ipAddress'] = floating_ip or fixed_ip
        i['dnsName'] = i['ipAddress'] or i['privateDnsName']
        i['keyName'] = instance['key_name']

        # Admins also see the owning project and host inline.
        if context.is_admin:
            i['keyName'] = '%s (%s, %s)' % (i['keyName'],
                                            instance['project_id'],
                                            instance['host'])
        i['productCodesSet'] = utils.convert_to_list_dict([],
                                                          'product_codes')
        self._format_instance_type(instance, i)
        i['launchTime'] = instance['created_at']
        i['amiLaunchIndex'] = instance['launch_index']
        self._format_instance_root_device_name(instance, i)
        self._format_instance_bdm(context, instance['uuid'],
                                  i['rootDeviceName'], i)
        host = instance['host']
        services = db.service_get_all_by_host(context.elevated(), host)
        zone = ec2utils.get_availability_zone_by_host(services, host)
        i['placement'] = {'availabilityZone': zone}

        # Group instances by reservation id, creating the reservation
        # wrapper on first sight.
        if instance['reservation_id'] not in reservations:
            r = {}
            r['reservationId'] = instance['reservation_id']
            r['ownerId'] = instance['project_id']
            self._format_group_set(instance, r)
            r['instancesSet'] = []
            reservations[instance['reservation_id']] = r
        reservations[instance['reservation_id']]['instancesSet'].append(i)
    return list(reservations.values())
def describe_addresses(self, context, public_ip=None, **kwargs):
    """Describe floating IPs, optionally restricted to given addresses."""
    if public_ip:
        floatings = [
            self.network_api.get_floating_ip_by_address(context, addr)
            for addr in public_ip]
    else:
        floatings = self.network_api.get_floating_ips_by_project(context)
    formatted = [self._format_address(context, fip) for fip in floatings]
    return {'addressesSet': formatted}
def _format_address(self, context, floating_ip):
ec2_id = None
if floating_ip['fixed_ip_id']:
fixed_id = floating_ip['fixed_ip_id']
fixed = self.network_api.get_fixed_ip(context, fixed_id)
if fixed['instance_uuid'] is not None:
ec2_id = ec2utils.id_to_ec2_inst_id(fixed['instance_uuid'])
address = {'public_ip': floating_ip['address'],
'instance_id': ec2_id}
if context.is_admin:
details = "%s (%s)" % (address['instance_id'],
floating_ip['project_id'])
address['instance_id'] = details
return address
def allocate_address(self, context, **kwargs):
    """Allocate a floating IP for the project (EC2 AllocateAddress)."""
    LOG.audit(_("Allocate address"), context=context)
    try:
        allocated_ip = self.network_api.allocate_floating_ip(context)
    except exception.FloatingIpLimitExceeded:
        # Surface quota exhaustion as an EC2-level error.
        raise exception.EC2APIError(_('No more floating IPs available'))
    return {'publicIp': allocated_ip}
def release_address(self, context, public_ip, **kwargs):
    """Release a floating IP (EC2 ReleaseAddress)."""
    LOG.audit(_("Release address %s"), public_ip, context=context)
    try:
        self.network_api.release_floating_ip(context, address=public_ip)
    except exception.FloatingIpNotFound:
        raise exception.EC2APIError(_('Unable to release IP Address.'))
    return {'return': "true"}
def associate_address(self, context, instance_id, public_ip, **kwargs):
    """Associate a floating IP with an instance (EC2 AssociateAddress).

    :raises EC2APIError: when the instance has no fixed IP, when the
        floating IP is already associated, or when the network driver
        fails to plug the address.
    """
    LOG.audit(_("Associate address %(public_ip)s to"
                " instance %(instance_id)s") % locals(), context=context)
    instance_id = ec2utils.ec2_id_to_id(instance_id)
    instance = self.compute_api.get(context, instance_id)

    cached_ipinfo = ec2utils.get_ip_info_for_instance(context, instance)
    fixed_ips = cached_ipinfo['fixed_ips'] + cached_ipinfo['fixed_ip6s']
    if not fixed_ips:
        msg = _('Unable to associate IP Address, no fixed_ips.')
        raise exception.EC2APIError(msg)

    # TODO(tr3buchet): this will associate the floating IP with the
    # first fixed_ip an instance has. This should be
    # changed to support specifying a particular fixed_ip if
    # multiple exist but this may not apply to ec2..
    if len(fixed_ips) > 1:
        msg = _('multiple fixed_ips exist, using the first: %s')
        LOG.warning(msg, fixed_ips[0])

    try:
        self.network_api.associate_floating_ip(context, instance,
                                               floating_address=public_ip,
                                               fixed_address=fixed_ips[0])
        return {'return': 'true'}
    except exception.FloatingIpAssociated:
        msg = _('Floating ip is already associated.')
        raise exception.EC2APIError(msg)
    except exception.NoFloatingIpInterface:
        msg = _('l3driver call to add floating ip failed.')
        raise exception.EC2APIError(msg)
    except Exception:
        # Any other failure is logged with traceback and re-raised as a
        # generic EC2 error so the API response stays well-formed.
        msg = _('Error, unable to associate floating ip.')
        LOG.exception(msg)
        raise exception.EC2APIError(msg)
def disassociate_address(self, context, public_ip, **kwargs):
    """Disassociate a floating IP from its instance (EC2 API)."""
    owner_id = self.network_api.get_instance_id_by_floating_address(
        context, public_ip)
    owner = self.compute_api.get(context, owner_id)
    LOG.audit(_("Disassociate address %s"), public_ip, context=context)
    try:
        self.network_api.disassociate_floating_ip(context, owner,
                                                  address=public_ip)
    except exception.FloatingIpNotAssociated:
        raise exception.EC2APIError(_('Floating ip is not associated.'))
    return {'return': "true"}
def run_instances(self, context, **kwargs):
    """Launch instances (EC2 RunInstances) with an extended flavor syntax.

    The 'instance_type' argument may be "flavor;key=val;key2=val2": the
    first ';'-separated segment is the real flavor name, the remaining
    key=value pairs are stored as instance metadata.
    """
    metadata = {}
    LOG.debug(_('run instance with extra feature'))
    extended_arg = kwargs.get('instance_type', None)
    instance_type = None
    if extended_arg is not None:
        extra_features = extended_arg.rsplit(';')
        instance_type = extra_features.pop(0)
        for feature in extra_features:
            feature = feature.rsplit('=')
            metadata[feature[0].strip()] = feature[1].strip()
    min_count = int(kwargs.get('min_count', 1))
    if kwargs.get('kernel_id'):
        kernel = self._get_image(context, kwargs['kernel_id'])
        kwargs['kernel_id'] = ec2utils.id_to_glance_id(context,
                                                       kernel['id'])
    if kwargs.get('ramdisk_id'):
        ramdisk = self._get_image(context, kwargs['ramdisk_id'])
        kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context,
                                                        ramdisk['id'])
    for bdm in kwargs.get('block_device_mapping', []):
        _parse_block_device_mapping(bdm)

    image = self._get_image(context, kwargs['image_id'])
    image_uuid = ec2utils.id_to_glance_id(context, image['id'])
    if image:
        image_state = self._get_image_state(image)
    else:
        raise exception.ImageNotFoundEC2(image_id=kwargs['image_id'])

    if image_state != 'available':
        raise exception.EC2APIError(_('Image must be available'))

    (instances, resv_id) = self.compute_api.create(
        context,
        # Bug fix: use the parsed flavor name instead of the raw
        # extended argument, which may still contain ';key=val' features
        # and would never match a flavor by name.
        instance_type=instance_types.get_instance_type_by_name(
            instance_type),
        image_href=image_uuid,
        max_count=int(kwargs.get('max_count', min_count)),
        min_count=min_count,
        kernel_id=kwargs.get('kernel_id'),
        ramdisk_id=kwargs.get('ramdisk_id'),
        key_name=kwargs.get('key_name'),
        user_data=kwargs.get('user_data'),
        security_group=kwargs.get('security_group'),
        metadata=metadata,
        availability_zone=kwargs.get('placement', {}).get(
            'availability_zone'),
        block_device_mapping=kwargs.get('block_device_mapping', {}))
    return self._format_run_instances(context, resv_id)
def _ec2_ids_to_instances(self, context, instance_id):
    """Get all instances first, to prevent partial executions."""
    resolved = []
    for ec2_id in instance_id:
        validate_ec2_id(ec2_id)
        internal_id = ec2utils.ec2_id_to_id(ec2_id)
        resolved.append(self.compute_api.get(context, internal_id))
    return resolved
def terminate_instances(self, context, instance_id, **kwargs):
    """Terminate each instance in instance_id, which is a list of ec2 ids.

    instance_id is a kwarg so its name cannot be modified."""
    # Resolve everything up front so a bad id aborts before any delete.
    previous_states = self._ec2_ids_to_instances(context, instance_id)
    LOG.debug(_("Going to start terminating instances"))
    for inst in previous_states:
        self.compute_api.delete(context, inst)
    return self._format_terminate_instances(context, instance_id,
                                            previous_states)
def reboot_instances(self, context, instance_id, **kwargs):
    """Hard-reboot every instance in the ec2-id list instance_id."""
    targets = self._ec2_ids_to_instances(context, instance_id)
    LOG.audit(_("Reboot instance %r"), instance_id, context=context)
    for inst in targets:
        self.compute_api.reboot(context, inst, 'HARD')
    return True
def stop_instances(self, context, instance_id, **kwargs):
    """Stop every instance in the ec2-id list instance_id."""
    targets = self._ec2_ids_to_instances(context, instance_id)
    LOG.debug(_("Going to stop instances"))
    for inst in targets:
        self.compute_api.stop(context, inst)
    return True
def start_instances(self, context, instance_id, **kwargs):
    """Start every instance in the ec2-id list instance_id."""
    targets = self._ec2_ids_to_instances(context, instance_id)
    LOG.debug(_("Going to start instances"))
    for inst in targets:
        self.compute_api.start(context, inst)
    return True
def _get_image(self, context, ec2_id):
    """Look up an image by ec2 id, falling back to a lookup by name.

    Raises ImageNotFound when neither lookup succeeds or when the
    container format does not match the id prefix (ami/aki/ari).
    """
    try:
        internal_id = ec2utils.ec2_id_to_id(ec2_id)
        image = self.image_service.show(context, internal_id)
    except (exception.InvalidEc2Id, exception.ImageNotFound):
        # Not a valid/known ec2 id: treat it as an image name instead.
        candidates = self.image_service.detail(context,
                                               filters={'name': ec2_id})
        if not candidates:
            raise exception.ImageNotFound(image_id=ec2_id)
        return candidates[0]
    # The id prefix encodes the expected image type.
    expected_type = ec2_id.split('-')[0]
    if ec2utils.image_type(image.get('container_format')) != expected_type:
        raise exception.ImageNotFound(image_id=ec2_id)
    return image
def _format_image(self, image):
    """Convert from format defined by GlanceImageService to S3 format."""
    i = {}
    image_type = ec2utils.image_type(image.get('container_format'))
    ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type)
    name = image.get('name')
    i['imageId'] = ec2_id
    kernel_id = image['properties'].get('kernel_id')
    if kernel_id:
        i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki')
    ramdisk_id = image['properties'].get('ramdisk_id')
    if ramdisk_id:
        i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari')
    i['imageOwnerId'] = image.get('owner')
    img_loc = image['properties'].get('image_location')
    if img_loc:
        i['imageLocation'] = img_loc
    else:
        # No stored location: renders as "None (<name>)" for display.
        i['imageLocation'] = "%s (%s)" % (img_loc, name)
    i['name'] = name
    if not name and img_loc:
        # This should only occur for images registered with ec2 api
        # prior to that api populating the glance name
        i['name'] = img_loc
    i['imageState'] = self._get_image_state(image)
    i['description'] = image.get('description')
    display_mapping = {'aki': 'kernel',
                       'ari': 'ramdisk',
                       'ami': 'machine'}
    i['imageType'] = display_mapping.get(image_type)
    # Coerce to a real bool regardless of the stored representation.
    i['isPublic'] = not not image.get('is_public')
    i['architecture'] = image['properties'].get('architecture')

    properties = image['properties']
    root_device_name = block_device.properties_root_device_name(properties)
    root_device_type = 'instance-store'
    # The image is 'ebs'-rooted when a snapshot/volume mapping covers the
    # root device (and is not explicitly suppressed via no_device).
    for bdm in properties.get('block_device_mapping', []):
        if (block_device.strip_dev(bdm.get('device_name')) ==
                block_device.strip_dev(root_device_name) and
                ('snapshot_id' in bdm or 'volume_id' in bdm) and
                not bdm.get('no_device')):
            root_device_type = 'ebs'
    i['rootDeviceName'] = (root_device_name or
                           block_device.DEFAULT_ROOT_DEV_NAME)
    i['rootDeviceType'] = root_device_type

    _format_mappings(properties, i)
    return i
def describe_images(self, context, image_id=None, **kwargs):
    """Describe images (EC2 DescribeImages).

    NOTE: image_id is a list of ec2 ids; when omitted all images are
    returned.
    """
    if image_id:
        found = []
        for ec2_id in image_id:
            try:
                found.append(self._get_image(context, ec2_id))
            except exception.NotFound:
                raise exception.ImageNotFound(image_id=ec2_id)
        images = found
    else:
        images = self.image_service.detail(context)
    return {'imagesSet': [self._format_image(img) for img in images]}
def deregister_image(self, context, image_id, **kwargs):
    """Delete the backing image for an ec2 image id."""
    LOG.audit(_("De-registering image %s"), image_id, context=context)
    image = self._get_image(context, image_id)
    self.image_service.delete(context, image['id'])
    return True
def _register_image(self, context, metadata):
    """Create an image from metadata and return its ec2-style id."""
    image = self.image_service.create(context, metadata)
    return ec2utils.image_ec2_id(
        image['id'], ec2utils.image_type(image.get('container_format')))
def register_image(self, context, image_location=None, **kwargs):
    """Register an image (EC2 RegisterImage).

    'name' doubles as the location when imageLocation is omitted; one of
    the two is required.
    """
    if image_location is None:
        image_location = kwargs.get('name') or None
    if image_location is None:
        raise exception.EC2APIError(_('imageLocation is required'))

    metadata = {'name': kwargs.get('name') or image_location,
                'properties': {'image_location': image_location}}

    if 'root_device_name' in kwargs:
        metadata['properties']['root_device_name'] = kwargs.get(
            'root_device_name')

    mappings = [_parse_block_device_mapping(bdm)
                for bdm in kwargs.get('block_device_mapping', [])]
    if mappings:
        metadata['properties']['block_device_mapping'] = mappings

    image_id = self._register_image(context, metadata)
    msg = _("Registered image %(image_location)s with"
            " id %(image_id)s") % locals()
    LOG.audit(msg, context=context)
    return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
    """Describe one attribute of an image (EC2 DescribeImageAttribute).

    Supported attributes: blockDeviceMapping, launchPermission,
    rootDeviceName, kernel, ramdisk. Each nested helper fills the
    attribute-specific keys into ``result`` in place.
    """
    def _block_device_mapping_attribute(image, result):
        # Delegates to the module-level mapping formatter.
        _format_mappings(image['properties'], result)

    def _launch_permission_attribute(image, result):
        result['launchPermission'] = []
        if image['is_public']:
            result['launchPermission'].append({'group': 'all'})

    def _root_device_name_attribute(image, result):
        _prop_root_dev_name = block_device.properties_root_device_name
        result['rootDeviceName'] = _prop_root_dev_name(image['properties'])
        if result['rootDeviceName'] is None:
            result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME

    def _kernel_attribute(image, result):
        kernel_id = image['properties'].get('kernel_id')
        if kernel_id:
            result['kernel'] = {
                'value': ec2utils.image_ec2_id(kernel_id, 'aki')
            }

    def _ramdisk_attribute(image, result):
        ramdisk_id = image['properties'].get('ramdisk_id')
        if ramdisk_id:
            result['ramdisk'] = {
                'value': ec2utils.image_ec2_id(ramdisk_id, 'ari')
            }

    # Dispatch table: attribute name -> formatter.
    supported_attributes = {
        'blockDeviceMapping': _block_device_mapping_attribute,
        'launchPermission': _launch_permission_attribute,
        'rootDeviceName': _root_device_name_attribute,
        'kernel': _kernel_attribute,
        'ramdisk': _ramdisk_attribute,
    }

    fn = supported_attributes.get(attribute)
    if fn is None:
        raise exception.EC2APIError(_('attribute not supported: %s')
                                    % attribute)
    try:
        image = self._get_image(context, image_id)
    except exception.NotFound:
        raise exception.ImageNotFound(image_id=image_id)

    result = {'imageId': image_id}
    fn(image, result)
    return result
def modify_image_attribute(self, context, image_id, attribute,
                           operation_type, **kwargs):
    """Add or remove the public launch permission on an image.

    Only attribute 'launchPermission' with the single user group 'all'
    is supported; operation_type must be 'add' or 'remove'.
    """
    # TODO(devcamcar): Support users and groups other than 'all'.
    if attribute != 'launchPermission':
        raise exception.EC2APIError(_('attribute not supported: %s')
                                    % attribute)
    if 'user_group' not in kwargs:
        raise exception.EC2APIError(_('user or group not specified'))
    # Bug fix: the original used "and", which let any single non-'all'
    # group through; both conditions must hold for the request to be
    # rejected correctly.
    if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all':
        raise exception.EC2APIError(_('only group "all" is supported'))
    if operation_type not in ['add', 'remove']:
        msg = _('operation_type must be add or remove')
        raise exception.EC2APIError(msg)
    LOG.audit(_("Updating image %s publicity"), image_id, context=context)

    try:
        image = self._get_image(context, image_id)
    except exception.NotFound:
        raise exception.ImageNotFound(image_id=image_id)
    internal_id = image['id']
    del image['id']

    # 'add' grants public visibility, 'remove' revokes it.
    image['is_public'] = (operation_type == 'add')
    try:
        return self.image_service.update(context, internal_id, image)
    except exception.ImageNotAuthorized:
        msg = _('Not allowed to modify attributes for image %s')
        raise exception.EC2APIError(msg % image_id)
def update_image(self, context, image_id, **kwargs):
    """Update image metadata via the image service."""
    internal_id = ec2utils.ec2_id_to_id(image_id)
    return self.image_service.update(context, internal_id, dict(kwargs))
# TODO(yamahata): race condition
# At the moment there is no way to prevent others from
# manipulating instances/volumes/snapshots.
# As other code doesn't take it into consideration, here we don't
# care of it for now. Ostrich algorithm
def create_image(self, context, instance_id, **kwargs):
    """Snapshot a volume-backed instance into a new image (EC2 CreateImage).

    Unless no_reboot is passed, an ACTIVE instance is stopped first,
    polled until STOPPED (up to one hour), snapshotted, and restarted.
    """
    # NOTE(yamahata): name/description are ignored by register_image(),
    #                 do so here
    no_reboot = kwargs.get('no_reboot', False)
    name = kwargs.get('name')
    validate_ec2_id(instance_id)
    ec2_instance_id = instance_id
    instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
    instance = self.compute_api.get(context, instance_id)

    bdms = self.compute_api.get_instance_bdms(context, instance)

    # CreateImage only supported for the analogue of EBS-backed instances
    if not self.compute_api.is_volume_backed_instance(context, instance,
                                                      bdms):
        root = instance['root_device_name']
        msg = _("Invalid value '%(ec2_instance_id)s' for instanceId. "
                "Instance does not have a volume attached at root "
                "(%(root)s)") % locals()
        raise exception.InvalidParameterValue(err=msg)

    # stop the instance if necessary
    restart_instance = False
    if not no_reboot:
        vm_state = instance['vm_state']

        # if the instance is in subtle state, refuse to proceed.
        if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
            raise exception.InstanceNotRunning(instance_id=ec2_instance_id)

        if vm_state == vm_states.ACTIVE:
            restart_instance = True
            self.compute_api.stop(context, instance)

        # wait instance for really stopped
        start_time = time.time()
        while vm_state != vm_states.STOPPED:
            time.sleep(1)
            instance = self.compute_api.get(context, instance_id)
            vm_state = instance['vm_state']
            # NOTE(yamahata): timeout and error. 1 hour for now for safety.
            #                 Is it too short/long?
            #                 Or is there any better way?
            timeout = 1 * 60 * 60
            if time.time() > start_time + timeout:
                raise exception.EC2APIError(
                    _('Couldn\'t stop instance with in %d sec') % timeout)

    glance_uuid = instance['image_ref']
    ec2_image_id = ec2utils.glance_id_to_ec2_id(context, glance_uuid)
    src_image = self._get_image(context, ec2_image_id)
    image_meta = dict(src_image)

    def _unmap_id_property(properties, name):
        if properties[name]:
            properties[name] = ec2utils.id_to_glance_id(context,
                                                        properties[name])

    # ensure the ID properties are unmapped back to the glance UUID
    _unmap_id_property(image_meta['properties'], 'kernel_id')
    _unmap_id_property(image_meta['properties'], 'ramdisk_id')

    # meaningful image name
    name_map = dict(instance=instance['uuid'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    new_image = self.compute_api.snapshot_volume_backed(context,
                                                        instance,
                                                        image_meta,
                                                        name)

    ec2_id = ec2utils.glance_id_to_ec2_id(context, new_image['id'])

    # Restart only if we stopped it above.
    if restart_instance:
        self.compute_api.start(context, instance)

    return {'imageId': ec2_id}
class CloudSecurityGroupAPI(compute.api.SecurityGroupAPI):
    """Security-group API that maps validation failures to EC2 errors."""

    @staticmethod
    def raise_invalid_property(msg):
        """Invalid property -> InvalidParameterValue."""
        raise exception.InvalidParameterValue(err=msg)

    @staticmethod
    def raise_group_already_exists(msg):
        """Duplicate group -> generic EC2 API error."""
        raise exception.EC2APIError(message=msg)

    @staticmethod
    def raise_invalid_group(msg):
        """Bad group reference -> InvalidGroup."""
        raise exception.InvalidGroup(reason=msg)

    @staticmethod
    def raise_invalid_cidr(cidr, decoding_exception=None):
        """Re-raise the decoding error when present, else a generic one."""
        if decoding_exception:
            raise decoding_exception
        raise exception.EC2APIError(_("Invalid CIDR"))

    @staticmethod
    def raise_over_quota(msg):
        """Quota exceeded -> generic EC2 API error."""
        raise exception.EC2APIError(message=msg)

    @staticmethod
    def raise_not_found(msg):
        """Not-found is deliberately ignored for EC2 semantics."""
        pass
| |
# -*- coding: utf-8 -*-
"""
Document Library - Controllers
"""
# web2py controller preamble: "request" and "settings" are framework
# globals injected at execution time. Reject requests when this module
# is disabled in the deployment settings.
module = request.controller
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# =============================================================================
def index():
    """Render the module's home page."""
    name_nice = settings.modules[module].name_nice
    response.title = name_nice
    return dict(module_name=name_nice)
# =============================================================================
def document():
    """ RESTful CRUD controller """

    def prep(r):
        # Apply the user's Location Filter
        s3db.gis_location_filter(r)
        if r.method in ("create", "create.popup"):
            # Coming from a Profile page: default the doc_id
            default_doc_id = get_vars.get("~.doc_id", None)
            if default_doc_id:
                s3db.doc_document.doc_id.default = default_doc_id
        return True
    s3.prep = prep

    return s3_rest_controller(rheader=document_rheader)
# -----------------------------------------------------------------------------
def document_rheader(r):
    """
        Resource header for the document controller

        @param r: the S3Request
        @return: the rheader DIV or None (non-HTML or no record)
    """
    if r.representation == "html":
        doc_document = r.record
        if doc_document:
            #rheader_tabs = s3_rheader_tabs(r, document_tabs(r))
            table = db.doc_document
            rheader = DIV(B("%s: " % T("Name")), doc_document.name,
                          TABLE(TR(
                                   TH("%s: " % T("File")), table.file.represent( doc_document.file ),
                                   TH("%s: " % T("URL")), table.url.represent( doc_document.url ),
                                   ),
                                TR(
                                   TH("%s: " % ORGANISATION), table.organisation_id.represent( doc_document.organisation_id ),
                                   # Bug fix: was passing organisation_id to the
                                   # person_id representation
                                   TH("%s: " % T("Person")), table.person_id.represent( doc_document.person_id ),
                                   ),
                                ),
                          #rheader_tabs
                          )
            return rheader
    return None
# -----------------------------------------------------------------------------
def document_tabs(r):
    """
        Display the number of Components in the tabs
        - currently unused as we don't have these tabs off documents

        @param r: the S3Request
        @return: list of (label, resource) tab tuples
    """

    tab_opts = [{"tablename": "assess_rat",
                 "resource": "rat",
                 "one_title": "1 Assessment",
                 "num_title": " Assessments",
                 },
                {"tablename": "irs_ireport",
                 "resource": "ireport",
                 "one_title": "1 Incident Report",
                 "num_title": " Incident Reports",
                 },
                {"tablename": "cr_shelter",
                 "resource": "shelter",
                 "one_title": "1 Shelter",
                 "num_title": " Shelters",
                 },
                #{"tablename": "flood_freport",
                # "resource": "freport",
                # "one_title": "1 Flood Report",
                # "num_title": " Flood Reports",
                #},
                {"tablename": "req_req",
                 "resource": "req",
                 "one_title": "1 Request",
                 "num_title": " Requests",
                 },
                ]

    tabs = [(T("Details"), None)]
    crud_string = s3base.S3CRUD.crud_string
    for tab_opt in tab_opts:
        tablename = tab_opt["tablename"]
        # Bug fix: field presence must be checked with the field name as a
        # string; the bare name "document_id" was an undefined variable.
        if tablename in db and "document_id" in db[tablename]:
            table = db[tablename]
            query = (table.deleted == False) & \
                    (table.document_id == r.id)
            tab_count = db(query).count()
            if tab_count == 0:
                label = crud_string(tablename, "label_create")
            elif tab_count == 1:
                label = tab_opt["one_title"]
            else:
                label = T(str(tab_count) + tab_opt["num_title"] )
            tabs.append( (label, tab_opt["resource"] ) )

    return tabs
# =============================================================================
def source():
    """ RESTful CRUD controller """

    def prep(r):
        # Apply the user's Location Filter
        s3db.gis_location_filter(r)
        return True
    s3.prep = prep

    return s3_rest_controller()
# =============================================================================
def image():
""" RESTful CRUD controller """
# Pre-processor
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
if r.method in ("create", "create.popup"):
# Coming from Profile page
doc_id = get_vars.get("~.doc_id", None)
if doc_id:
s3db.doc_image.doc_id.default = doc_id
return True
s3.prep = prep
def postp(r, output):
if r.method == "update" and r.http == "POST":
points = r.vars.get("imagecrop-points")
if not points:
return output
filename = r.resource.records()[0]["file"]
points = map(float, points.split(","))
path = os.path.join(request.folder, "uploads", "images", filename)
current.s3task.async("crop_image",
args=[path] + points + [S3ImageCropWidget.DEFAULT_WIDTH])
return output
s3.postp = postp
output = s3_rest_controller()
return output
# =============================================================================
def bulk_upload():
    """
        Custom view to allow bulk uploading of Photos

        @ToDo: Allow creation of a GIS Feature Layer to view on the map
        @ToDo: Allow uploading of associated GPX track for timestamp correlation.
        See r1595 for the previous draft of this work
    """

    s3.stylesheets.append("plugins/fileuploader.css")
    return dict()
def upload_bulk():
    """
        Receive the Uploaded data from bulk_upload()
        https://github.com/valums/file-uploader/blob/master/server/readme.txt

        @ToDo: Read EXIF headers to geolocate the Photos
    """

    tablename = "doc_image"
    table = s3db[tablename]

    import cgi
    source = request.post_vars.get("qqfile", None)
    if isinstance(source, cgi.FieldStorage) and source.filename:
        # For IE6-8, Opera, older versions of other browsers you get the file as you normally do with regular form-base uploads.
        name = source.filename
        image = source.file
    else:
        # For browsers which upload file with progress bar, you will need to get the raw post data and write it to the file.
        if "name" in request.vars:
            name = request.vars.name
        else:
            # Bug fix: the HTTP exception was constructed but never raised,
            # leaving "name" unbound below
            raise HTTP(400, "Invalid Request: Need a Name!")

        image = request.body.read()
        # Convert to StringIO for onvalidation/import
        import cStringIO
        image = cStringIO.StringIO(image)
        source = Storage()
        source.filename = name
        source.file = image

    form = SQLFORM(table)
    vars = Storage()
    vars.name = name
    vars.image = source
    vars._formname = "%s_create" % tablename

    # onvalidation callback
    onvalidation = s3db.get_config(tablename, "create_onvalidation",
                   s3db.get_config(tablename, "onvalidation"))

    if form.accepts(vars, onvalidation=onvalidation):
        msg = Storage(success = True)
        # onaccept callback
        onaccept = s3db.get_config(tablename, "create_onaccept",
                   s3db.get_config(tablename, "onaccept"))
        from gluon.tools import callback
        callback(onaccept, form, tablename=tablename)
    else:
        error_msg = ""
        for error in form.errors:
            error_msg = "%s\n%s:%s" % (error_msg, error, form.errors[error])
        msg = Storage(error = error_msg)

    response.headers["Content-Type"] = "text/html" # This is what the file-uploader widget expects
    return json.dumps(msg)
# -----------------------------------------------------------------------------
def sitrep():
    """ RESTful CRUD controller for Situation Reports """
    return s3_rest_controller()
# END =========================================================================
| |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from taskflow import task
from taskflow.types import failure
import tenacity
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker import task_utils
from octavia.db import api as db_apis
from octavia.db import repositories
from octavia.network import base
from octavia.network import data_models as n_data_models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BaseNetworkTask(task.Task):
    """Base task to load drivers common to the tasks."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Created lazily by the network_driver property on first access.
        self._network_driver = None
        self.task_utils = task_utils.TaskUtils()
        self.lb_repo = repositories.LoadBalancerRepository()

    @property
    def network_driver(self):
        # Load the configured network driver on first use and cache it.
        if self._network_driver is None:
            self._network_driver = utils.get_network_driver()
        return self._network_driver
class CalculateAmphoraDelta(BaseNetworkTask):
    """Compute the NIC add/delete delta for a single amphora."""

    default_provides = constants.DELTA

    def execute(self, loadbalancer, amphora, availability_zone,
                vrrp_port=None):
        """Return a Delta of NICs to plug/unplug for this amphora."""
        LOG.debug("Calculating network delta for amphora id: %s", amphora.id)

        if vrrp_port is None:
            vrrp_port = self.network_driver.get_port(amphora.vrrp_port_id)

        # Prefer an AZ-specific management network when one is configured.
        az_mgmt_net = None
        if availability_zone:
            az_mgmt_net = availability_zone.get(constants.MANAGEMENT_NETWORK)
        if az_mgmt_net:
            management_nets = [az_mgmt_net]
        else:
            management_nets = CONF.controller_worker.amp_boot_network_list

        # Networks the amphora must be attached to: VIP network,
        # management network(s) and every member subnet's network.
        desired_network_ids = {vrrp_port.network_id}
        desired_network_ids.update(management_nets)
        for pool in loadbalancer.pools:
            desired_network_ids.update(
                self.network_driver.get_subnet(member.subnet_id).network_id
                for member in pool.members
                if member.subnet_id)

        plugged = self.network_driver.get_plugged_networks(
            amphora.compute_id)
        # assume we don't have two nics in the same network
        nics_by_net = {nic.network_id: nic for nic in plugged}

        delete_nics = [nics_by_net[net_id]
                       for net_id in set(nics_by_net) - desired_network_ids]
        add_nics = [n_data_models.Interface(network_id=net_id)
                    for net_id in desired_network_ids - set(nics_by_net)]

        return n_data_models.Delta(
            amphora_id=amphora.id, compute_id=amphora.compute_id,
            add_nics=add_nics, delete_nics=delete_nics)
class CalculateDelta(BaseNetworkTask):
    """Compute the NIC deltas for every allocated amphora of a LB."""

    default_provides = constants.DELTAS

    def execute(self, loadbalancer, availability_zone):
        """Compute which NICs need to be plugged
        for the amphora to become operational.

        :param loadbalancer: the loadbalancer to calculate deltas for all
                             amphorae
        :param availability_zone: availability zone metadata dict

        :returns: dict of octavia.network.data_models.Delta keyed off
                  amphora id
        """
        amp_delta_task = CalculateAmphoraDelta()
        allocated = (amp for amp in loadbalancer.amphorae
                     if amp.status == constants.AMPHORA_ALLOCATED)
        return {amp.id: amp_delta_task.execute(loadbalancer, amp,
                                               availability_zone)
                for amp in allocated}
class GetPlumbedNetworks(BaseNetworkTask):
    """Task to figure out the NICS on an amphora.

    This will likely move into the amphora driver
    :returns: Array of networks
    """

    default_provides = constants.NICS

    def execute(self, amphora):
        """Return the networks currently plugged into the amphora."""
        LOG.debug("Getting plumbed networks for amphora id: %s", amphora.id)
        driver = self.network_driver
        return driver.get_plugged_networks(amphora.compute_id)
class PlugNetworks(BaseNetworkTask):
    """Task to plug the networks.

    This uses the delta to add all missing networks/nics
    """

    def execute(self, amphora, delta):
        """Plug every NIC the delta marks as missing."""
        LOG.debug("Plug or unplug networks for amphora id: %s", amphora.id)

        if not delta:
            LOG.debug("No network deltas for amphora id: %s", amphora.id)
            return

        # add nics
        for interface in delta.add_nics:
            self.network_driver.plug_network(amphora.compute_id,
                                             interface.network_id)

    def revert(self, amphora, delta, *args, **kwargs):
        """Handle a failed network plug by removing all nics added."""
        LOG.warning("Unable to plug networks for amp id %s", amphora.id)
        if not delta:
            return

        for interface in delta.add_nics:
            try:
                self.network_driver.unplug_network(amphora.compute_id,
                                                   interface.network_id)
            except base.NetworkNotFound:
                # Never plugged (or already gone) - nothing to undo.
                pass
class UnPlugNetworks(BaseNetworkTask):
    """Task to unplug the networks

    Loop over all nics and unplug them
    based on delta
    """

    def execute(self, amphora, delta):
        """Unplug every NIC the delta marks for deletion.

        Failures are logged and skipped so the remaining NICs are still
        processed (best effort).
        """
        LOG.debug("Unplug network for amphora")
        if not delta:
            LOG.debug("No network deltas for amphora id: %s", amphora.id)
            return

        for nic in delta.delete_nics:
            try:
                self.network_driver.unplug_network(amphora.compute_id,
                                                   nic.network_id)
            except base.NetworkNotFound:
                # Bug fix: network_id is a UUID string, so the original
                # "%d" conversion failed inside the logging call.
                LOG.debug("Network %s not found", nic.network_id)
            except Exception:
                LOG.exception("Unable to unplug network")
                # TODO(xgerman) follow up if that makes sense
class GetMemberPorts(BaseNetworkTask):
    """Collect the member-network ports plugged into an amphora.

    Excludes the VIP-network port and any port carrying the amphora's
    management (lb network) IP.
    """

    def execute(self, loadbalancer, amphora):
        """Return the member ports for *amphora*.

        :param loadbalancer: Load balancer whose VIP port is excluded.
        :param amphora: Amphora whose plugged interfaces are inspected.
        :returns: list of port objects with ``network`` and the surviving
            ``fixed_ips[*].subnet`` attributes populated.
        """
        vip_port = self.network_driver.get_port(loadbalancer.vip.port_id)
        member_ports = []
        interfaces = self.network_driver.get_plugged_networks(
            amphora.compute_id)
        for interface in interfaces:
            port = self.network_driver.get_port(interface.port_id)
            # Skip the port that lives on the VIP's network.
            if vip_port.network_id == port.network_id:
                continue
            port.network = self.network_driver.get_network(port.network_id)
            for fixed_ip in port.fixed_ips:
                # A port holding the management IP is not a member port; the
                # break also skips the for/else append below.
                if amphora.lb_network_ip == fixed_ip.ip_address:
                    break
                fixed_ip.subnet = self.network_driver.get_subnet(
                    fixed_ip.subnet_id)
            # Only add the port to the list if the IP wasn't the mgmt IP
            else:
                member_ports.append(port)
        return member_ports
class HandleNetworkDelta(BaseNetworkTask):
    """Task to plug and unplug networks

    Plug or unplug networks based on delta
    """

    def execute(self, amphora, delta):
        """Handle network plugging based off deltas.

        :param amphora: The amphora the delta applies to.
        :param delta: Delta with ``add_nics``/``delete_nics`` and compute id.
        :returns: dict mapping the amphora id to the list of added ports
        """
        added_ports = {}
        added_ports[amphora.id] = []
        for nic in delta.add_nics:
            interface = self.network_driver.plug_network(delta.compute_id,
                                                         nic.network_id)
            port = self.network_driver.get_port(interface.port_id)
            port.network = self.network_driver.get_network(port.network_id)
            for fixed_ip in port.fixed_ips:
                fixed_ip.subnet = self.network_driver.get_subnet(
                    fixed_ip.subnet_id)
            added_ports[amphora.id].append(port)
        for nic in delta.delete_nics:
            try:
                self.network_driver.unplug_network(delta.compute_id,
                                                   nic.network_id)
            except base.NetworkNotFound:
                # Fix: network_id is a UUID string; the old %d placeholder
                # broke the log message formatting.
                LOG.debug("Network %s not found ", nic.network_id)
            except Exception:
                # Best effort - do not fail the flow over an unplug error.
                LOG.exception("Unable to unplug network")
        return added_ports

    def revert(self, result, amphora, delta, *args, **kwargs):
        """Handle a network plug or unplug failures."""
        if isinstance(result, failure.Failure):
            return
        if not delta:
            return
        LOG.warning("Unable to plug networks for amp id %s",
                    delta.amphora_id)
        for nic in delta.add_nics:
            try:
                self.network_driver.unplug_network(delta.compute_id,
                                                   nic.network_id)
            except Exception:
                # Cleanup is best effort during revert.
                pass
class HandleNetworkDeltas(BaseNetworkTask):
    """Task to plug and unplug networks

    Loop through the deltas and plug or unplug
    networks based on delta
    """

    def execute(self, deltas):
        """Handle network plugging based off deltas.

        :param deltas: dict mapping amphora ids to their network deltas.
        :returns: dict mapping each amphora id to the ports added for it
        """
        added_ports = {}
        for amp_id, delta in deltas.items():
            added_ports[amp_id] = []
            for nic in delta.add_nics:
                interface = self.network_driver.plug_network(delta.compute_id,
                                                             nic.network_id)
                port = self.network_driver.get_port(interface.port_id)
                port.network = self.network_driver.get_network(
                    port.network_id)
                for fixed_ip in port.fixed_ips:
                    fixed_ip.subnet = self.network_driver.get_subnet(
                        fixed_ip.subnet_id)
                added_ports[amp_id].append(port)
            for nic in delta.delete_nics:
                try:
                    self.network_driver.unplug_network(delta.compute_id,
                                                       nic.network_id)
                except base.NetworkNotFound:
                    # Fix: network_id is a UUID string; the old %d
                    # placeholder broke the log message formatting.
                    LOG.debug("Network %s not found ", nic.network_id)
                except Exception:
                    LOG.exception("Unable to unplug network")
        return added_ports

    def revert(self, result, deltas, *args, **kwargs):
        """Handle a network plug or unplug failures."""
        if isinstance(result, failure.Failure):
            return
        for amp_id, delta in deltas.items():
            # Fix: skip amphorae without a delta instead of returning, which
            # previously aborted the revert for all remaining amphorae.
            if not delta:
                continue
            LOG.warning("Unable to plug networks for amp id %s",
                        delta.amphora_id)
            for nic in delta.add_nics:
                try:
                    self.network_driver.unplug_network(delta.compute_id,
                                                       nic.network_id)
                except base.NetworkNotFound:
                    # Already unplugged; nothing to undo.
                    pass
class PlugVIP(BaseNetworkTask):
    """Task to plumb a VIP."""

    def execute(self, loadbalancer):
        """Plumb a vip to an amphora."""
        LOG.debug("Plumbing VIP for loadbalancer id: %s", loadbalancer.id)

        return self.network_driver.plug_vip(loadbalancer, loadbalancer.vip)

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to plumb a vip."""
        if isinstance(result, failure.Failure):
            return
        LOG.warning("Unable to plug VIP for loadbalancer id %s",
                    loadbalancer.id)

        try:
            # Make sure we have the current port IDs for cleanup
            for amp_data in result:
                matching_amps = [a for a in loadbalancer.amphorae
                                 if a.id == amp_data.id]
                for amphora in matching_amps:
                    amphora.vrrp_port_id = amp_data.vrrp_port_id
                    amphora.ha_port_id = amp_data.ha_port_id

            self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
        except Exception as e:
            LOG.error("Failed to unplug VIP. Resources may still "
                      "be in use from vip: %(vip)s due to error: %(except)s",
                      {'vip': loadbalancer.vip.ip_address, 'except': str(e)})
class UpdateVIPSecurityGroup(BaseNetworkTask):
    """Task to setup SG for LB."""

    def execute(self, loadbalancer_id):
        """Task to setup SG for LB.

        Task is idempotent and safe to retry.
        """
        LOG.debug("Setting up VIP SG for load balancer id: %s",
                  loadbalancer_id)

        db_lb = self.lb_repo.get(db_apis.get_session(), id=loadbalancer_id)

        sg_id = self.network_driver.update_vip_sg(db_lb, db_lb.vip)
        LOG.info("Set up VIP SG %s for load balancer %s complete",
                 sg_id if sg_id else "None", loadbalancer_id)
        return sg_id
class GetSubnetFromVIP(BaseNetworkTask):
    """Task to plumb a VIP."""

    def execute(self, loadbalancer):
        """Plumb a vip to an amphora."""
        LOG.debug("Getting subnet for LB: %s", loadbalancer.id)

        vip_subnet = self.network_driver.get_subnet(
            loadbalancer.vip.subnet_id)
        LOG.info("Got subnet %s for load balancer %s",
                 loadbalancer.vip.subnet_id if vip_subnet else "None",
                 loadbalancer.id)
        return vip_subnet
class PlugVIPAmpphora(BaseNetworkTask):
    """Task to plumb a VIP."""

    def execute(self, loadbalancer, amphora, subnet):
        """Plumb a vip to an amphora."""
        LOG.debug("Plumbing VIP for amphora id: %s", amphora.id)

        return self.network_driver.plug_aap_port(
            loadbalancer, loadbalancer.vip, amphora, subnet)

    def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):
        """Handle a failure to plumb a vip."""
        if isinstance(result, failure.Failure):
            return
        LOG.warning("Unable to plug VIP for amphora id %s "
                    "load balancer id %s",
                    amphora.id, loadbalancer.id)

        try:
            # Record the port IDs from the partial result so the driver
            # can locate and remove them.
            amphora.vrrp_port_id = result.vrrp_port_id
            amphora.ha_port_id = result.ha_port_id

            self.network_driver.unplug_aap_port(loadbalancer.vip,
                                                amphora, subnet)
        except Exception as e:
            LOG.error('Failed to unplug AAP port. Resources may still be in '
                      'use for VIP: %s due to error: %s', loadbalancer.vip,
                      str(e))
class UnplugVIP(BaseNetworkTask):
    """Task to unplug the vip."""

    def execute(self, loadbalancer):
        """Unplug the vip."""
        LOG.debug("Unplug vip on amphora")

        vip = loadbalancer.vip
        try:
            self.network_driver.unplug_vip(loadbalancer, vip)
        except Exception:
            # Best effort - log and continue so the rest of the flow can
            # clean up the remaining resources.
            LOG.exception("Unable to unplug vip from load balancer %s",
                          loadbalancer.id)
class AllocateVIP(BaseNetworkTask):
    """Task to allocate a VIP."""

    def execute(self, loadbalancer):
        """Allocate a vip to the loadbalancer."""
        LOG.debug("Allocating vip port id %s, subnet id %s, ip address %s for "
                  "load balancer %s",
                  loadbalancer.vip.port_id,
                  loadbalancer.vip.subnet_id,
                  loadbalancer.vip.ip_address,
                  loadbalancer.id)

        allocated_vip = self.network_driver.allocate_vip(loadbalancer)

        LOG.info("Allocated vip with port id %s, subnet id %s, ip address %s "
                 "for load balancer %s",
                 loadbalancer.vip.port_id,
                 loadbalancer.vip.subnet_id,
                 loadbalancer.vip.ip_address,
                 loadbalancer.id)
        return allocated_vip

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to allocate vip."""
        if isinstance(result, failure.Failure):
            LOG.exception("Unable to allocate VIP")
            return

        allocated_vip = result
        LOG.warning("Deallocating vip %s", allocated_vip.ip_address)
        try:
            self.network_driver.deallocate_vip(allocated_vip)
        except Exception as e:
            LOG.error("Failed to deallocate VIP. Resources may still "
                      "be in use from vip: %(vip)s due to error: %(except)s",
                      {'vip': allocated_vip.ip_address, 'except': str(e)})
class AllocateVIPforFailover(AllocateVIP):
    """Task to allocate/validate the VIP for a failover flow."""

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to allocate vip."""
        if isinstance(result, failure.Failure):
            LOG.exception("Unable to allocate VIP")
            return

        # Failover must keep the existing VIP, so reverting never
        # releases it.
        LOG.info("Failover revert is not deallocating vip %s because this is "
                 "a failover.", result.ip_address)
class DeallocateVIP(BaseNetworkTask):
    """Task to deallocate a VIP."""

    def execute(self, loadbalancer):
        """Deallocate a VIP."""
        LOG.debug("Deallocating a VIP %s", loadbalancer.vip.ip_address)

        # NOTE(blogan): ugly but sufficient for now. Drivers need access to
        # the load balancer the vip is/was attached to, but the vip's data
        # model serialization does not carry a backref to the loadbalancer
        # when reached through the loadbalancer itself, so set it here.
        vip_obj = loadbalancer.vip
        vip_obj.load_balancer = loadbalancer

        self.network_driver.deallocate_vip(vip_obj)
class UpdateVIP(BaseNetworkTask):
    """Task to update a VIP."""

    def execute(self, loadbalancer):
        """Push the load balancer's current VIP config to the network driver."""
        LOG.debug("Updating VIP of load_balancer %s.", loadbalancer.id)
        self.network_driver.update_vip(loadbalancer)
class UpdateVIPForDelete(BaseNetworkTask):
    """Task to update a VIP for listener delete flows."""

    def execute(self, loadbalancer):
        """Update the VIP, telling the driver a listener delete is in flight."""
        LOG.debug("Updating VIP for listener delete on load_balancer %s.",
                  loadbalancer.id)
        # for_delete=True lets the driver relax/adjust its handling while
        # a listener is being removed.
        self.network_driver.update_vip(loadbalancer, for_delete=True)
class GetAmphoraNetworkConfigs(BaseNetworkTask):
    """Task to retrieve amphora network details."""

    def execute(self, loadbalancer, amphora=None):
        """Return the network configs for the LB (optionally one amphora)."""
        LOG.debug("Retrieving vip network details.")
        return self.network_driver.get_network_configs(loadbalancer,
                                                       amphora=amphora)
class GetAmphoraNetworkConfigsByID(BaseNetworkTask):
    """Task to retrieve amphora network details."""

    def execute(self, loadbalancer_id, amphora_id=None):
        """Load the LB and amphora records, then fetch their network config."""
        LOG.debug("Retrieving vip network details.")

        amp_repo = repositories.AmphoraRepository()
        db_lb = self.lb_repo.get(db_apis.get_session(), id=loadbalancer_id)
        db_amp = amp_repo.get(db_apis.get_session(), id=amphora_id)

        return self.network_driver.get_network_configs(db_lb, amphora=db_amp)
class GetAmphoraeNetworkConfigs(BaseNetworkTask):
    """Task to retrieve amphorae network details."""

    def execute(self, loadbalancer_id):
        """Load the LB from the DB and return its amphorae network configs."""
        LOG.debug("Retrieving vip network details.")
        loadbalancer = self.lb_repo.get(db_apis.get_session(),
                                        id=loadbalancer_id)
        return self.network_driver.get_network_configs(loadbalancer)
class FailoverPreparationForAmphora(BaseNetworkTask):
    """Task to prepare an amphora for failover."""

    def execute(self, amphora):
        """Ask the network driver to prepare this amphora for failover."""
        LOG.debug("Prepare amphora %s for failover.", amphora.id)
        self.network_driver.failover_preparation(amphora)
class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask):
    """Task retrieving all the port ids on an amphora, except lb network."""

    def execute(self, amphora):
        """Return the ports plugged into the amphora, minus the mgmt port.

        :param amphora: Amphora data model to inspect.
        :returns: list of port objects
        """
        LOG.debug("Retrieve all but the lb network port id on amphora %s.",
                  amphora.id)

        interfaces = self.network_driver.get_plugged_networks(
            compute_id=amphora.compute_id)

        ports = []
        seen_port_ids = set()
        for interface_ in interfaces:
            # Fix: the previous check compared a port ID against a list of
            # port objects, so duplicates were never filtered out.
            if interface_.port_id in seen_port_ids:
                continue
            seen_port_ids.add(interface_.port_id)
            port = self.network_driver.get_port(port_id=interface_.port_id)
            # Exclude the port holding the amphora's management
            # (lb network) IP.
            lb_network = any(ip.ip_address == amphora.lb_network_ip
                             for ip in port.fixed_ips)
            if not lb_network:
                ports.append(port)
        return ports
class PlugPorts(BaseNetworkTask):
    """Task to plug neutron ports into a compute instance."""

    def execute(self, amphora, ports):
        """Plug each port in *ports* into the amphora's compute instance."""
        for port in ports:
            LOG.debug('Plugging port ID: %(port_id)s into compute instance: '
                      '%(compute_id)s.',
                      {'port_id': port.id, 'compute_id': amphora.compute_id})
            self.network_driver.plug_port(amphora, port)
class ApplyQos(BaseNetworkTask):
    """Apply Quality of Services to the VIP"""

    def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id,
                                 is_revert=False, request_qos_id=None):
        """Call network driver to apply QoS Policy on the vrrp ports."""
        if not amps_data:
            amps_data = loadbalancer.amphorae

        apply_qos = ApplyQosAmphora()
        for amp_data in amps_data:
            apply_qos._apply_qos_on_vrrp_port(loadbalancer, amp_data,
                                              qos_policy_id)

    def execute(self, loadbalancer, amps_data=None, update_dict=None):
        """Apply qos policy on the vrrp ports which are related with vip."""
        qos_policy_id = loadbalancer.vip.qos_policy_id
        # Skip when no policy is set and the update does not touch the
        # VIP's qos_policy_id.
        vip_qos_updated = (update_dict and 'vip' in update_dict and
                           'qos_policy_id' in update_dict['vip'])
        if not qos_policy_id and not vip_qos_updated:
            return
        self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id)

    def revert(self, result, loadbalancer, amps_data=None, update_dict=None,
               *args, **kwargs):
        """Handle a failure to apply QoS to VIP"""
        request_qos_id = loadbalancer.vip.qos_policy_id
        orig_lb = self.task_utils.get_current_loadbalancer_from_db(
            loadbalancer.id)
        orig_qos_id = orig_lb.vip.qos_policy_id
        if request_qos_id == orig_qos_id:
            return
        # Restore the policy that was in the database before the update.
        self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id,
                                      is_revert=True,
                                      request_qos_id=request_qos_id)
class ApplyQosAmphora(BaseNetworkTask):
    """Apply Quality of Services to the VIP"""

    def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id,
                                is_revert=False, request_qos_id=None):
        """Call network driver to apply QoS Policy on the vrrp ports."""
        try:
            self.network_driver.apply_qos_on_port(qos_policy_id,
                                                  amp_data.vrrp_port_id)
        except Exception:
            if not is_revert:
                raise
            # Fix: amp_data is a single amphora (see vrrp_port_id above),
            # not a list, so log its id directly instead of iterating it.
            LOG.warning('Failed to undo qos policy %(qos_id)s '
                        'on vrrp port: %(port)s from '
                        'amphorae: %(amp)s',
                        {'qos_id': request_qos_id,
                         'port': amp_data.vrrp_port_id,
                         'amp': amp_data.id})

    def execute(self, loadbalancer, amp_data=None, update_dict=None):
        """Apply qos policy on the vrrp ports which are related with vip."""
        qos_policy_id = loadbalancer.vip.qos_policy_id
        # NOTE(review): this guard differs from ApplyQos.execute (it uses
        # ``update_dict and`` rather than ``not update_dict or``) -- kept
        # as-is to preserve behavior; confirm the asymmetry is intended.
        if not qos_policy_id and (
            update_dict and (
                'vip' not in update_dict or
                'qos_policy_id' not in update_dict['vip'])):
            return
        self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id)

    def revert(self, result, loadbalancer, amp_data=None, update_dict=None,
               *args, **kwargs):
        """Handle a failure to apply QoS to VIP"""
        # Fix: pre-initialize so the error log below cannot raise a
        # NameError if the DB lookup fails before orig_qos_id is assigned.
        orig_qos_id = None
        try:
            request_qos_id = loadbalancer.vip.qos_policy_id
            orig_lb = self.task_utils.get_current_loadbalancer_from_db(
                loadbalancer.id)
            orig_qos_id = orig_lb.vip.qos_policy_id
            if request_qos_id != orig_qos_id:
                self._apply_qos_on_vrrp_port(loadbalancer, amp_data,
                                             orig_qos_id, is_revert=True,
                                             request_qos_id=request_qos_id)
        except Exception as e:
            LOG.error('Failed to remove QoS policy: %s from port: %s due '
                      'to error: %s', orig_qos_id, amp_data.vrrp_port_id,
                      str(e))
class DeletePort(BaseNetworkTask):
    """Task to delete a network port."""

    # Retries every exception with exponential backoff, up to
    # CONF.networking.max_retries attempts, re-raising the last failure.
    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
                    stop=tenacity.stop_after_attempt(
                        CONF.networking.max_retries),
                    wait=tenacity.wait_exponential(
                        multiplier=CONF.networking.retry_backoff,
                        min=CONF.networking.retry_interval,
                        max=CONF.networking.retry_max), reraise=True)
    def execute(self, port_id, passive_failure=False):
        """Delete the network port.

        :param port_id: ID of the port to delete; None is a no-op.
        :param passive_failure: When True, a final failure is logged and the
            port is admin-downed instead of re-raising.
        """
        if port_id is None:
            return
        # First attempt logs at debug; retries log at warning with the
        # attempt counter from the tenacity statistics.
        if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
            LOG.debug("Deleting network port %s", port_id)
        else:
            LOG.warning('Retrying network port %s delete attempt %s of %s.',
                        port_id,
                        self.execute.retry.statistics[
                            constants.ATTEMPT_NUMBER],
                        self.execute.retry.stop.max_attempt_number)
        # Let the Taskflow engine know we are working and alive
        # Don't use get with a default for 'attempt_number', we need to fail
        # if that number is missing.
        self.update_progress(
            self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
            self.execute.retry.stop.max_attempt_number)
        try:
            self.network_driver.delete_port(port_id)
        except Exception:
            # Re-raise to trigger another tenacity retry unless this was
            # the final attempt.
            if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
                    self.execute.retry.stop.max_attempt_number):
                LOG.warning('Network port delete for port id: %s failed. '
                            'Retrying.', port_id)
                raise
            if passive_failure:
                LOG.exception('Network port delete for port ID: %s failed. '
                              'This resource will be abandoned and should '
                              'manually be cleaned up once the '
                              'network service is functional.', port_id)
                # Let's at least attempt to disable it so if the instance
                # comes back from the dead it doesn't conflict with anything.
                try:
                    self.network_driver.admin_down_port(port_id)
                    LOG.info('Successfully disabled (admin down) network port '
                             '%s that failed to delete.', port_id)
                except Exception:
                    LOG.warning('Attempt to disable (admin down) network port '
                                '%s failed. The network service has failed. '
                                'Continuing.', port_id)
            else:
                LOG.exception('Network port delete for port ID: %s failed. '
                              'The network service has failed. '
                              'Aborting and reverting.', port_id)
                raise
class CreateVIPBasePort(BaseNetworkTask):
    """Task to create the VIP base port for an amphora."""

    # Retries every exception with exponential backoff, up to
    # CONF.networking.max_retries attempts, re-raising the last failure.
    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
                    stop=tenacity.stop_after_attempt(
                        CONF.networking.max_retries),
                    wait=tenacity.wait_exponential(
                        multiplier=CONF.networking.retry_backoff,
                        min=CONF.networking.retry_interval,
                        max=CONF.networking.retry_max), reraise=True)
    def execute(self, vip, vip_sg_id, amphora_id):
        """Create the base port on the VIP network for one amphora.

        :param vip: VIP data model providing network/subnet/IP/QoS details.
        :param vip_sg_id: Optional security group ID to attach to the port.
        :param amphora_id: Amphora ID, used to derive the port name.
        :returns: the created port
        """
        port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
        fixed_ips = [{constants.SUBNET_ID: vip.subnet_id}]
        sg_id = []
        if vip_sg_id:
            sg_id = [vip_sg_id]
        # The VIP address rides on the port as a secondary (allowed) IP.
        port = self.network_driver.create_port(
            vip.network_id, name=port_name, fixed_ips=fixed_ips,
            secondary_ips=[vip.ip_address], security_group_ids=sg_id,
            qos_policy_id=vip.qos_policy_id)
        LOG.info('Created port %s with ID %s for amphora %s',
                 port_name, port.id, amphora_id)
        return port

    def revert(self, result, vip, vip_sg_id, amphora_id, *args, **kwargs):
        """Best-effort deletion of the created port when the flow reverts."""
        if isinstance(result, failure.Failure):
            return
        try:
            port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
            # NOTE(review): execute() returns a single port object, yet this
            # iterates over ``result`` -- presumably the port data model is
            # iterable; otherwise the except below swallows a TypeError.
            # TODO confirm against the network driver's port model.
            for port in result:
                self.network_driver.delete_port(port.id)
                LOG.info('Deleted port %s with ID %s for amphora %s due to a '
                         'revert.', port_name, port.id, amphora_id)
        except Exception as e:
            LOG.error('Failed to delete port %s. Resources may still be in '
                      'use for a port intended for amphora %s due to error '
                      '%s. Search for a port named %s',
                      result, amphora_id, str(e), port_name)
class AdminDownPort(BaseNetworkTask):
    """Administratively down a port and wait for it to go DOWN."""

    def execute(self, port_id):
        """Disable the port, then poll until its status reaches DOWN."""
        try:
            self.network_driver.set_port_admin_state_up(port_id, False)
        except base.PortNotFound:
            # Port is already gone; nothing to disable.
            return

        attempts = 0
        while attempts < CONF.networking.max_retries:
            port = self.network_driver.get_port(port_id)
            if port.status == constants.DOWN:
                LOG.debug('Disabled port: %s', port_id)
                return
            LOG.debug('Port %s is %s instead of DOWN, waiting.',
                      port_id, port.status)
            time.sleep(CONF.networking.retry_interval)
            attempts += 1

        LOG.error('Port %s failed to go DOWN. Port status is still %s. '
                  'Ignoring and continuing.', port_id, port.status)

    def revert(self, result, port_id, *args, **kwargs):
        """Re-enable the port when the flow is reverted."""
        if isinstance(result, failure.Failure):
            return
        try:
            self.network_driver.set_port_admin_state_up(port_id, True)
        except Exception as e:
            LOG.error('Failed to bring port %s admin up on revert due to: %s.',
                      port_id, str(e))
class GetVIPSecurityGroupID(BaseNetworkTask):
    """Resolve the ID of the VIP security group for a load balancer."""

    def execute(self, loadbalancer_id):
        """Return the VIP SG ID, or None if it is absent/disabled."""
        sg_name = utils.get_vip_security_group_name(loadbalancer_id)
        try:
            sec_grp = self.network_driver.get_security_group(sg_name)
        except base.SecurityGroupNotFound:
            # Re-raise only when security groups are enabled; otherwise a
            # missing group is expected and we simply report None.
            with excutils.save_and_reraise_exception() as ctxt:
                if self.network_driver.sec_grp_enabled:
                    LOG.error('VIP security group %s was not found.', sg_name)
                else:
                    ctxt.reraise = False
            return None
        else:
            if sec_grp:
                return sec_grp.id
            return None
| |
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError, ConvergenceWarning
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.

    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to the nearest distance between the subclusters to the
       pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2

    # Splice the two new nodes into the doubly linked list of leaves.
    if node.is_leaf:
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2

    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]

    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # Fix: ``dist[[farthest_idx]]`` relied on deprecated (now invalid) NumPy
    # fancy indexing with a nested sequence; index with a tuple instead to
    # select the two rows of distances for the farthest pair of subclusters.
    node1_dist, node2_dist = dist[(farthest_idx,)]

    # Assign every subcluster to whichever of the two seeds is closer.
    node1_closer = node1_dist < node2_dist
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode(object):
    """Each node in a CFTree is called a CFNode.

    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.

    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.

    branching_factor : int
        Maximum number of CF subclusters in each node.

    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.

    n_features : int
        The number of features.

    Attributes
    ----------
    subclusters_ : array-like
        list of subclusters for a particular CFNode.

    prev_leaf_ : _CFNode
        prev_leaf. Useful only if is_leaf is True.

    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.

    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_``.

    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.

    centroids_ : ndarray
        view of ``init_centroids_``.

    squared_norm_ : ndarray
        view of ``init_sq_norm_``.
    """
    def __init__(self, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features

        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None

    def append_subcluster(self, subcluster):
        """Add *subcluster* to this node and refresh the centroid views."""
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_

        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]

    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)

    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True when this node overflowed its branching factor and
        must be split by the caller, False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False

        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # ||c||^2 - 2*c.x ranks candidates by squared distance (the ||x||^2
        # term is constant across candidates, so it can be dropped).
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]

        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)

            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False

            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)

                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False

        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False

            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False

            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- `sklearn.cluster` Estimator : If a model is provided, the model is
fit treating the subclusters as new samples and the initial data is
mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
y : Ignored
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
    def _fit(self, X):
        """
        Insert every sample of X into the CF Tree, then run the global
        clustering step on the resulting subcluster centroids.

        Called by both ``fit`` (always starts a new tree) and
        ``partial_fit`` (reuses an existing tree when one is present).
        """
        X = check_array(X, accept_sparse='csr', copy=self.copy)
        threshold = self.threshold
        branching_factor = self.branching_factor
        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape
        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        partial_fit = getattr(self, 'partial_fit_')
        has_root = getattr(self, 'root_', None)
        if getattr(self, 'fit_') or (partial_fit and not has_root):
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
                                 n_features=n_features)
            # To enable getting back subclusters. dummy_leaf_ is a sentinel
            # at the head of the leaves' linked list (see _get_leaves).
            self.dummy_leaf_ = _CFNode(threshold, branching_factor,
                                       is_leaf=True, n_features=n_features)
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X
        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)
            if split:
                # The root overflowed: split it in two and grow the tree
                # upward by installing a fresh non-leaf root over the halves.
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                self.root_ = _CFNode(threshold, branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)
        # Gather all leaf-subcluster centroids; these are the "samples" for
        # the optional global clustering step.
        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids
        self._global_clustering(X)
        return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
y : Ignored
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
    def _global_clustering(self, X=None):
        """
        Global clustering for the subclusters obtained after fitting.

        The subcluster centroids are treated as samples and clustered
        according to ``self.n_clusters`` (an int, an object exposing
        ``fit_predict``, or None). When X is supplied and
        ``self.compute_labels`` is set, ``self.labels_`` is also computed.
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        # Labels for X are only wanted when data was passed in AND the user
        # asked for them at construction time.
        compute_labels = (X is not None) and self.compute_labels
        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, int):
            clusterer = AgglomerativeClustering(
                n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        elif (clusterer is not None and not
              hasattr(clusterer, 'fit_predict')):
            raise ValueError("n_clusters should be an instance of "
                             "ClusterMixin or an int")
        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(
            self.subcluster_centers_, squared=True)
        if clusterer is None or not_enough_centroids:
            # Trivial labelling: each subcluster is its own global cluster.
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by Birch is less "
                    "than (%d). Decrease the threshold."
                    % (len(centroids), self.n_clusters), ConvergenceWarning)
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(
                self.subcluster_centers_)
        if compute_labels:
            self.labels_ = self.predict(X)
| |
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.db.models import Q
from django.utils.functional import cached_property
class BasePermissionPolicy:
    """
    Abstract base class for permission policies.

    A 'permission policy' centralises every decision about which actions a
    user may perform on a given model. The mechanism is deliberately
    unspecified - it may consult the django.contrib.auth Permission model,
    or be as trivial as "everyone may do everything" - so admin apps can
    swap their permission-handling logic by swapping the policy object
    rather than scattering checks across view functions.

    Subclasses must implement ``users_with_any_permission``; every other
    method has a default derived from it (though real policies usually
    override more, for efficiency or finer-grained logic).
    """

    def __init__(self, model):
        self.model = model

    # --- Per-model permission tests ------------------------------------
    # The defaults below answer by materialising the permitted-user set,
    # which is inefficient; concrete policies are expected to override.

    def user_has_permission(self, user, action):
        """
        Return whether the given user has permission to perform the given
        action on some or all instances of this model.
        """
        return user in self.users_with_permission(action)

    def user_has_any_permission(self, user, actions):
        """
        Return whether the given user has permission to perform any of the
        given actions on some or all instances of this model.
        """
        for action in actions:
            if self.user_has_permission(user, action):
                return True
        return False

    # --- Permitted-user listings ---------------------------------------
    # users_with_any_permission is the one method subclasses MUST provide.

    def users_with_any_permission(self, actions):
        """
        Return a queryset of users who have permission to perform any of the
        given actions on some or all instances of this model.
        """
        raise NotImplementedError

    def users_with_permission(self, action):
        """
        Return a queryset of users who have permission to perform the given
        action on some or all instances of this model.
        """
        return self.users_with_any_permission([action])

    # --- Per-instance permission tests ---------------------------------
    # In the simplest (per-model) case these defer to the per-model tests.
    # Policies with genuine per-instance logic must override at minimum:
    #   user_has_permission_for_instance
    #   instances_user_has_any_permission_for
    #   users_with_any_permission_for_instance

    def user_has_permission_for_instance(self, user, action, instance):
        """
        Return whether the given user has permission to perform the given
        action on the given model instance.
        """
        return self.user_has_permission(user, action)

    def user_has_any_permission_for_instance(self, user, actions, instance):
        """
        Return whether the given user has permission to perform any of the
        given actions on the given model instance.
        """
        for action in actions:
            if self.user_has_permission_for_instance(user, action, instance):
                return True
        return False

    def instances_user_has_any_permission_for(self, user, actions):
        """
        Return a queryset of all instances of this model for which the given
        user has permission to perform any of the given actions.
        """
        objects = self.model.objects
        if self.user_has_any_permission(user, actions):
            return objects.all()
        return objects.none()

    def instances_user_has_permission_for(self, user, action):
        """
        Return a queryset of all instances of this model for which the given
        user has permission to perform the given action.
        """
        return self.instances_user_has_any_permission_for(user, [action])

    def users_with_any_permission_for_instance(self, actions, instance):
        """
        Return a queryset of all users who have permission to perform any of
        the given actions on the given model instance.
        """
        return self.users_with_any_permission(actions)

    def users_with_permission_for_instance(self, action, instance):
        """
        Return a queryset of all users who have permission to perform the
        given action on the given model instance.
        """
        return self.users_with_any_permission_for_instance([action], instance)
class BlanketPermissionPolicy(BasePermissionPolicy):
    """
    A permission policy under which every user - anonymous users included -
    may perform every action on the given model.
    """

    def user_has_permission(self, user, action):
        return True

    def user_has_any_permission(self, user, actions):
        return True

    def users_with_any_permission(self, actions):
        # Although user_has_permission grants access to absolutely everyone
        # (inactive and anonymous users included), this listing deliberately
        # restricts itself to active users. For most applications, setting
        # is_active=False amounts to deleting the account, and such accounts
        # should not appear in e.g. a dropdown of users a task can be
        # assigned to. No queryset can represent anonymous users anyway, so
        # a fully consistent answer is impossible; returning the active
        # users is the "least surprise" approximation.
        return get_user_model().objects.filter(is_active=True)

    def users_with_permission(self, action):
        return get_user_model().objects.filter(is_active=True)
class AuthenticationOnlyPermissionPolicy(BasePermissionPolicy):
    """
    A permission policy under which any active, authenticated user may
    perform every action on the given model; anonymous and inactive users
    may do nothing.
    """

    def user_has_permission(self, user, action):
        return user.is_active and user.is_authenticated

    def user_has_any_permission(self, user, actions):
        return user.is_active and user.is_authenticated

    def users_with_any_permission(self, actions):
        return get_user_model().objects.filter(is_active=True)

    def users_with_permission(self, action):
        return get_user_model().objects.filter(is_active=True)
class BaseDjangoAuthPermissionPolicy(BasePermissionPolicy):
    """
    Base class for policies that consult the django.contrib.auth permission
    model. Extends BasePermissionPolicy with helpers for building
    permission names and querysets of permitted users.
    """

    def __init__(self, model, auth_model=None):
        # `auth_model` names the model whose permission records are looked
        # up. It usually matches `model` (the type of instances returned by
        # `instances_user_has_permission_for`), but the two can differ when
        # swappable models are in use - e.g. an interface editing a custom
        # User model will typically still consult the permission records
        # registered for auth.user.
        super().__init__(model)
        self.auth_model = auth_model or self.model
        self.app_label = self.auth_model._meta.app_label
        self.model_name = self.auth_model._meta.model_name

    @cached_property
    def _content_type(self):
        # Cached: the content type for auth_model never changes at runtime.
        return ContentType.objects.get_for_model(self.auth_model)

    def _get_permission_name(self, action):
        """
        Return the app-label-qualified permission name for the given action
        on this model, in the form expected by user.has_perm(...).
        """
        return "%s.%s_%s" % (self.app_label, action, self.model_name)

    def _get_users_with_any_permission_codenames_filter(self, permission_codenames):
        """
        Given a list of permission codenames, return a Q expression matching
        all users that hold any of those permissions - via group
        permissions, direct user permissions, or implicitly by being a
        superuser. Inactive users are always excluded.
        """
        matching_perms = Permission.objects.filter(
            content_type=self._content_type, codename__in=permission_codenames
        )
        holds_permission = (
            Q(is_superuser=True)
            | Q(user_permissions__in=matching_perms)
            | Q(groups__permissions__in=matching_perms)
        )
        return holds_permission & Q(is_active=True)

    def _get_users_with_any_permission_codenames(self, permission_codenames):
        """
        Given a list of permission codenames, return a queryset of users
        holding any of those permissions - via group permissions, direct
        user permissions, or implicitly by being a superuser.
        """
        condition = self._get_users_with_any_permission_codenames_filter(
            permission_codenames
        )
        # distinct() is needed: a user may match through several groups
        # and/or direct permissions at once.
        return get_user_model().objects.filter(condition).distinct()
class ModelPermissionPolicy(BaseDjangoAuthPermissionPolicy):
    """
    A policy that enforces permissions purely at the model level by
    consulting the standard django.contrib.auth permission records.
    """

    def user_has_permission(self, user, action):
        return user.has_perm(self._get_permission_name(action))

    def users_with_any_permission(self, actions):
        codenames = [
            "%s_%s" % (action, self.model_name) for action in actions
        ]
        return self._get_users_with_any_permission_codenames(codenames)
class OwnershipPermissionPolicy(BaseDjangoAuthPermissionPolicy):
    """
    A permission policy for objects that support a concept of 'ownership', where
    the owner is typically the user who created the object.
    This policy piggybacks off 'add' and 'change' permissions defined through the
    django.contrib.auth Permission model, as follows:
    * any user with 'add' permission can create instances, and ALSO edit instances
    that they own
    * any user with 'change' permission can edit instances regardless of ownership
    * ability to edit also implies ability to delete
    Besides 'add', 'change' and 'delete', no other actions are recognised or permitted
    (unless the user is an active superuser, in which case they can do everything).
    """
    def __init__(self, model, auth_model=None, owner_field_name="owner"):
        """
        ``owner_field_name`` is the field on ``model`` that records the
        owning user; it must exist, or ImproperlyConfigured is raised.
        """
        super().__init__(model, auth_model=auth_model)
        self.owner_field_name = owner_field_name
        # make sure owner_field_name is a field that exists on the model
        try:
            self.model._meta.get_field(self.owner_field_name)
        except FieldDoesNotExist:
            raise ImproperlyConfigured(
                "%s has no field named '%s'. To use this model with OwnershipPermissionPolicy, "
                "you must specify a valid field name as owner_field_name."
                % (self.model, self.owner_field_name)
            )
    def user_has_permission(self, user, action):
        """Per-model test implementing the ownership rules documented above."""
        if action == "add":
            return user.has_perm(self._get_permission_name("add"))
        elif action == "change" or action == "delete":
            return (
                # having 'add' permission means that there are *potentially*
                # some instances they can edit (namely: ones they own),
                # which is sufficient for returning True here
                user.has_perm(self._get_permission_name("add"))
                or user.has_perm(self._get_permission_name("change"))
            )
        else:
            # unrecognised actions are only allowed for active superusers
            return user.is_active and user.is_superuser
    def users_with_any_permission(self, actions):
        """Return users who can perform any of ``actions`` on some instance."""
        if "change" in actions or "delete" in actions:
            # either 'add' or 'change' permission means that there are *potentially*
            # some instances they can edit
            permission_codenames = [
                "add_%s" % self.model_name,
                "change_%s" % self.model_name,
            ]
        elif "add" in actions:
            permission_codenames = [
                "add_%s" % self.model_name,
            ]
        else:
            # none of the actions passed in here are ones that we recognise, so only
            # allow them for active superusers
            return get_user_model().objects.filter(is_active=True, is_superuser=True)
        return self._get_users_with_any_permission_codenames(permission_codenames)
    def user_has_permission_for_instance(self, user, action, instance):
        # Delegates to the any-permission variant, which holds the real logic.
        return self.user_has_any_permission_for_instance(user, [action], instance)
    def user_has_any_permission_for_instance(self, user, actions, instance):
        """Per-instance test: 'change' permission, or 'add' plus ownership."""
        if "change" in actions or "delete" in actions:
            if user.has_perm(self._get_permission_name("change")):
                return True
            elif (
                user.has_perm(self._get_permission_name("add"))
                and getattr(instance, self.owner_field_name) == user
            ):
                return True
            else:
                return False
        else:
            # 'change' and 'delete' are the only actions that are well-defined
            # for specific instances. Other actions are only available to
            # active superusers.
            return user.is_active and user.is_superuser
    def instances_user_has_any_permission_for(self, user, actions):
        """Queryset of instances on which the user may perform any of ``actions``."""
        if user.is_active and user.is_superuser:
            # active superusers can perform any action (including unrecognised ones)
            # on any instance
            return self.model.objects.all()
        elif "change" in actions or "delete" in actions:
            if user.has_perm(self._get_permission_name("change")):
                # user can edit all instances
                return self.model.objects.all()
            elif user.has_perm(self._get_permission_name("add")):
                # user can edit their own instances
                return self.model.objects.filter(**{self.owner_field_name: user})
            else:
                # user has no permissions at all on this model
                return self.model.objects.none()
        else:
            # action is either not recognised, or is the 'add' action which is
            # not meaningful for existing instances. As such, non-superusers
            # cannot perform it on any existing instances.
            return self.model.objects.none()
    def users_with_any_permission_for_instance(self, actions, instance):
        """Queryset of users who may perform any of ``actions`` on ``instance``."""
        if "change" in actions or "delete" in actions:
            # get filter expression for users with 'change' permission
            filter_expr = self._get_users_with_any_permission_codenames_filter(
                ["change_%s" % self.model_name]
            )
            # add on the item's owner, if they still have 'add' permission
            # (and the owner field isn't blank)
            owner = getattr(instance, self.owner_field_name)
            if owner is not None and owner.has_perm(self._get_permission_name("add")):
                filter_expr = filter_expr | Q(pk=owner.pk)
            # return the filtered queryset
            return get_user_model().objects.filter(filter_expr).distinct()
        else:
            # action is either not recognised, or is the 'add' action which is
            # not meaningful for existing instances. As such, the action is only
            # available to superusers
            return get_user_model().objects.filter(is_active=True, is_superuser=True)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResourceGraphClientOperationsMixin:
    """Async operations for the Azure Resource Graph client.

    AutoRest-generated mixin: each method serializes its request model,
    POSTs it through the client pipeline, and deserializes the response
    (or raises HttpResponseError in ARM error format on non-200 status).
    NOTE(review): each operation pins its own api-version - presumably
    matching the service version it was generated from; confirm on regen.
    """
    async def resource_changes(
        self,
        parameters: "_models.ResourceChangesRequestParameters",
        **kwargs
    ) -> "_models.ResourceChangeList":
        """List changes to a resource for a given time interval.
        :param parameters: the parameters for this request for changes.
        :type parameters: ~azure.mgmt.resourcegraph.models.ResourceChangesRequestParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ResourceChangeList, or the result of cls(response)
        :rtype: ~azure.mgmt.resourcegraph.models.ResourceChangeList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ResourceChangeList"]
        # Map well-known auth/404/409 statuses to typed exceptions; callers
        # may extend or override the mapping via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.resource_changes.metadata['url']  # type: ignore
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and send the POST through the pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ResourceChangesRequestParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ResourceChangeList', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    resource_changes.metadata = {'url': '/providers/Microsoft.ResourceGraph/resourceChanges'}  # type: ignore
    async def resource_change_details(
        self,
        parameters: "_models.ResourceChangeDetailsRequestParameters",
        **kwargs
    ) -> List["_models.ResourceChangeData"]:
        """Get resource change details.
        :param parameters: The parameters for this request for resource change details.
        :type parameters: ~azure.mgmt.resourcegraph.models.ResourceChangeDetailsRequestParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of ResourceChangeData, or the result of cls(response)
        :rtype: list[~azure.mgmt.resourcegraph.models.ResourceChangeData]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.ResourceChangeData"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.resource_change_details.metadata['url']  # type: ignore
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ResourceChangeDetailsRequestParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[ResourceChangeData]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    resource_change_details.metadata = {'url': '/providers/Microsoft.ResourceGraph/resourceChangeDetails'}  # type: ignore
    async def resources(
        self,
        query: "_models.QueryRequest",
        **kwargs
    ) -> "_models.QueryResponse":
        """Queries the resources managed by Azure Resource Manager for scopes specified in the request.
        :param query: Request specifying query and its options.
        :type query: ~azure.mgmt.resourcegraph.models.QueryRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: QueryResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.resourcegraph.models.QueryResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.QueryResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.resources.metadata['url']  # type: ignore
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(query, 'QueryRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('QueryResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    resources.metadata = {'url': '/providers/Microsoft.ResourceGraph/resources'}  # type: ignore
    async def resources_history(
        self,
        request: "_models.ResourcesHistoryRequest",
        **kwargs
    ) -> object:
        """List all snapshots of a resource for a given time interval.
        :param request:
        :type request: ~azure.mgmt.resourcegraph.models.ResourcesHistoryRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: object, or the result of cls(response)
        :rtype: object
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[object]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.resources_history.metadata['url']  # type: ignore
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        # NOTE: `request` (the method argument) is rebound below to the HTTP
        # request object once the body has been serialized.
        body_content = self._serialize.body(request, 'ResourcesHistoryRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('object', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    resources_history.metadata = {'url': '/providers/Microsoft.ResourceGraph/resourcesHistory'}  # type: ignore
| |
from __future__ import unicode_literals
from datetime import date
from django.contrib.auth import models, management
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import changepassword
from django.contrib.auth.models import User
from django.contrib.auth.tests.test_custom_user import CustomUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.management import call_command
from django.core.management.base import CommandError
from django.core.management.validation import get_validation_errors
from django.db.models.loading import get_app
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils.six import StringIO
@skipIfCustomUser
class GetDefaultUsernameTestCase(TestCase):
    """Tests for management.get_default_username()."""

    def setUp(self):
        # Remember the real implementation so each test can stub it out
        # and tearDown can restore it.
        self._original_get_system_username = management.get_system_username

    def tearDown(self):
        management.get_system_username = self._original_get_system_username

    def test_actual_implementation(self):
        self.assertIsInstance(management.get_system_username(), six.text_type)

    def test_simple(self):
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), 'joe')

    def test_existing(self):
        # A clashing username in the database suppresses the default...
        models.User.objects.create(username='joe')
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), '')
        # ...unless the database check is explicitly skipped.
        self.assertEqual(
            management.get_default_username(check_db=False), 'joe')

    def test_i18n(self):
        # 'Julia' with accented 'u': expect it ASCII-folded and lowercased.
        management.get_system_username = lambda: 'J\xfalia'
        self.assertEqual(management.get_default_username(), 'julia')
@skipIfCustomUser
class ChangepasswordManagementCommandTestCase(TestCase):
    """Tests for the `changepassword` management command."""

    def setUp(self):
        self.user = models.User.objects.create_user(username='joe', password='qwerty')
        self.stdout = StringIO()
        self.stderr = StringIO()

    def tearDown(self):
        self.stderr.close()
        self.stdout.close()

    def test_that_changepassword_command_changes_joes_password(self):
        "Executing the changepassword management command should change joe's password"
        self.assertTrue(self.user.check_password('qwerty'))
        command = changepassword.Command()
        # Stub the interactive prompt so the command always receives the
        # same new password.
        command._get_pass = lambda *args: 'not qwerty'
        command.execute("joe", stdout=self.stdout)
        self.assertEqual(
            self.stdout.getvalue().strip(),
            "Changing password for user 'joe'\nPassword changed successfully for user 'joe'")
        self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty"))

    def test_that_max_tries_exits_1(self):
        """
        A CommandError should be thrown by handle() if the user enters in
        mismatched passwords three times.
        """
        command = changepassword.Command()
        # The prompt stub returns the call arguments, which differ between
        # the two password prompts, so the entries never match and the
        # command exhausts its retries.
        command._get_pass = lambda *args: args or 'foo'
        with self.assertRaises(CommandError):
            command.execute("joe", stdout=self.stdout, stderr=self.stderr)
@skipIfCustomUser
class CreatesuperuserManagementCommandTestCase(TestCase):
    """Tests for the ``createsuperuser`` management command."""

    def test_createsuperuser(self):
        "Check the operation of the createsuperuser management command"
        # We can use the management command to create a superuser
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe",
            email="joe@somewhere.org",
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = User.objects.get(username="joe")
        self.assertEqual(u.email, 'joe@somewhere.org')
        # created password should be unusable
        self.assertFalse(u.has_usable_password())

    def test_verbosity_zero(self):
        # We can suppress output on the management command
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe2",
            email="joe2@somewhere.org",
            verbosity=0,
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '')
        u = User.objects.get(username="joe2")
        self.assertEqual(u.email, 'joe2@somewhere.org')
        self.assertFalse(u.has_usable_password())

    def test_email_in_username(self):
        # Usernames that look like email addresses must be accepted verbatim.
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe+admin@somewhere.org",
            email="joe@somewhere.org",
            stdout=new_io
        )
        u = User._default_manager.get(username="joe+admin@somewhere.org")
        self.assertEqual(u.email, 'joe@somewhere.org')
        self.assertFalse(u.has_usable_password())

    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user(self):
        "A superuser can be created when a custom User model is in use"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            email="joe@somewhere.org",
            date_of_birth="1976-04-01",
            stdout=new_io,
            skip_validation=True
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = CustomUser._default_manager.get(email="joe@somewhere.org")
        self.assertEqual(u.date_of_birth, date(1976, 4, 1))
        # created password should be unusable
        self.assertFalse(u.has_usable_password())

    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user_missing_required_field(self):
        "A Custom superuser won't be created when a required field isn't provided"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = StringIO()
        with self.assertRaises(CommandError):
            call_command("createsuperuser",
                interactive=False,
                username="joe@somewhere.org",
                stdout=new_io,
                stderr=new_io,
                skip_validation=True
            )
        self.assertEqual(CustomUser._default_manager.count(), 0)
class CustomUserModelValidationTestCase(TestCase):
    """Model-validation checks for swappable custom User models."""

    @override_settings(AUTH_USER_MODEL='auth.CustomUserBadRequiredFields')
    def test_username_not_in_required_fields(self):
        "USERNAME_FIELD should not appear in REQUIRED_FIELDS."
        stream = StringIO()
        get_validation_errors(stream, get_app('auth'))
        expected = ("The field named as the USERNAME_FIELD should not be "
                    "included in REQUIRED_FIELDS on a swappable User model.")
        self.assertIn(expected, stream.getvalue())

    @override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername')
    def test_username_non_unique(self):
        "A non-unique USERNAME_FIELD should raise a model validation error."
        stream = StringIO()
        get_validation_errors(stream, get_app('auth'))
        expected = ("The USERNAME_FIELD must be unique. "
                    "Add unique=True to the field parameters.")
        self.assertIn(expected, stream.getvalue())
class PermissionDuplicationTestCase(TestCase):
    """Tests for duplicate-permission detection in create_permissions()."""

    def setUp(self):
        # Copy Permission's Meta.permissions so the test may mutate it freely.
        self._original_permissions = models.Permission._meta.permissions[:]

    def tearDown(self):
        models.Permission._meta.permissions = self._original_permissions

    def test_duplicated_permissions(self):
        """
        Test that we show proper error message if we are trying to create
        duplicate permissions.
        """
        # check duplicated default permission
        models.Permission._meta.permissions = [
            ('change_permission', 'Can edit permission (duplicate)')]
        six.assertRaisesRegex(self, CommandError,
            "The permission codename 'change_permission' clashes with a "
            "builtin permission for model 'auth.Permission'.",
            create_permissions, models, [], verbosity=0)

        # check duplicated custom permissions
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
            ('my_custom_permission', 'Some permission with duplicate permission code'),
        ]
        six.assertRaisesRegex(self, CommandError,
            "The permission codename 'my_custom_permission' is duplicated for model "
            "'auth.Permission'.",
            create_permissions, models, [], verbosity=0)

        # should not raise anything
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
        ]
        create_permissions(models, [], verbosity=0)
| |
# -*- encoding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An :ref:`Action <action_definition>` is what enables Watcher to transform the
current state of a :ref:`Cluster <cluster_definition>` after an
:ref:`Audit <audit_definition>`.
An :ref:`Action <action_definition>` is an atomic task which changes the
current state of a target :ref:`Managed resource <managed_resource_definition>`
of the OpenStack :ref:`Cluster <cluster_definition>` such as:
- Live migration of an instance from one compute node to another compute
node with Nova
- Changing the power level of a compute node (ACPI level, ...)
- Changing the current state of a compute node (enable or disable) with Nova
In most cases, an :ref:`Action <action_definition>` triggers some concrete
commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.).
An :ref:`Action <action_definition>` has a life-cycle and its current state may
be one of the following:
- **PENDING** : the :ref:`Action <action_definition>` has not been executed
yet by the :ref:`Watcher Applier <watcher_applier_definition>`
- **ONGOING** : the :ref:`Action <action_definition>` is currently being
processed by the :ref:`Watcher Applier <watcher_applier_definition>`
- **SUCCEEDED** : the :ref:`Action <action_definition>` has been executed
successfully
- **FAILED** : an error occurred while trying to execute the
:ref:`Action <action_definition>`
- **DELETED** : the :ref:`Action <action_definition>` is still stored in the
:ref:`Watcher database <watcher_database_definition>` but is not returned
any more through the Watcher APIs.
- **CANCELLED** : the :ref:`Action <action_definition>` was in **PENDING** or
**ONGOING** state and was cancelled by the
:ref:`Administrator <administrator_definition>`
:ref:`Some default implementations are provided <watcher_planners>`, but it is
possible to :ref:`develop new implementations <implement_action_plugin>` which
are dynamically loaded by Watcher at launch time.
"""
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from watcher._i18n import _
from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.common import exception
from watcher.common import policy
from watcher import objects
def hide_fields_in_newer_versions(obj):
    """Strip fields that are unavailable at the request's API version.

    Certain node fields are introduced at specific API microversions and
    must only be exposed when the request's version matches or exceeds the
    version that introduced them. No such version-gated fields exist yet,
    so this function is currently a no-op placeholder.
    """
    # Nothing to hide until a version-gated field is added.
class ActionPatchType(types.JsonPatchType):
    """JSON PATCH document type for Action resources."""

    @staticmethod
    def mandatory_attrs():
        # No attributes are mandatory when patching an Action.
        return []
class Action(base.APIBase):
    """API representation of an action.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of an
    action.
    """

    # Backing store for the `action_plan_uuid` wsproperty below.
    _action_plan_uuid = None

    def _get_action_plan_uuid(self):
        return self._action_plan_uuid

    def _set_action_plan_uuid(self, value):
        # Resolve the given identifier to an ActionPlan and cache both its
        # UUID and internal id; None marks a reference that was not found.
        if value == wtypes.Unset:
            self._action_plan_uuid = wtypes.Unset
        elif value and self._action_plan_uuid != value:
            try:
                action_plan = objects.ActionPlan.get(
                    pecan.request.context, value)
                self._action_plan_uuid = action_plan.uuid
                self.action_plan_id = action_plan.id
            except exception.ActionPlanNotFound:
                self._action_plan_uuid = None

    uuid = wtypes.wsattr(types.uuid, readonly=True)
    """Unique UUID for this action"""

    action_plan_uuid = wtypes.wsproperty(types.uuid, _get_action_plan_uuid,
                                         _set_action_plan_uuid,
                                         mandatory=True)
    """The action plan this action belongs to """

    state = wtypes.text
    """This audit state"""

    action_type = wtypes.text
    """Action type"""

    description = wtypes.text
    """Action description"""

    input_parameters = types.jsontype
    """One or more key/value pairs """

    parents = wtypes.wsattr(types.jsontype, readonly=True)
    """UUIDs of parent actions"""

    links = wtypes.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated action links"""

    def __init__(self, **kwargs):
        super(Action, self).__init__()
        self.fields = []
        fields = list(objects.Action.fields)
        # The API exposes 'action_plan_uuid' in place of the internal
        # 'action_plan_id' foreign key.
        fields.append('action_plan_uuid')
        for field in fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))
        self.fields.append('action_plan_id')
        self.fields.append('description')
        # Feeding the internal id through the wsproperty setter resolves it
        # to the action plan's UUID (see _set_action_plan_uuid above).
        setattr(self, 'action_plan_uuid', kwargs.get('action_plan_id',
                wtypes.Unset))

    @staticmethod
    def _convert_with_links(action, url, expand=True):
        # Trim to the summary fields unless a detailed view was requested,
        # then attach the self/bookmark links.
        if not expand:
            action.unset_fields_except(['uuid', 'state', 'action_plan_uuid',
                                        'action_plan_id', 'action_type',
                                        'parents'])
        action.links = [link.Link.make_link('self', url,
                                            'actions', action.uuid),
                        link.Link.make_link('bookmark', url,
                                            'actions', action.uuid,
                                            bookmark=True)
                        ]
        return action

    @classmethod
    def convert_with_links(cls, action, expand=True):
        """Build an API Action from an Action object, links included."""
        action = Action(**action.as_dict())
        try:
            obj_action_desc = objects.ActionDescription.get_by_type(
                pecan.request.context, action.action_type)
            description = obj_action_desc.description
        except exception.ActionDescriptionNotFound:
            # No registered description for this action type.
            description = ""
        setattr(action, 'description', description)
        hide_fields_in_newer_versions(action)
        return cls._convert_with_links(action, pecan.request.host_url, expand)

    @classmethod
    def sample(cls, expand=True):
        """Return a sample Action used in the generated API documentation."""
        sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
                     description='action description',
                     state='PENDING',
                     created_at=datetime.datetime.utcnow(),
                     deleted_at=None,
                     updated_at=datetime.datetime.utcnow(),
                     parents=[])
        sample._action_plan_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
        return cls._convert_with_links(sample, 'http://localhost:9322', expand)
class ActionCollection(collection.Collection):
    """API representation of a collection of actions."""

    actions = [Action]
    """A list containing actions objects"""

    def __init__(self, **kwargs):
        # NOTE(review): the base Collection __init__ is deliberately not
        # invoked here; only the collection type name is set — confirm this
        # matches the base class's expectations.
        self._type = 'actions'

    @staticmethod
    def convert_with_links(actions, limit, url=None, expand=False,
                           **kwargs):
        """Build an ActionCollection with a pagination 'next' link.

        :param actions: iterable of Action objects to convert.
        :param limit: maximum number of items per page.
        :param url: base resource URL used to build the 'next' link.
        :param expand: whether to include the full action details.
        """
        collection = ActionCollection()
        collection.actions = [Action.convert_with_links(p, expand)
                              for p in actions]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection

    @classmethod
    def sample(cls):
        """Return a sample collection used in the API documentation."""
        sample = cls()
        sample.actions = [Action.sample(expand=False)]
        return sample
class ActionsController(rest.RestController):
    """REST controller for Actions."""

    def __init__(self):
        super(ActionsController, self).__init__()

    from_actions = False
    """A flag to indicate if the requests to this controller are coming
    from the top-level resource Actions."""

    _custom_actions = {
        'detail': ['GET'],
    }

    def _get_actions_collection(self, marker, limit,
                                sort_key, sort_dir, expand=False,
                                resource_url=None,
                                action_plan_uuid=None, audit_uuid=None):
        """Fetch one page of actions, optionally filtered and sorted.

        :param marker: UUID of the last action of the previous page.
        :param limit: maximum number of actions to return.
        :param sort_key: field to sort on; may be an API-only field.
        :param sort_dir: "asc" or "desc".
        :param expand: include full details when True.
        :param resource_url: base URL used to build pagination links.
        :param action_plan_uuid: restrict results to this action plan.
        :param audit_uuid: restrict results to this audit.
        """
        additional_fields = ['action_plan_uuid']
        api_utils.validate_sort_key(sort_key, list(objects.Action.fields) +
                                    additional_fields)
        limit = api_utils.validate_limit(limit)
        api_utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.Action.get_by_uuid(pecan.request.context,
                                                    marker)
        filters = {}
        if action_plan_uuid:
            filters['action_plan_uuid'] = action_plan_uuid
        if audit_uuid:
            filters['audit_uuid'] = audit_uuid
        # Fields that only exist at the API layer cannot be sorted by the
        # database query; sort the resulting page in Python instead.
        need_api_sort = api_utils.check_need_api_sort(sort_key,
                                                      additional_fields)
        sort_db_key = (sort_key if not need_api_sort
                       else None)
        actions = objects.Action.list(pecan.request.context,
                                      limit,
                                      marker_obj, sort_key=sort_db_key,
                                      sort_dir=sort_dir,
                                      filters=filters)
        actions_collection = ActionCollection.convert_with_links(
            actions, limit, url=resource_url, expand=expand,
            sort_key=sort_key, sort_dir=sort_dir)
        if need_api_sort:
            api_utils.make_api_sort(actions_collection.actions,
                                    sort_key, sort_dir)
        return actions_collection

    @wsme_pecan.wsexpose(ActionCollection, types.uuid, int,
                         wtypes.text, wtypes.text, types.uuid,
                         types.uuid)
    def get_all(self, marker=None, limit=None,
                sort_key='id', sort_dir='asc', action_plan_uuid=None,
                audit_uuid=None):
        """Retrieve a list of actions.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param action_plan_uuid: Optional UUID of an action plan,
            to get only actions for that action plan.
        :param audit_uuid: Optional UUID of an audit,
            to get only actions for that audit.
        """
        context = pecan.request.context
        policy.enforce(context, 'action:get_all',
                       action='action:get_all')
        # Filtering on both an action plan and an audit is ambiguous.
        if action_plan_uuid and audit_uuid:
            raise exception.ActionFilterCombinationProhibited

        return self._get_actions_collection(
            marker, limit, sort_key, sort_dir,
            action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid)

    @wsme_pecan.wsexpose(ActionCollection, types.uuid, int,
                         wtypes.text, wtypes.text, types.uuid,
                         types.uuid)
    def detail(self, marker=None, limit=None,
               sort_key='id', sort_dir='asc', action_plan_uuid=None,
               audit_uuid=None):
        """Retrieve a list of actions with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param action_plan_uuid: Optional UUID of an action plan,
            to get only actions for that action plan.
        :param audit_uuid: Optional UUID of an audit,
            to get only actions for that audit.
        """
        context = pecan.request.context
        policy.enforce(context, 'action:detail',
                       action='action:detail')
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "actions":
            raise exception.HTTPNotFound
        if action_plan_uuid and audit_uuid:
            raise exception.ActionFilterCombinationProhibited

        expand = True
        resource_url = '/'.join(['actions', 'detail'])
        return self._get_actions_collection(
            marker, limit, sort_key, sort_dir, expand, resource_url,
            action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid)

    @wsme_pecan.wsexpose(Action, types.uuid)
    def get_one(self, action_uuid):
        """Retrieve information about the given action.

        :param action_uuid: UUID of a action.
        """
        if self.from_actions:
            raise exception.OperationNotPermitted

        context = pecan.request.context
        action = api_utils.get_resource('Action', action_uuid)
        policy.enforce(context, 'action:get', action, action='action:get')

        return Action.convert_with_links(action)

    @wsme_pecan.wsexpose(Action, body=Action, status_code=201)
    def post(self, action):
        """Create a new action(forbidden).

        :param action: a action within the request body.
        """
        # FIXME: blueprint edit-action-plan-flow
        # The code below the raise is intentionally unreachable until the
        # blueprint above re-enables direct action creation.
        raise exception.OperationNotPermitted(
            _("Cannot create an action directly"))

        if self.from_actions:
            raise exception.OperationNotPermitted

        action_dict = action.as_dict()
        context = pecan.request.context
        new_action = objects.Action(context, **action_dict)
        new_action.create()
        # Set the HTTP Location Header
        pecan.response.location = link.build_url('actions', new_action.uuid)
        return Action.convert_with_links(new_action)

    @wsme.validate(types.uuid, [ActionPatchType])
    @wsme_pecan.wsexpose(Action, types.uuid, body=[ActionPatchType])
    def patch(self, action_uuid, patch):
        """Update an existing action(forbidden).

        :param action_uuid: UUID of a action.
        :param patch: a json PATCH document to apply to this action.
        """
        # FIXME: blueprint edit-action-plan-flow
        # The code below the raise is intentionally unreachable until the
        # blueprint above re-enables direct action modification.
        raise exception.OperationNotPermitted(
            _("Cannot modify an action directly"))

        if self.from_actions:
            raise exception.OperationNotPermitted

        action_to_update = objects.Action.get_by_uuid(pecan.request.context,
                                                      action_uuid)
        try:
            action_dict = action_to_update.as_dict()
            action = Action(**api_utils.apply_jsonpatch(action_dict, patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        for field in objects.Action.fields:
            try:
                patch_val = getattr(action, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if action_to_update[field] != patch_val:
                action_to_update[field] = patch_val

        action_to_update.save()
        return Action.convert_with_links(action_to_update)

    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, action_uuid):
        """Delete a action(forbidden).

        :param action_uuid: UUID of a action.
        """
        # FIXME: blueprint edit-action-plan-flow
        # The code below the raise is intentionally unreachable until the
        # blueprint above re-enables direct action deletion.
        raise exception.OperationNotPermitted(
            _("Cannot delete an action directly"))

        action_to_delete = objects.Action.get_by_uuid(
            pecan.request.context,
            action_uuid)
        action_to_delete.soft_delete()
| |
"""Unit tests for graph tensor container."""
import copy
from typing import List
from absl import logging
import mock
import tensorflow as tf
from tensorflow_gnn.graph import adjacency as adj
from tensorflow_gnn.graph import graph_tensor as gt
from tensorflow_gnn.graph import schema_validation as sv
import tensorflow_gnn.proto.graph_schema_pb2 as schema_pb2
from google.protobuf import text_format
as_tensor = tf.convert_to_tensor
class GraphValidationTest(tf.test.TestCase):
  """Tests for the graph-schema validation helpers in schema_validation."""

  def test_validate_schema_feature_dtypes(self):
    schema = text_format.Parse("""
      node_sets {
        key: "queries"
        value {
          features {
            key: "num_words"
            value {
              description: "Number of stop words, regular words, frequent words"
            }
          }
        }
      }
    """, schema_pb2.GraphSchema())
    # Check that dtype is always set.
    with self.assertRaises(sv.ValidationError):
      sv._validate_schema_feature_dtypes(schema)
    # Supported dtypes validate cleanly.
    for dtype in tf.string, tf.int64, tf.float32:
      num_words = schema.node_sets['queries'].features['num_words']
      num_words.dtype = dtype.as_datatype_enum
      sv._validate_schema_feature_dtypes(schema)
    # Unsupported dtypes are rejected.
    for dtype in tf.int32, tf.float64:
      num_words = schema.node_sets['queries'].features['num_words']
      num_words.dtype = dtype.as_datatype_enum
      with self.assertRaises(sv.ValidationError):
        sv._validate_schema_feature_dtypes(schema)

  def test_validate_schema_shapes(self):
    schema = text_format.Parse("""
      node_sets {
        key: "queries"
        value {
          features {
            key: "num_words"
            value {
              description: "Number of stop words, regular words, frequent words"
              shape { dim { size: 1 } }
            }
          }
        }
      }
    """, schema_pb2.GraphSchema())
    # Test tensor shape protos with unknown ranks (not allowed).
    shape = schema.node_sets['queries'].features['num_words'].shape
    shape.dim[0].size = 2
    sv._validate_schema_shapes(schema)
    shape.unknown_rank = True
    with self.assertRaises(sv.ValidationError):
      sv._validate_schema_shapes(schema)

  def test_warn_schema_scalar_shapes(self):
    schema = text_format.Parse("""
      node_sets {
        key: "queries"
        value {
          features {
            key: "num_words"
            value {
              description: "Number of stop words, regular words, frequent words"
              shape { dim { size: 1 } }
            }
          }
        }
      }
    """, schema_pb2.GraphSchema())
    # A size-1 dimension should trigger exactly one scalar-shape warning.
    warnings = sv._warn_schema_scalar_shapes(schema)
    self.assertIsInstance(warnings, list)
    self.assertLen(warnings, 1)
    self.assertIsInstance(warnings[0], sv.ValidationError)

  def test_validate_schema_descriptions(self):
    schema = text_format.Parse("""
      node_sets {
        key: "queries"
        value {
          features {
            key: "num_words"
            value {
              description: "Number of stop words, regular words, frequent words"
              shape {
                dim {
                  size: 3
                  name: "Type of word"  # Legitimate usage.
                }
              }
            }
          }
        }
      }
    """, schema_pb2.GraphSchema())
    sv._validate_schema_descriptions(schema)
    # Removing the description must fail validation.
    schema.node_sets['queries'].features['num_words'].ClearField('description')
    with self.assertRaises(sv.ValidationError):
      sv._validate_schema_descriptions(schema)

  @mock.patch.object(logging, 'error')
  def test_validate_schema_reserved_feature_names(self, mock_error):
    # Invalidate feature names on node sets.
    for name in '#size', '#id':
      schema = schema_pb2.GraphSchema()
      # Bug fix: the loop variable was previously ignored (the literal
      # '#size' was hardcoded here), so '#id' was never exercised.
      _ = schema.node_sets['queries'].features[name]
      with self.assertRaises(sv.ValidationError):
        sv._validate_schema_reserved_feature_names(schema)

    # Invalidate feature names on edge sets.
    for name in '#size', '#source', '#target':
      schema = schema_pb2.GraphSchema()
      _ = schema.edge_sets['documents'].features[name]
      with self.assertRaises(sv.ValidationError,
                             msg='Feature: {}'.format(name)):
        sv._validate_schema_reserved_feature_names(schema)

    # Check that an error is issued for other features.
    #
    # TODO(blais,aferludin): We cannot raise an exception yet because the graph
    # sampler uses a number of hardcoded features with '#' prefix. Remove those
    # features from the sampler.
    name = '#weight'
    schema = schema_pb2.GraphSchema()
    _ = schema.edge_sets['documents'].features[name]
    sv._validate_schema_reserved_feature_names(schema)
    mock_error.assert_called()

  def test_validate_schema_context_references(self):
    schema = text_format.Parse("""
      context {
        features {
          key: "embedding"
          value: { dtype: DT_FLOAT shape: { dim { size: 10 } } }
        }
      }
      node_sets {
        key: "queries"
      }
      node_sets {
        key: "documents"
      }
      edge_sets {
        key: "clicks"
        value {
          source: "queries"
          target: "documents"
        }
      }
    """, schema_pb2.GraphSchema())
    # Node sets may only reference context features that exist.
    schema.node_sets['queries'].context.append('embedding')
    sv._validate_schema_context_references(schema)
    schema.node_sets['queries'].context.append('devnull')
    with self.assertRaises(sv.ValidationError):
      sv._validate_schema_context_references(schema)

    # Same for edge sets.
    schema.node_sets['queries'].context[:] = []
    schema.edge_sets['clicks'].context.append('embedding')
    sv._validate_schema_context_references(schema)
    schema.edge_sets['clicks'].context.append('devnull')
    with self.assertRaises(sv.ValidationError):
      sv._validate_schema_context_references(schema)

  def test_validate_schema_node_set_references(self):
    schema = text_format.Parse("""
      node_sets {
        key: "queries"
      }
      node_sets {
        key: "documents"
      }
      edge_sets {
        key: "clicks"
        value {
          source: "queries"
          target: "documents"
        }
      }
    """, schema_pb2.GraphSchema())
    sv._validate_schema_node_set_references(schema)
    # An edge set's source must name an existing node set.
    bad_schema = copy.copy(schema)
    bad_schema.edge_sets['clicks'].source = 'devnull'
    with self.assertRaises(sv.ValidationError):
      sv._validate_schema_node_set_references(bad_schema)
    # Likewise for the target.
    bad_schema = copy.copy(schema)
    bad_schema.edge_sets['clicks'].target = 'devnull'
    with self.assertRaises(sv.ValidationError):
      sv._validate_schema_node_set_references(bad_schema)
class SchemaTests(tf.test.TestCase):
  """Tests for sv.check_required_features()."""

  def test_check_required_features__missing_feature(self):
    required = text_format.Parse("""
      node_sets {
        key: "queries"
        value {
          features {
            key: "musthave"
          }
        }
      }
    """, schema_pb2.GraphSchema())
    given = text_format.Parse("""
      node_sets {
        key: "queries"
        value {
          features {
            key: "musthave"
          }
        }
      }
    """, schema_pb2.GraphSchema())
    # Exact match is accepted.
    sv.check_required_features(required, given)
    # Extra features beyond the required set are allowed.
    given.node_sets['queries'].features['extras'].CopyFrom(
        given.node_sets['queries'].features['musthave'])
    sv.check_required_features(required, given)
    # A missing required feature must fail.
    del given.node_sets['queries'].features['musthave']
    with self.assertRaises(sv.ValidationError):
      sv.check_required_features(required, given)

  def test_check_required_features__invalid_dtype(self):
    required = text_format.Parse("""
      node_sets {
        key: "queries"
        value {
          features {
            key: "musthave"
            value { dtype: DT_STRING }
          }
        }
      }
    """, schema_pb2.GraphSchema())
    given = copy.copy(required)
    sv.check_required_features(required, given)
    # A mismatched dtype must fail.
    given.node_sets['queries'].features['musthave'].dtype = (
        tf.dtypes.float32.as_datatype_enum)
    with self.assertRaises(sv.ValidationError):
      sv.check_required_features(required, given)
    # An unset dtype in `required` means "any dtype accepted".
    required.node_sets['queries'].features['musthave'].ClearField('dtype')
    sv.check_required_features(required, given)

  def test_check_required_features__invalid_shape(self):
    required = text_format.Parse("""
      node_sets {
        key: "queries"
        value {
          features {
            key: "musthave"
            value { shape { dim { size: 10 } dim { size: 20 } } }
          }
        }
      }
    """, schema_pb2.GraphSchema())
    given = copy.copy(required)
    # Check matching.
    sv.check_required_features(required, given)
    # Check invalid size (both present).
    req_musthave = required.node_sets['queries'].features['musthave']
    req_musthave.shape.dim[0].size += 1
    with self.assertRaises(sv.ValidationError):
      sv.check_required_features(required, given)
    # Check ignoring dim.
    req_musthave.shape.dim[0].ClearField('size')
    sv.check_required_features(required, given)
    # Check ignoring dim, failing rank.
    del req_musthave.shape.dim[1]
    with self.assertRaises(sv.ValidationError):
      sv.check_required_features(required, given)
    # Check enabled for scalar feature.
    req_musthave.shape.ClearField('dim')
    with self.assertRaises(sv.ValidationError):
      sv.check_required_features(required, given)
# NOTE(blais): These tests are a holdover of the previous iteration where we did
# everything using dicts. Eventually they could find their way into the
# GraphTensor constructor itself.
class GraphConstraintsTest(tf.test.TestCase):
  """Tests for the structural constraint assertions on GraphTensor."""

  def test_assert_constraints_feature_shape_prefix_nodes(self):
    # Check valid.
    testgraph = gt.GraphTensor.from_pieces(
        node_sets={
            'n': gt.NodeSet.from_fields(
                features={'f': as_tensor([3, 4, 5])},
                sizes=as_tensor([3]))})
    sv._assert_constraints_feature_shape_prefix(testgraph)
    # Corrupt and check invalid. (We could make mutation more robust, but it
    # comes in handy in this test.)
    # A rank-2 sizes tensor is invalid.
    size_name = testgraph.node_sets['n']._DATAKEY_SIZES
    testgraph.node_sets['n']._data[size_name] = as_tensor([[3]])
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sv._assert_constraints_feature_shape_prefix(testgraph)
    # A size that disagrees with the feature's leading dimension is invalid.
    size_name = testgraph.node_sets['n']._DATAKEY_SIZES
    testgraph.node_sets['n']._data[size_name] = as_tensor([4])
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sv._assert_constraints_feature_shape_prefix(testgraph)
    # Check invalid prefix shape.
    testgraph = gt.GraphTensor.from_pieces(
        node_sets={
            'n': gt.NodeSet.from_fields(
                features={'f': as_tensor([3, 4, 5, 6])},
                sizes=as_tensor([3]))})
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sv._assert_constraints_feature_shape_prefix(testgraph)

  def test_assert_constraints_feature_shape_prefix_edges(self):
    # Check valid.
    testgraph = gt.GraphTensor.from_pieces(
        node_sets={
            'n': gt.NodeSet.from_fields(
                features={'f': as_tensor(['a', 'b', 'c', 'd'])},
                sizes=as_tensor([4]))},
        edge_sets={
            'e': gt.EdgeSet.from_fields(
                features={'w': as_tensor([3, 4, 5])},
                sizes=as_tensor([3]),
                adjacency=adj.Adjacency.from_indices(
                    ('n', as_tensor([0, 1, 0])),
                    ('n', as_tensor([1, 0, 1])))
            )})
    sv._assert_constraints_feature_shape_prefix(testgraph)
    # Same corruption strategy as the node-set test, on the edge set.
    size_name = testgraph.edge_sets['e']._DATAKEY_SIZES
    testgraph.edge_sets['e']._data[size_name] = as_tensor([[3]])
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sv._assert_constraints_feature_shape_prefix(testgraph)
    size_name = testgraph.edge_sets['e']._DATAKEY_SIZES
    testgraph.edge_sets['e']._data[size_name] = as_tensor([4])
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sv._assert_constraints_feature_shape_prefix(testgraph)

  def _create_test_graph_with_indices(self,
                                      source: List[int],
                                      target: List[int]):
    # Helper: one node set of 4 nodes and one edge set whose adjacency uses
    # the given source/target index lists.
    assert len(source) == len(target)
    return gt.GraphTensor.from_pieces(
        node_sets={
            'n': gt.NodeSet.from_fields(
                features={'f': as_tensor(['a', 'b', 'c', 'd'])},
                sizes=as_tensor([4]))},
        edge_sets={
            'e': gt.EdgeSet.from_fields(
                features={'w': as_tensor([3, 4, 5])},
                sizes=as_tensor([2]),
                adjacency=adj.Adjacency.from_indices(
                    ('n', as_tensor(source)),
                    ('n', as_tensor(target)))
            )})

  def test_assert_constraints_edge_indices_range_valid(self):
    testgraph = self._create_test_graph_with_indices([0, 3], [0, 3])
    sv._assert_constraints_edge_indices_range(testgraph)
    # Underflow.
    testgraph = self._create_test_graph_with_indices([0, -1], [0, 3])
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sv._assert_constraints_edge_indices_range(testgraph)
    # Overflow.
    testgraph = self._create_test_graph_with_indices([0, 4], [0, 3])
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sv._assert_constraints_edge_indices_range(testgraph)

  def test_assert_constraints_edge_shapes(self):
    testgraph = self._create_test_graph_with_indices([0, 3], [0, 3])
    sv._assert_constraints_edge_shapes(testgraph)
    # Index lists longer than the declared edge-set size are invalid.
    testgraph = self._create_test_graph_with_indices([0, 1, 2], [0, 1, 2])
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sv._assert_constraints_edge_shapes(testgraph)
    # Rank-2 index tensors are invalid.
    testgraph = self._create_test_graph_with_indices([[0, 3]], [[0, 3]])
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sv._assert_constraints_edge_shapes(testgraph)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import datetime
import math
import typing
from absl import logging
import google.auth
from google.cloud import bigquery
from handler import utils
import metrics_pb2
# BigQuery table names for test-job status rows and metric rows.
BQ_JOB_TABLE_NAME = 'job_history'
BQ_METRIC_TABLE_NAME = 'metric_history'

# Maps TestCompletedEvent status enum values to the string stored in the
# `job_status` column of the job history table.
PROTO_STATUS_TO_BQ_STATUS = {
    metrics_pb2.TestCompletedEvent.COMPLETED: 'success',
    metrics_pb2.TestCompletedEvent.FAILED: 'failure',
    metrics_pb2.TestCompletedEvent.TIMEOUT: 'timeout',
}
@dataclasses.dataclass
class JobHistoryRow:
  """Represents a database row containing a test job's status."""
  # Unique key shared with this job's rows in the metric history table.
  uuid: str
  test_name: str
  test_type: str
  accelerator: str
  framework_version: str
  # One of the values of PROTO_STATUS_TO_BQ_STATUS.
  job_status: str
  num_failures: int
  job_duration_sec: int
  timestamp: datetime.datetime
  stackdriver_logs_link: str
  msg_publish_time: typing.Optional[str] = None
  logs_download_command: typing.Optional[str] = None
  kubernetes_workload_link: typing.Optional[str] = None

  @staticmethod
  def from_test_event(
      unique_key: str,
      event: metrics_pb2.TestCompletedEvent
  ):
    """Builds a JobHistoryRow from a TestCompletedEvent proto.

    Args:
      unique_key: Unique identifier shared with the job's metric rows.
      event: The completed-test event to convert.
    """
    return JobHistoryRow(
        unique_key,
        event.benchmark_id,
        event.labels.get('mode'),
        event.labels.get('accelerator'),
        event.labels.get('frameworkVersion'),
        PROTO_STATUS_TO_BQ_STATUS[event.status],
        # A job that eventually completed had (attempts - 1) failures;
        # otherwise every attempt counts as a failure.
        event.num_attempts - 1 if event.status == metrics_pb2.TestCompletedEvent.COMPLETED else event.num_attempts,
        event.duration.seconds,
        event.start_time.ToDatetime(),
        event.debug_info.logs_link,
        # NOTE(review): this positional arg lands in `msg_publish_time`, but
        # it is a float epoch (start time + duration), not the annotated
        # Optional[str] — confirm the intended column type.
        event.start_time.ToDatetime().timestamp() + event.duration.ToTimedelta().total_seconds(),
        event.debug_info.logs_download_command,
        event.debug_info.details_link,
    )
@dataclasses.dataclass
class MetricHistoryRow:
    """Represents a database row containing a test's metrics."""
    uuid: str
    test_name: str
    timestamp: datetime.datetime
    metric_name: str
    metric_value: float
    metric_lower_bound: typing.Optional[float] = None
    metric_upper_bound: typing.Optional[float] = None

    @staticmethod
    def from_metric_point(
        unique_key: str,
        point: utils.MetricPoint,
        event: metrics_pb2.TestCompletedEvent
    ):
        """Builds a MetricHistoryRow for one computed metric of a test run.

        Args:
          unique_key: Unique identifier of the job run; becomes the row uuid.
          point: Metric key/value pair with its assertion bounds.
          event: TestCompletedEvent proto the metric was computed for.
        """
        return MetricHistoryRow(
            uuid=unique_key,
            test_name=event.benchmark_id,
            timestamp=event.start_time.ToDatetime(),
            metric_name=point.metric_key,
            metric_value=point.metric_value,
            metric_lower_bound=point.bounds.lower,
            metric_upper_bound=point.bounds.upper,
        )
def _to_bigquery_schema(dataclass: typing.Any) -> typing.List[bigquery.SchemaField]:
    """Converts a @dataclass to a BigQuery schema.

    Fields whose annotation is a plain supported type become REQUIRED
    columns; Optional[...] versions of the same types become NULLABLE.
    An unsupported field type raises KeyError.
    """
    type_map = {
        str: ("STRING", "REQUIRED"),
        int: ("INT64", "REQUIRED"),
        float: ("FLOAT64", "REQUIRED"),
        datetime.datetime: ("TIMESTAMP", "REQUIRED"),
    }
    # Derive the NULLABLE entries for the Optional[...] variants.
    nullable_entries = {
        typing.Optional[base]: (bq_type, "NULLABLE")
        for base, (bq_type, _) in type_map.items()
    }
    type_map.update(nullable_entries)

    schema = []
    for field in dataclasses.fields(dataclass):
        bq_type, mode = type_map[field.type]
        schema.append(bigquery.SchemaField(field.name, bq_type, mode))
    return schema
def _is_valid_value(v: typing.Any) -> bool:
"""Return True if value is valid for writing to BigQuery.
Args:
v (anything): Value to check.
"""
invalid_values = [math.inf, -math.inf, math.nan]
if v in invalid_values:
return False
try:
if math.isnan(v):
return False
except TypeError:
pass
return True
def _replace_invalid_values(row):
    """Replace float values that are not available in BigQuery.

    Args:
      row: List of values to insert into BigQuery.

    Returns:
      List, `row` with invalid values replaced with `None`.
    """
    return [value if _is_valid_value(value) else None for value in row]
class BigQueryMetricStore:
    """Stores and queries test job statuses and metric values in BigQuery.

    Rows are written to two tables in the configured dataset:
    `job_history` (one row per test run) and `metric_history` (one row per
    metric per run).
    """

    def __init__(self, dataset: str, project: typing.Optional[str] = None):
        """
        Args:
          dataset: Name of the BigQuery dataset.
          project: GCP project id. Defaults to the project of the ambient
            credentials (google.auth.default).
        """
        self._dataset = dataset
        self._project = project or google.auth.default()[1]
        self.bigquery_client = bigquery.Client(
            # Use the resolved project so the client and default_dataset
            # always agree (previously the raw, possibly-None `project`
            # argument was passed here).
            project=self._project,
            default_query_job_config=bigquery.job.QueryJobConfig(
                default_dataset=".".join((self._project, self._dataset)),
            )
        )

    @property
    def job_history_table_id(self):
        """Fully-qualified `project.dataset.table` id of the job table."""
        return ".".join((self._project, self._dataset, BQ_JOB_TABLE_NAME))

    @property
    def metric_history_table_id(self):
        """Fully-qualified `project.dataset.table` id of the metric table."""
        return ".".join((self._project, self._dataset, BQ_METRIC_TABLE_NAME))

    def create_tables(self):
        """Create the dataset and both history tables if they do not exist."""
        dataset = bigquery.Dataset(self.bigquery_client.dataset(self._dataset))
        self.bigquery_client.create_dataset(dataset, exists_ok=True)

        job_history_table = bigquery.Table(
            self.job_history_table_id,
            schema=_to_bigquery_schema(JobHistoryRow))
        self.bigquery_client.create_table(job_history_table, exists_ok=True)

        metric_history_table = bigquery.Table(
            self.metric_history_table_id,
            schema=_to_bigquery_schema(MetricHistoryRow))
        self.bigquery_client.create_table(metric_history_table, exists_ok=True)

    def insert_status_and_metrics(
        self,
        job: JobHistoryRow,
        metrics: typing.Iterable[MetricHistoryRow]):
        """Inserts job status and metric values from a test run into BigQuery.

        Metric rows whose value is not representable in BigQuery (inf/NaN)
        are skipped with a warning; the job status row is always written.
        """
        # Every job should have 1 job status row and it should exist even if
        # no other metrics exist.
        job_history_rows = [dataclasses.astuple(job)]
        # Create rows to represent the computed metrics for this job.
        metric_history_rows = []
        for metric in metrics:
            if not _is_valid_value(float(metric.metric_value)):
                # Lazy %-style args: formatting only happens if the record
                # is actually emitted.
                logging.warning(
                    'Found metric row with invalid value: %s %s %s',
                    job.test_name, metric.metric_name, metric.metric_value)
                continue
            metric_history_rows.append(dataclasses.astuple(metric))

        # Insert rows into BigQuery, skipping empty batches.
        for table_id, rows in [
            (self.job_history_table_id, job_history_rows),
            (self.metric_history_table_id, metric_history_rows),
        ]:
            if not rows:
                continue
            logging.info(
                'Inserting %d rows into BigQuery table `%s`',
                len(rows), table_id)
            table = self.bigquery_client.get_table(table_id)
            # Bounds columns may still carry inf/NaN; NULL them out so the
            # streaming insert does not reject the whole row.
            clean_rows = [_replace_invalid_values(row) for row in rows]
            errors = self.bigquery_client.insert_rows(table, clean_rows)
            if not errors:
                logging.info('Successfully added rows to Bigquery.')
            else:
                logging.error(
                    'Failed to add rows to Bigquery. Errors: %s', errors)

    def get_metric_history(
        self,
        benchmark_id: str,
        metric_key: str,
        min_time: datetime.datetime,
    ) -> typing.Iterable[MetricHistoryRow]:
        """Returns the historic values of each metric for a given model.

        Args:
          benchmark_id: Unique ID for a test.
          metric_key: Unique ID for a metric.
          min_time: Minimum timestamp for metric values. Metrics recorded
            before this timestamp will not be returned.

        Returns:
          List of MetricHistoryRow containing a metric's history.
        """
        # Only metrics that fell within their configured bounds and belong
        # to a successful run are returned. (The previous literal used an
        # escaped-quote + backslash line continuation, `\"success"\`, which
        # is fragile; a plain single-quoted string is equivalent.)
        query = """
          SELECT *
          FROM `metric_history`
          WHERE test_name LIKE @benchmark_id AND
            metric_name LIKE @metric_key AND
            (metric_lower_bound IS NULL OR metric_value >= metric_lower_bound) AND
            (metric_upper_bound IS NULL OR metric_value <= metric_upper_bound) AND
            timestamp >= @min_time AND
            uuid IN (
              SELECT uuid
              FROM `job_history`
              WHERE test_name LIKE @benchmark_id AND job_status = 'success'
            )
        """
        job_config = bigquery.QueryJobConfig(
            query_parameters=[
                bigquery.ScalarQueryParameter(
                    "benchmark_id", "STRING", benchmark_id),
                bigquery.ScalarQueryParameter(
                    "metric_key", "STRING", metric_key),
                bigquery.ScalarQueryParameter(
                    "min_time", "TIMESTAMP", min_time),
            ]
        )
        query_result = self.bigquery_client.query(query, job_config=job_config)
        # Row objects support mapping unpacking; column names match the
        # MetricHistoryRow field names because of SELECT *.
        return [MetricHistoryRow(**row) for row in query_result]
# ---- boundary: auto-generated Azure SDK code follows ----
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')  # Deserialized body type threaded through ClsType.
JSONType = Any  # Alias for JSON-serializable request bodies.
# Signature of the optional `cls` callback: (pipeline response,
# deserialized body, response headers) -> arbitrary caller-defined result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by the request builders below;
# client-side validation is disabled for these generated builders.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    gallery_application_name: str,
    gallery_application_version_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the initial PUT request for the create-or-update
    long-running operation on a gallery application version."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-10-01"
    accept = "application/json"

    # Resolve the templated ARM resource path with serialized segments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
        galleryApplicationName=_SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
        galleryApplicationVersionName=_SERIALIZER.url("gallery_application_version_name", gallery_application_version_name, 'str'),
    )

    # Query string: pinned api-version on top of any caller-supplied params.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: optional Content-Type plus the JSON Accept header.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    gallery_application_name: str,
    gallery_application_version_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the initial PATCH request for the update long-running
    operation on a gallery application version."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-10-01"
    accept = "application/json"

    # Resolve the templated ARM resource path with serialized segments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
        galleryApplicationName=_SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
        galleryApplicationVersionName=_SERIALIZER.url("gallery_application_version_name", gallery_application_version_name, 'str'),
    )

    # Query string: pinned api-version on top of any caller-supplied params.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: optional Content-Type plus the JSON Accept header.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=url,
        params=query,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    gallery_application_name: str,
    gallery_application_version_name: str,
    *,
    expand: Optional[Union[str, "_models.ReplicationStatusTypes"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request for retrieving a gallery application
    version, optionally expanding replication status."""
    api_version = "2021-10-01"
    accept = "application/json"

    # Resolve the templated ARM resource path with serialized segments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
        galleryApplicationName=_SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
        galleryApplicationVersionName=_SERIALIZER.url("gallery_application_version_name", gallery_application_version_name, 'str'),
    )

    # Query string: optional $expand plus the pinned api-version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    if expand is not None:
        query['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    gallery_application_name: str,
    gallery_application_version_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the initial DELETE request for the delete long-running
    operation on a gallery application version."""
    api_version = "2021-10-01"
    accept = "application/json"

    # Resolve the templated ARM resource path with serialized segments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
        galleryApplicationName=_SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
        galleryApplicationVersionName=_SERIALIZER.url("gallery_application_version_name", gallery_application_version_name, 'str'),
    )

    # Query string: pinned api-version on top of any caller-supplied params.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_list_by_gallery_application_request(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    gallery_application_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request that lists all versions of a gallery
    application definition."""
    api_version = "2021-10-01"
    accept = "application/json"

    # Resolve the templated ARM collection path with serialized segments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
        galleryApplicationName=_SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
    )

    # Query string: pinned api-version on top of any caller-supplied params.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
class GalleryApplicationVersionsOperations(object):
    """GalleryApplicationVersionsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2021_10_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Alias so callers can reach the model classes via `operations.models`.
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send the HTTP requests built below.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Holds subscription_id and polling_interval used by the operations.
        self._config = config
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: "_models.GalleryApplicationVersion",
        **kwargs: Any
    ) -> "_models.GalleryApplicationVersion":
        """Send the initial PUT of the create-or-update long-running
        operation and deserialize the immediate response."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryApplicationVersion"]
        # Map auth/404/409 responses to typed azure-core exceptions; callers
        # may extend the mapping via the `error_map` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the model into the JSON request body.
        _json = self._serialize.body(gallery_application_version, 'GalleryApplicationVersion')

        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_application_name=gallery_application_name,
            gallery_application_version_name=gallery_application_version_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200/201/202 all carry the same payload shape, so each branch
        # deserializes identically.
        if response.status_code == 200:
            deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'}  # type: ignore
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: "_models.GalleryApplicationVersion",
        **kwargs: Any
    ) -> LROPoller["_models.GalleryApplicationVersion"]:
        """Create or update a gallery Application Version.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version is to be created.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         created. Needs to follow semantic version name pattern: The allowed characters are digit and
         period. Digits must be within the range of a 32-bit integer. Format:
         :code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`.
        :type gallery_application_version_name: str
        :param gallery_application_version: Parameters supplied to the create or update gallery
         Application Version operation.
        :type gallery_application_version:
         ~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either GalleryApplicationVersion or the result
         of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryApplicationVersion"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PUT. `cls=lambda x,y,z: x`
            # keeps the raw PipelineResponse so the poller can inspect it.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_application_name=gallery_application_name,
                gallery_application_version_name=gallery_application_version_name,
                gallery_application_version=gallery_application_version,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            response = pipeline_response.http_response
            deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume polling from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'}  # type: ignore
    def _update_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: "_models.GalleryApplicationVersionUpdate",
        **kwargs: Any
    ) -> "_models.GalleryApplicationVersion":
        """Send the initial PATCH of the update long-running operation and
        deserialize the immediate response."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryApplicationVersion"]
        # Map auth/404/409 responses to typed azure-core exceptions; callers
        # may extend the mapping via the `error_map` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the update model into the JSON request body.
        _json = self._serialize.body(gallery_application_version, 'GalleryApplicationVersionUpdate')

        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_application_name=gallery_application_name,
            gallery_application_version_name=gallery_application_version_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'}  # type: ignore
    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: "_models.GalleryApplicationVersionUpdate",
        **kwargs: Any
    ) -> LROPoller["_models.GalleryApplicationVersion"]:
        """Update a gallery Application Version.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version is to be updated.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         updated. Needs to follow semantic version name pattern: The allowed characters are digit and
         period. Digits must be within the range of a 32-bit integer. Format:
         :code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`.
        :type gallery_application_version_name: str
        :param gallery_application_version: Parameters supplied to the update gallery Application
         Version operation.
        :type gallery_application_version:
         ~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersionUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either GalleryApplicationVersion or the result
         of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryApplicationVersion"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PATCH. `cls=lambda x,y,z: x`
            # keeps the raw PipelineResponse so the poller can inspect it.
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_application_name=gallery_application_name,
                gallery_application_version_name=gallery_application_version_name,
                gallery_application_version=gallery_application_version,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            response = pipeline_response.http_response
            deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume polling from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'}  # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        expand: Optional[Union[str, "_models.ReplicationStatusTypes"]] = None,
        **kwargs: Any
    ) -> "_models.GalleryApplicationVersion":
        """Retrieves information about a gallery Application Version.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version resides.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         retrieved.
        :type gallery_application_version_name: str
        :param expand: The expand expression to apply on the operation.
        :type expand: str or ~azure.mgmt.compute.v2021_10_01.models.ReplicationStatusTypes
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GalleryApplicationVersion, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryApplicationVersion"]
        # Map auth/404/409 responses to typed azure-core exceptions; callers
        # may extend the mapping via the `error_map` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_application_name=gallery_application_name,
            gallery_application_version_name=gallery_application_version_name,
            expand=expand,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE of the delete long-running operation.

        Returns None (or cls(response)); the poller tracks completion."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/404/409 responses to typed azure-core exceptions; callers
        # may extend the mapping via the `error_map` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_application_name=gallery_application_name,
            gallery_application_version_name=gallery_application_version_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202 accepted for async delete; 204 when already absent.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'}  # type: ignore
@distributed_trace
def begin_delete(
    self,
    resource_group_name: str,
    gallery_name: str,
    gallery_application_name: str,
    gallery_application_version_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Delete a gallery Application Version.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param gallery_name: The name of the Shared Application Gallery in which the Application
     Definition resides.
    :type gallery_name: str
    :param gallery_application_name: The name of the gallery Application Definition in which the
     Application Version resides.
    :type gallery_application_name: str
    :param gallery_application_version_name: The name of the gallery Application Version to be
     deleted.
    :type gallery_application_version_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: fire the initial DELETE.  cls=lambda passes the
        # raw pipeline response through so the poller can inspect the LRO
        # headers of the first response.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_application_name=gallery_application_name,
            gallery_application_version_name=gallery_application_version_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map only applies to the initial call; drop it before polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Delete has no response body; only a custom deserializer produces
        # a value (otherwise the poller resolves to None).
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'}  # type: ignore
@distributed_trace
def list_by_gallery_application(
    self,
    resource_group_name: str,
    gallery_name: str,
    gallery_application_name: str,
    **kwargs: Any
) -> Iterable["_models.GalleryApplicationVersionList"]:
    """List gallery Application Versions in a gallery Application Definition.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param gallery_name: The name of the Shared Application Gallery in which the Application
     Definition resides.
    :type gallery_name: str
    :param gallery_application_name: The name of the Shared Application Gallery Application
     Definition from which the Application Versions are to be listed.
    :type gallery_application_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either GalleryApplicationVersionList or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersionList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryApplicationVersionList"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page uses the operation's templated URL; later pages reuse
        # the service-provided next_link verbatim (forced to GET).
        if not next_link:
            request = build_list_by_gallery_application_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_application_name=gallery_application_name,
                template_url=self.list_by_gallery_application.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_by_gallery_application_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_application_name=gallery_application_name,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page, item iterator).
        deserialized = self._deserialize("GalleryApplicationVersionList", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, mapping error status codes to ARM exceptions.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_gallery_application.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions'}  # type: ignore
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-16 23:12:48
import os
import sys
import time
import inspect
import functools
import traceback
from libs.log import LogFormatter
from libs.url import quote_chinese, _build_url
from libs.utils import md5string, hide_me
from libs.ListIO import ListO
from libs.response import rebuild_response
from collections import namedtuple
class ProcessorResult(object):
    """Aggregate outcome of running a handler callback on one task.

    Bundles the callback's return value, any follow-up tasks, outgoing
    messages, captured log records, a caught exception (if any) and the
    extra-info dict accumulated during the run.
    """

    def __init__(self, result, follows, messages, logs, exception, extinfo):
        self.result = result
        self.follows = follows
        self.messages = messages
        self.logs = logs
        self.exception = exception
        self.extinfo = extinfo

    def rethrow(self):
        """Re-raise the captured exception; no-op when there is none."""
        if self.exception:
            raise self.exception

    def logstr(self):
        """Render the collected log entries into a single string.

        Plain strings pass through unchanged; log records have their
        tracebacks sanitized via hide_me() and are formatted with a
        trailing newline.
        """
        pieces = []
        formatter = LogFormatter(color=False)
        for entry in self.logs:
            if isinstance(entry, basestring):
                # raw text: append as-is, no newline added
                pieces.append(entry)
            else:
                if entry.exc_info:
                    exc_type, exc_value, tb = entry.exc_info
                    entry.exc_info = exc_type, exc_value, hide_me(tb, globals())
                pieces.append(formatter.format(entry))
                pieces.append('\n')
        return ''.join(pieces)
def catch_status_code_error(func):
    """Mark *func* so non-success HTTP responses are delivered to it
    instead of raising before dispatch (see BaseHandler._run)."""
    setattr(func, '_catch_status_code_error', True)
    return func
def not_send_status(func):
    """Decorator: flag the current run so its status is not reported,
    then dispatch to *func* through the handler's _run_func."""
    @functools.wraps(func)
    def _wrapped(self, response, task):
        self._extinfo['not_send_status'] = True
        bound = func.__get__(self, self.__class__)
        return self._run_func(bound, response, task)
    return _wrapped
def config(_config):
    """Decorator factory: attach per-callback crawl configuration.

    The dict is stored on the function as ``_config`` and later merged
    into crawl kwargs as defaults.
    """
    def decorate(func):
        func._config = _config
        return func
    return decorate
def every(minutes=1):
    """Decorator factory: run the callback only on cron ticks that are a
    multiple of *minutes*; otherwise return None."""
    def decorate(func):
        @functools.wraps(func)
        def on_cronjob(self, response, task):
            save = response.save
            if not (save and 'tick' in save and save['tick'] % minutes == 0):
                return None
            bound = func.__get__(self, self.__class__)
            return self._run_func(bound, response, task)
        return on_cronjob
    return decorate
class BaseHandlerMeta(type):
    """Metaclass that wraps the message/cronjob hooks with
    not_send_status so their runs never report a task status."""

    def __new__(cls, name, bases, attrs):
        for hook in ('_on_message', 'on_cronjob'):
            if hook in attrs:
                attrs[hook] = not_send_status(attrs[hook])
        return type.__new__(cls, name, bases, attrs)
class BaseHandler(object):
    """Base class for project scripts: resolves a task's callback,
    invokes it on the fetched response, and collects follow-up crawl
    tasks, messages and logs into a ProcessorResult.

    NOTE(review): this is Python 2 code (``__metaclass__`` hook,
    ``except Exception, e``, ``basestring``, ``iteritems``).
    """
    __metaclass__ = BaseHandlerMeta

    def _init(self, project):
        # Bind this handler to a project dict; returns self for chaining.
        self._name = project['name']
        self._project = project
        return self

    def _reset(self):
        # Clear per-run accumulators (extra info, messages, new tasks).
        self._extinfo = {}
        self._messages = []
        self._follows = []

    def _run_func(self, function, *arguments):
        # Call *function* with only as many leading arguments as its
        # signature accepts (len(args)-1 accounts for the bound ``self``).
        args, varargs, keywords, defaults = inspect.getargspec(function)
        return function(*arguments[:len(args)-1])

    def _run(self, task, response):
        """Resolve the task's callback and invoke it on *response*.

        Raises NotImplementedError if the named callback does not exist.
        """
        self._reset()
        if isinstance(response, dict):
            # Responses arriving as plain dicts are rebuilt into objects.
            response = rebuild_response(response)
        process = task.get('process', {})
        callback = process.get('callback', '__call__')
        if not hasattr(self, callback):
            raise NotImplementedError("self.%s() not implemented!" % callback)
        function = getattr(self, callback)
        # Unless the callback opted in via @catch_status_code_error,
        # error HTTP statuses raise before the callback is dispatched.
        if not getattr(function, '_catch_status_code_error', False):
            response.raise_for_status()
        return self._run_func(function, response, task)

    def run(self, module, task, response):
        """Run the task, capturing stdout, logs and any exception, and
        package everything into a ProcessorResult."""
        logger = module.get('logger')
        result = None
        exception = None
        stdout = sys.stdout
        try:
            # Redirect user-code prints into the module's log buffer.
            sys.stdout = ListO(module.logs)
            result = self._run(task, response)
            self._run_func(self.on_result, result, response, task)
        except Exception, e:
            logger.exception(e)
            exception = e
        finally:
            # Always restore the real stdout, even on failure.
            sys.stdout = stdout
        follows = self._follows
        messages = self._messages
        logs = module.logs
        extinfo = self._extinfo
        return ProcessorResult(result, follows, messages, logs, exception, extinfo)

    def _crawl(self, url, **kwargs):
        """Build a task dict for *url* from kwargs, queue it in
        self._follows and return it."""
        task = {}

        if kwargs.get('callback'):
            callback = kwargs['callback']
            if isinstance(callback, basestring) and hasattr(self, callback):
                func = getattr(self, callback)
            elif hasattr(callback, 'im_self') and callback.im_self is self:
                # Python 2 bound method of this handler: store its name.
                func = callback
                kwargs['callback'] = func.__name__
            else:
                raise NotImplementedError("self.%s() not implemented!" % callback)
            if hasattr(func, '_config'):
                # Per-callback defaults attached by the @config decorator.
                for k, v in func._config.iteritems():
                    kwargs.setdefault(k, v)

        if hasattr(self, 'crawl_config'):
            # Handler-wide defaults; explicit kwargs always win.
            for k, v in self.crawl_config.iteritems():
                kwargs.setdefault(k, v)

        url = quote_chinese(_build_url(url.strip(), kwargs.get('params')))
        if kwargs.get('files'):
            assert isinstance(kwargs.get('data', {}), dict), "data must be a dict when using with files!"
            content_type, data = _encode_multipart_formdata(kwargs.get('data', {}),
                                                            kwargs.get('files', {}))
            kwargs.setdefault('headers', {})
            kwargs['headers']['Content-Type'] = content_type
            kwargs['data'] = data
        if kwargs.get('data'):
            kwargs['data'] = _encode_params(kwargs['data'])

        # Split recognized kwargs into the task's schedule/fetch/process
        # sections; anything left as None is dropped.
        schedule = {}
        for key in ('priority', 'retries', 'exetime', 'age', 'itag', 'force_update'):
            if key in kwargs and kwargs[key] is not None:
                schedule[key] = kwargs[key]
        if schedule:
            task['schedule'] = schedule

        fetch = {}
        for key in ('method', 'headers', 'data', 'timeout', 'allow_redirects', 'cookies', 'proxy', 'etag', 'last_modifed', 'save'):
            if key in kwargs and kwargs[key] is not None:
                fetch[key] = kwargs[key]
        if fetch:
            task['fetch'] = fetch

        process = {}
        for key in ('callback', ):
            if key in kwargs and kwargs[key] is not None:
                process[key] = kwargs[key]
        if process:
            task['process'] = process

        task['project'] = self._name
        task['url'] = url
        # taskid defaults to the md5 of the final URL.
        task['taskid'] = task.get('taskid') or md5string(url)

        self._follows.append(task)
        return task

    # apis
    def crawl(self, url, **kwargs):
        '''
        params:
          url
          callback

          method
          params
          data
          files
          headers
          timeout
          allow_redirects
          cookies
          proxy
          etag
          last_modifed

          priority
          retries
          exetime
          age
          itag

          save
          taskid
        '''
        # A single URL produces one task; any iterable of URLs produces
        # one task per element (all sharing the same kwargs).
        if isinstance(url, basestring):
            return self._crawl(url, **kwargs)
        elif hasattr(url, "__iter__"):
            result = []
            for each in url:
                result.append(self._crawl(each, **kwargs))
            return result

    def send_message(self, project, msg):
        # Queue a message for another project; delivered via ProcessorResult.
        self._messages.append((project, msg))

    @not_send_status
    def _on_message(self, response):
        # Internal dispatcher: unpack (project, msg) stored in response.save.
        project, msg = response.save
        return self.on_message(project, msg)

    def on_message(self, project, msg):
        # Override to handle messages sent from other projects.
        pass

    def on_cronjob(self):
        # Override to run periodic work (wrapped by the metaclass; see @every).
        pass

    def on_result(self, result):
        # Override to post-process callback results.
        pass
| |
''' get gabodsid '''
def gabodsid(inputdate):
    """Convert a calendar date string into a GaBoDS night id (int).

    Shells out to the ldacpipeline ``mjd`` and ``nightid`` tools and
    parses their stdout via a scratch file.  NOTE(review): paths are
    hard-coded to one AFS installation; Python 2 ``print`` statements.
    """
    import re, os
    # Scratch file used to capture tool output.
    file = "/afs/slac.stanford.edu/u/ki/pkelly/pipeline/bluh"
    command = "/afs/slac/u/ki/anja/software/ldacpipeline-0.12.20/bin/Linux_64/mjd -t 22:00:00 -d " + inputdate + " > " + file
    print command
    os.system(command)
    yy = open(file,'r').readlines()
    # Modified Julian Date: second-to-last whitespace token on line 1.
    MJD = ((re.split('\s+',yy[0])[-2]))
    file = "/afs/slac.stanford.edu/u/ki/pkelly/pipeline/bluh"
    os.system("/afs/slac/u/ki/anja/software/ldacpipeline-0.12.20/bin/Linux_64/nightid -t 22:00:00 -d 31/12/1998 -m " + MJD + "> " + file )
    yy = open(file,'r').readlines()
    # Night id: second-to-last token on line 2, truncated to int.
    oo = int(float(re.split('\s+',yy[1])[-2]))
    return oo
def get_gabodsid(file):
    """Read the GABODSID keyword from a FITS header via ``dfits``.

    The matching header line is captured in a scratch file ``hh`` in the
    current directory; returns the keyword value as a string.
    """
    import os
    command = "dfits " + file + " | grep GABODSID > hh"
    print command
    os.system(command)
    jj = open('hh','r').readlines()
    import re
    # Second whitespace-separated token on the matched line is the value.
    date = re.split('\s+',jj[0])[1]
    return date
def get_date(file):
command = "dfits " + file + " | grep DATE-OBS > hh"
print command
os.system(command)
jj = open('hh','r').readlines()
import re
date = re.split('\'',jj[0])[1]
year = float(date[0:4])
month = float(date[5:7])
day = float(date[8:])
return (year, month, day)
''' see if one date falls inside two limits format is date = [yyyy,mm,dd] '''
def inside(date, down_limit, up_limit):
    """Return 1 if *date* lies strictly between the two [y, m, d] limits,
    else 0.

    Behaves like a lexicographic ``down_limit < date < up_limit`` check,
    EXCEPT that when down_limit and up_limit share the same year only the
    lower bound is tested (the ``elif`` chain never reaches the upper
    branch).  NOTE(review): likely a latent bug, though the chip-epoch
    ranges used by runit() never hit that case.
    """
    good = 0
    print date[0], date[1], date[2], down_limit[0], up_limit[0]
    if (date[0] > down_limit[0]) and (date[0] < up_limit[0]):
        # Year strictly inside the interval: accept regardless of month/day.
        good = 1
    elif date[0] == down_limit[0]:
        # Same year as the lower limit: compare month, then day.
        if date[1] > down_limit[1]:
            good = 1
        elif date[1] == down_limit[1]:
            if date[2] > down_limit[2]:
                good = 1
    elif date[0] == up_limit[0]:
        # Same year as the upper limit: compare month, then day.
        if date[1] < up_limit[1]:
            good = 1
        elif date[1] == up_limit[1]:
            if date[2] < up_limit[2]:
                good = 1
    print good
    return good
''' the only thing that matters is the date '''
''' P_IMCOMBFLAT_IMCAT '''
'''
P_IMCOMBFLAT_IMCAT=${BIN}/imcombflat
${P_IMCOMBFLAT_IMCAT} -i flat_images_$$\
-o ${RESULTDIR[${CHIP}]}/$3_${CHIP}.fits \
-s 1 -e 0 1
'''
def runit(dir):
    """Sort Subaru flat-field FITS files in *dir* into per-chip-configuration
    directories, keyed on the observation date from DATE-OBS.

    Each chip configuration is ([start y,m,d], [end y,m,d], dir suffix).
    NOTE(review): the directory created is ``SUBARU/skyflat<suffix>`` but
    files are copied into ``nobackup/SUBARU/skyflats<suffix>`` — confirm
    these are meant to be the same location.  The ``try/except`` around
    os.system is ineffective: os.system reports failure via its return
    code, which is ignored here.
    """
    import os
    chip_confs = [[[2001,10,18],[2002,9,5],'_10_1'],[[2002,9,5],[2100,1,1],'_10_2']]
    ''' split into different chip configurations '''
    from glob import glob
    list = glob(dir + "*fits")
    # Create one destination directory per chip configuration.
    for chip_conf in chip_confs:
        newdir = os.environ['dougdir'] + "SUBARU/skyflat" + chip_conf[2]
        #anjadir = os.environ['subdir'] + "SUBARU/skyflat" + chip_conf[2]
        os.system('mkdir ' + newdir)
        #os.system('ln -s ' + newdir + ' ' + anjadir )
    # Route every file to the configuration whose date range contains it.
    for file in list:
        (year, month, day) = get_date(file)
        for chip_conf in chip_confs:
            print year, month,day, chip_conf[0], chip_conf[1]
            print inside([year,month,day],chip_conf[0],chip_conf[1])
            if inside((year,month,day),chip_conf[0],chip_conf[1]):
                try:
                    # Best-effort copy; failures only print a marker.
                    os.system("cp " + file + ' ' + os.environ['dougdir'] + 'nobackup/SUBARU/skyflats' + chip_conf[2] + '/')
                except: print 'failed'
def combineperiods(interval,dir):
import os, re
statsxmin = '500'
statsxmax = '1500'
statsymin = '1500'
statsymax = '2500'
firstchip = 'yes'
uu = open('rosetta','w')
batchfiles = open('batchfiles','w')
batchdivfiles = open('batchdivfiles','w')
batchnormfiles = open('batchnormfiles','w')
from glob import glob
u2 = open('reject.skyflat','r').readlines()
rejectfiles = []
for line in u2:
temp = re.split('\/',line[:-1])[-1]
out = re.split('_',temp)[0]
rejectfiles.append(out)
list = glob(dir + "*10OC.fits")
files = []
badfiles = []
for line in list:
#print re.split('\/',line)
temp = re.split('\/',line)[-1]
temp = re.split('_',temp)[0]
bad = 0
for file in rejectfiles:
import string
if string.find(temp,file) != -1:
bad = 1
if bad == 0:
files.append(temp)
else:
badfiles.append(temp)
for chipnumber in range(1,11):
''' first break up images into different epochs '''
month_period = 6
from glob import glob
#list = glob(dir + "*OC.fits")
command = "imstats `ls " + dir + "*" + str(chipnumber) + "OC.fits` -s " + statsxmin + " " + statsxmax + " " + statsymin + " " + statsymax + " -o outliststats" + str(chipnumber)
#os.system(command)
list = open('outliststats' + str(chipnumber),'r').readlines()
datelist = []
index = 0
for file in list:
if file[0] != '#' and file[0] != '':
filename = re.split('\s+',file)[0]
bad = 1
for file2 in files:
if string.find(filename,file2) != -1:
bad = 0
if bad == 0:
index = index + 1
gabodsid = get_gabodsid(filename)
datelist.append([gabodsid,file[:-1],filename])
command = "/afs/slac/u/ki/anja/software/ldacpipeline-0.12.20/bin/Linux_64/caldate -d 31/12/1998 -i " + gabodsid + " > temp "
os.system(command)
yy = open('temp','r').readlines()
date = ((re.split('\s+',yy[0])[-2]))
uu.write(gabodsid + " " + date + "\n")
datelist.sort()
rr = open('dates','w')
for obs in datelist:
rr.write(str(obs[0]) + ' ' + str(obs[2]) + '\n')
rr.close()
limit_up = float(datelist[-1][0])
limit_down = float(datelist[0][0])
''' a six month period is approximately 30*6=180 days '''
diff_year = int((limit_up - limit_down) / 180.0) + 1
''' define your date ranges from dates file written out above '''
brackets = [[1523,1639],[1843,1846],[1878,1902],[1993,1994],[2268,2668]]
''' read in dates and make brackets '''
filelists = {}
for bracket in brackets:
filelists[str(bracket[0]) + '_' + str(bracket[1])] = []
firstchip = 'no'
if 1 == 0: #firstchip == 'yes':
''' loop through the periods and make date brackets '''
brackets = []
filelists = {}
for i in range(diff_year):
start = limit_down + i * 180
end = limit_down + (i + 1) * 180
brackets.append([start, end])
filelists[str(start) + '_' + str(month_period)] = []
firstchip = 'no'
filelists['all'] = []
''' go through observations and which time bracket each observation fits into '''
for obs in datelist:
filelists['all'].append(obs[1])
for bracket in brackets:
''' also make a master flat '''
''' figure out where the brackets go '''
if bracket[0] <= float(obs[0]) and float(obs[0]) <= bracket[1]:
filelists[str(bracket[0]) + '_' + str(bracket[1])].append(obs[1])
paramslist = [{'method': 'MEDIAN','lo_clip':'3.0 3.0'},{'method': 'CLIPMEAN','lo_clip':'1.0 1.0'},{'method': 'CLIPMEAN','lo_clip':'2.0 2.0'}]
for params in paramslist:
scriptname = 'script' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" + str(chipnumber)
outinfo = dir + scriptname
''' script to do imcombine '''
script = open(scriptname,'w')
''' script to divide by superflat'''
divscript = open(scriptname + ".py",'w')
''' script to bin and normalize'''
normscript = open(scriptname + "_norm.py",'w')
batchnormfiles.write('bsub -R rhel50 -q long -e ' + outinfo + 'norm -o ' + outinfo + 'norm python ' + scriptname + '_norm.py\n')
batchdivfiles.write('bsub -R rhel50 -q long -e ' + outinfo + 'py -o ' + outinfo + 'py python ' + scriptname + '.py\n')
batchfiles.write('bsub -R rhel50 -q long -e ' + outinfo + ' -o ' + outinfo + ' source ' + scriptname + '\n')
for key in filelists.keys():
file = open(key + '_chip' + str(chipnumber),'w')
for ele in filelists[key]:
file.write(ele.replace('1OC.fits',str(chipnumber) + 'OC.fits') + '\n')
file.close()
''' rescale -s 1 '''
method = params['method']
lo_hi_rank = '1 1'
lo_hi_rej = '4000 30000'
lo_hi_clip = params['lo_clip']
input_list = key
output_image = key + '_' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" + str(chipnumber) + ".fits"
command = "imcombflat -i " + input_list + "_chip" + str(chipnumber) + " -o " + dir + output_image + " -s 1 -c " + method + " -e " + lo_hi_rank + " -t " + lo_hi_rej + " -l " + lo_hi_clip
script.write(command + '\n')
divided_prefix = 'div_' + key + '_' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" + str(chipnumber) + ".fits"
divided_image = divided_prefix + str(chipnumber) + ".fits"
binned_image = "/BINNED/" + divided_prefix + "mos.fits"
binned_normal_image = "/BINNED/" + divided_prefix + "mos_normal.fits"
if key != 'all':
''' divide each chip by the comprehensive 'all' flat '''
all_image = 'all_' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" + str(chipnumber) + ".fits"
divscript.write("print '" + output_image + "'\n")
divscript.write("import re,os\n")
divscript.write("os.system('rm " + dir + divided_image + "')\n")
divscript.write("os.system(\"ic '%1 %2 / ' " + dir + output_image + " " + dir + all_image + " > " + dir + divided_image + "\")\n")
''' bin chips and normalize binned image '''
normscript.write("os.system(\"./create_binnedmosaics.sh " + dir + " \"\" " + divided_prefix + " 8 -32 \")\n")
normscript.write("os.system(\"imstats " + dir + binned_image + " -s " + statsxmin + " " + statsxmax + " " + statsymin + " " + statsymax + " -o outlist\")\n")
normscript.write("p = open('outlist').readlines()[-1]\n")
normscript.write("import re\n")
normscript.write("mode = re.split('\s+',p)[1]\n")
normscript.write("os.system(\"ic '%1 \" + mode + \" / ' " + dir + binned_image + " > " + binned_normal_image + \")\n")
#divscript.write("os.system('rm divoutA.fits')\n")
#divscript.write("os.system('rm divoutB.fits')\n")
#divscript.write("os.system(\"imstats " + dir + output_image + " -s " + statsxmin + " " + statsxmax + " " + statsymin + " " + statsymax + " -o outlist\")\n")
#divscript.write("p = open('outlist').readlines()[-1]\n")
#divscript.write("import re\n")
#divscript.write("mode = re.split('\s+',p)[1]\n")
#divscript.write("os.system(\"ic '%1 \" + mode + \" / ' " + dir + output_image + " > divoutA.fits \")\n")
#divscript.write("os.system(\"imstats " + dir + all_image + " -s " + statsxmin + " " + statsxmax + " " + statsymin + " " + statsymax + " -o outlist\")\n")
#divscript.write("p = open('outlist').readlines()[-1]\n")
#divscript.write("mode = re.split('\s+',p)[1]\n")
#divscript.write("os.system(\"ic '%1 \" + mode + \" / ' " + dir + all_image + " > divoutB.fits \")\n")
#divscript.write("os.system(\"ic '%1 %2 / ' divoutA.fits divoutB.fits > " + dir + divided_image + "\")\n")
#divscript.write("os.system('rm divoutA.fits')\n")
#divscript.write("os.system('rm divoutB.fits')\n")
print command
script.close()
#os.system(command)
''' make lists of files to combine together, then combine them '''
#os.system("ic '%1 %2 /' " + img + " " + flat + " > " + img.replace('.fits','M.fits'))
#get_date()
batchfiles.close()
if __name__ == '__main__':
    import os
    # Default flat directory (used by the disabled runit() call below).
    dir = os.environ['dougdir'] + 'nobackup/SUBARU/skyflats/'
    #runit(dir)
    # Current entry point: generate combine scripts for this test run.
    location = os.environ['subdir'] + 'SUBARU/2007-07-18_skyflat_test/SKYFLAT/'
    combineperiods(6,location)
    ''' then need to run ./cp_aux_data.sh ${SUBARUDIR} ${run}_${filter} ${DOUGDIR}/skyflat_10_2 '''
| |
__doc__ = """CSV parsing and writing.
This module provides classes that assist in the reading and writing
of Comma Separated Value (CSV) files, and implements the interface
described by PEP 305. Although many CSV files are simple to parse,
the format is not formally defined by a stable specification and
is subtle enough that parsing lines of a CSV file with something
like line.split(\",\") is bound to fail. The module supports three
basic APIs: reading, writing, and registration of dialects.
DIALECT REGISTRATION:
Readers and writers support a dialect argument, which is a convenient
handle on a group of settings. When the dialect argument is a string,
it identifies one of the dialects previously registered with the module.
If it is a class or instance, the attributes of the argument are used as
the settings for the reader or writer:
class excel:
delimiter = ','
quotechar = '\"'
escapechar = None
doublequote = True
skipinitialspace = False
lineterminator = '\\r\\n'
quoting = QUOTE_MINIMAL
SETTINGS:
* quotechar - specifies a one-character string to use as the
quoting character. It defaults to '\"'.
* delimiter - specifies a one-character string to use as the
field separator. It defaults to ','.
* skipinitialspace - specifies how to interpret whitespace which
immediately follows a delimiter. It defaults to False, which
means that whitespace immediately following a delimiter is part
of the following field.
* lineterminator - specifies the character sequence which should
terminate rows.
* quoting - controls when quotes should be generated by the writer.
It can take on any of the following module constants:
csv.QUOTE_MINIMAL means only when required, for example, when a
field contains either the quotechar or the delimiter
csv.QUOTE_ALL means that quotes are always placed around fields.
csv.QUOTE_NONNUMERIC means that quotes are always placed around
fields which do not parse as integers or floating point
numbers.
csv.QUOTE_NONE means that quotes are never placed around fields.
* escapechar - specifies a one-character string used to escape
the delimiter when quoting is set to QUOTE_NONE.
* doublequote - controls the handling of quotes inside fields. When
True, two consecutive quotes are interpreted as one during read,
and when writing, each quote character embedded in the data is
written as two quotes.
"""
__version__ = "1.0"

# Explicit public API of this module.
__all__ = [
    'Dialect', 'Error', 'QUOTE_ALL', 'QUOTE_MINIMAL', 'QUOTE_NONE',
    'QUOTE_NONNUMERIC', 'Reader', 'Writer', '__doc__', '__version__',
    '_call_dialect', '_dialects', '_field_limit', 'field_size_limit',
    'get_dialect', 'list_dialects', 'reader', 'register_dialect',
    'undefined', 'unregister_dialect', 'writer'
]

# Quoting modes understood by Dialect.quoting (see module docstring).
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE = range(4)

# Registry of named dialects (see register_dialect / get_dialect).
_dialects = {}
_field_limit = 128 * 1024 # max parsed field size
class Error(Exception):
    """Exception raised for errors detected while parsing or writing CSV."""
    pass
class Dialect(object):
    """CSV dialect

    The Dialect type records CSV parsing and generation options."""
    __slots__ = ["_delimiter", "_doublequote", "_escapechar",
                 "_lineterminator", "_quotechar", "_quoting",
                 "_skipinitialspace", "_strict"]

    def __new__(cls, dialect, **kwargs):
        """Build (or reuse) a validated Dialect.

        *dialect* may be None, a registered dialect name, an existing
        Dialect instance, or any object carrying the settings as
        attributes; keyword arguments override individual settings.
        Raises TypeError for unknown keywords or invalid values.
        """
        for name in kwargs:
            if '_' + name not in Dialect.__slots__:
                raise TypeError("unexpected keyword argument '%s'" %
                                (name,))
        if dialect is not None:
            if isinstance(dialect, basestring):
                dialect = get_dialect(dialect)
            # Can we reuse this instance?
            if (isinstance(dialect, Dialect)
                and all(value is None for value in kwargs.itervalues())):
                return dialect

        self = object.__new__(cls)

        # Per-setting validators.  They read ``name`` from the enclosing
        # copy loop below when building their error messages.
        def set_char(x):
            if x is None:
                return None
            if isinstance(x, str) and len(x) <= 1:
                return x
            raise TypeError("%r must be a 1-character string" % (name,))
        def set_str(x):
            if isinstance(x, str):
                return x
            raise TypeError("%r must be a string" % (name,))
        def set_quoting(x):
            if x in range(4):
                return x
            raise TypeError("bad 'quoting' value")

        attributes = {"delimiter": (',', set_char),
                      "doublequote": (True, bool),
                      "escapechar": (None, set_char),
                      "lineterminator": ("\r\n", set_str),
                      "quotechar": ('"', set_char),
                      "quoting": (QUOTE_MINIMAL, set_quoting),
                      "skipinitialspace": (False, bool),
                      "strict": (False, bool),
                      }

        # Copy attributes: per setting, precedence is kwargs > source
        # dialect attribute > built-in default.
        notset = object()
        for name in Dialect.__slots__:
            name = name[1:]
            value = notset
            if name in kwargs:
                value = kwargs[name]
            elif dialect is not None:
                value = getattr(dialect, name, notset)

            # mapping by name: (default, converter)
            if value is notset:
                value = attributes[name][0]
                # quotechar is processed before quoting (slot order), so a
                # cleared quotechar downgrades the default quoting mode.
                if name == 'quoting' and not self.quotechar:
                    value = QUOTE_NONE
            else:
                converter = attributes[name][1]
                if converter:
                    value = converter(value)

            # setattr(self, '_' + name, value)
            # NOTE(review): writing through self.__dict__ on a class that
            # declares __slots__ is interpreter-dependent (slot descriptors
            # normally shadow the instance dict) — confirm on the target
            # runtime before reuse.
            self.__dict__['_' + name] = value

        if not self.delimiter:
            raise TypeError("delimiter must be set")

        if self.quoting != QUOTE_NONE and not self.quotechar:
            raise TypeError("quotechar must be set if quoting enabled")

        if not self.lineterminator:
            raise TypeError("lineterminator must be set")

        return self

    # Read-only views over the validated settings.
    delimiter        = property(lambda self: self._delimiter)
    doublequote      = property(lambda self: self._doublequote)
    escapechar       = property(lambda self: self._escapechar)
    lineterminator   = property(lambda self: self._lineterminator)
    quotechar        = property(lambda self: self._quotechar)
    quoting          = property(lambda self: self._quoting)
    skipinitialspace = property(lambda self: self._skipinitialspace)
    strict           = property(lambda self: self._strict)
def _call_dialect(dialect_inst, kwargs):
    """Normalize a dialect argument plus keyword overrides into a Dialect."""
    overrides = dict(kwargs)
    return Dialect(dialect_inst, **overrides)
def register_dialect(name, dialect=None, **kwargs):
    """Associate a string *name* with a dialect so it can be looked up later.

    dialect = csv.register_dialect(name, dialect)"""
    if not isinstance(name, basestring):
        raise TypeError("dialect name must be a string or unicode")
    _dialects[name] = _call_dialect(dialect, kwargs)
def unregister_dialect(name):
    """Delete the name/dialect mapping associated with a string name.

    csv.unregister_dialect(name)"""
    if name not in _dialects:
        raise Error("unknown dialect")
    del _dialects[name]
def get_dialect(name):
    """Return the dialect instance associated with name.

    dialect = csv.get_dialect(name)"""
    if name in _dialects:
        return _dialects[name]
    raise Error("unknown dialect")
def list_dialects():
    """Return a list of all known dialect names.

    names = csv.list_dialects()"""
    return list(_dialects)
class Reader(object):
    """CSV reader

    Reader objects are responsible for reading and parsing tabular data
    in CSV format."""

    # States of the per-character parsing state machine below.
    (START_RECORD, START_FIELD, ESCAPED_CHAR, IN_FIELD,
     IN_QUOTED_FIELD, ESCAPE_IN_QUOTED_FIELD, QUOTE_IN_QUOTED_FIELD,
     EAT_CRNL) = range(8)

    def __init__(self, iterator, dialect=None, **kwargs):
        self.dialect = _call_dialect(dialect, kwargs)
        self.input_iter = iter(iterator)
        self.line_num = 0          # source lines consumed so far
        self._parse_reset()

    def _parse_reset(self):
        # Clear per-record parser state.
        self.field = ''            # characters of the field being built
        self.fields = []           # completed fields of the current record
        self.state = self.START_RECORD
        self.numeric_field = False # convert next saved field to float

    def __iter__(self):
        return self

    def next(self):
        # NOTE(review): Python 2 iterator protocol; Python 3 would need
        # this method to be named __next__.
        """Parse and return the next record as a list of fields."""
        self._parse_reset()
        while True:
            try:
                line = next(self.input_iter)
            except StopIteration:
                # End of input OR exception
                if len(self.field) > 0:
                    # EOF in the middle of a (quoted) field is an error.
                    raise Error("newline inside string")
                raise
            self.line_num += 1

            if '\0' in line:
                raise Error("line contains NULL byte")
            pos = 0
            while pos < len(line):
                pos = self._parse_process_char(line, pos)
            self._parse_eol()

            # A record is complete once the machine returns to START_RECORD
            # (a quoted field may span several input lines).
            if self.state == self.START_RECORD:
                break

        fields = self.fields
        self.fields = []
        return fields

    def _parse_process_char(self, line, pos):
        """Consume one (or, in the IN_FIELD fast path, several) characters
        starting at *pos*; return the next position to process."""
        c = line[pos]
        if self.state == self.IN_FIELD:
            # in unquoted field
            # Fast path: scan ahead with pos2 so runs of ordinary
            # characters are appended in one slice instead of one by one.
            pos2 = pos
            while True:
                if c in '\n\r':
                    # end of line - return [fields]
                    if pos2 > pos:
                        self._parse_add_char(line[pos:pos2])
                        pos = pos2
                    self._parse_save_field()
                    self.state = self.EAT_CRNL
                elif c == self.dialect.escapechar:
                    # possible escaped character
                    pos2 -= 1
                    self.state = self.ESCAPED_CHAR
                elif c == self.dialect.delimiter:
                    # save field - wait for new field
                    if pos2 > pos:
                        self._parse_add_char(line[pos:pos2])
                        pos = pos2
                    self._parse_save_field()
                    self.state = self.START_FIELD
                else:
                    # normal character - save in field
                    pos2 += 1
                    if pos2 < len(line):
                        c = line[pos2]
                        continue
                break
            if pos2 > pos:
                # Flush the scanned run; -1 compensates the +1 on return.
                self._parse_add_char(line[pos:pos2])
                pos = pos2 - 1

        elif self.state == self.START_RECORD:
            if c in '\n\r':
                self.state = self.EAT_CRNL
            else:
                self.state = self.START_FIELD
                # restart process: re-dispatch this same character through
                # the START_FIELD branch (its new pos result is equivalent).
                self._parse_process_char(line, pos)

        elif self.state == self.START_FIELD:
            if c in '\n\r':
                # save empty field - return [fields]
                self._parse_save_field()
                self.state = self.EAT_CRNL
            elif (c == self.dialect.quotechar
                  and self.dialect.quoting != QUOTE_NONE):
                # start quoted field
                self.state = self.IN_QUOTED_FIELD
            elif c == self.dialect.escapechar:
                # possible escaped character
                self.state = self.ESCAPED_CHAR
            elif c == ' ' and self.dialect.skipinitialspace:
                # ignore space at start of field
                pass
            elif c == self.dialect.delimiter:
                # save empty field
                self._parse_save_field()
            else:
                # begin new unquoted field
                if self.dialect.quoting == QUOTE_NONNUMERIC:
                    self.numeric_field = True
                self._parse_add_char(c)
                self.state = self.IN_FIELD

        elif self.state == self.ESCAPED_CHAR:
            self._parse_add_char(c)
            self.state = self.IN_FIELD

        elif self.state == self.IN_QUOTED_FIELD:
            if c == self.dialect.escapechar:
                # possible escape character
                self.state = self.ESCAPE_IN_QUOTED_FIELD
            elif (c == self.dialect.quotechar
                  and self.dialect.quoting != QUOTE_NONE):
                if self.dialect.doublequote:
                    # doublequote; " represented by ""
                    self.state = self.QUOTE_IN_QUOTED_FIELD
                else:
                    # end of quote part of field
                    self.state = self.IN_FIELD
            else:
                # normal character - save in field
                self._parse_add_char(c)

        elif self.state == self.ESCAPE_IN_QUOTED_FIELD:
            self._parse_add_char(c)
            self.state = self.IN_QUOTED_FIELD

        elif self.state == self.QUOTE_IN_QUOTED_FIELD:
            # doublequote - seen a quote in a quoted field
            if (c == self.dialect.quotechar
                and self.dialect.quoting != QUOTE_NONE):
                # save "" as "
                self._parse_add_char(c)
                self.state = self.IN_QUOTED_FIELD
            elif c == self.dialect.delimiter:
                # save field - wait for new field
                self._parse_save_field()
                self.state = self.START_FIELD
            elif c in '\r\n':
                # end of line - return [fields]
                self._parse_save_field()
                self.state = self.EAT_CRNL
            elif not self.dialect.strict:
                # Lenient mode: treat the stray quote as field data.
                self._parse_add_char(c)
                self.state = self.IN_FIELD
            else:
                raise Error("'%c' expected after '%c'" %
                            (self.dialect.delimiter, self.dialect.quotechar))

        elif self.state == self.EAT_CRNL:
            if c not in '\r\n':
                raise Error("new-line character seen in unquoted field - "
                            "do you need to open the file "
                            "in universal-newline mode?")

        else:
            raise RuntimeError("unknown state: %r" % (self.state,))

        return pos + 1

    def _parse_eol(self):
        """Finish the current line: close any open field/record state.

        A still-open quoted field gets an embedded '\\n' and continues on
        the next input line.
        """
        if self.state == self.EAT_CRNL:
            self.state = self.START_RECORD
        elif self.state == self.START_RECORD:
            # empty line - return []
            pass
        elif self.state == self.IN_FIELD:
            # in unquoted field
            # end of line - return [fields]
            self._parse_save_field()
            self.state = self.START_RECORD
        elif self.state == self.START_FIELD:
            # save empty field - return [fields]
            self._parse_save_field()
            self.state = self.START_RECORD
        elif self.state == self.ESCAPED_CHAR:
            self._parse_add_char('\n')
            self.state = self.IN_FIELD
        elif self.state == self.IN_QUOTED_FIELD:
            pass
        elif self.state == self.ESCAPE_IN_QUOTED_FIELD:
            self._parse_add_char('\n')
            self.state = self.IN_QUOTED_FIELD
        elif self.state == self.QUOTE_IN_QUOTED_FIELD:
            # end of line - return [fields]
            self._parse_save_field()
            self.state = self.START_RECORD
        else:
            raise RuntimeError("unknown state: %r" % (self.state,))

    def _parse_save_field(self):
        # Finish the field under construction; QUOTE_NONNUMERIC fields are
        # converted to float.
        field, self.field = self.field, ''
        if self.numeric_field:
            self.numeric_field = False
            field = float(field)
        self.fields.append(field)

    def _parse_add_char(self, c):
        # Append a character (or a scanned run of characters) to the
        # current field, enforcing the module-wide field size limit.
        if len(self.field) + len(c) > _field_limit:
            raise Error("field larger than field limit (%d)" % (_field_limit))
        self.field += c
class Writer(object):
    """CSV writer

    Writer objects are responsible for generating tabular data
    in CSV format from sequence input."""

    def __init__(self, file, dialect=None, **kwargs):
        # Accept any object exposing a callable write() method
        # (open file, StringIO, socket wrapper, ...).
        if not (hasattr(file, 'write') and callable(file.write)):
            raise TypeError("argument 1 must have a 'write' method")
        self.writeline = file.write
        self.dialect = _call_dialect(dialect, kwargs)

    def _join_reset(self):
        # Reset the per-row output buffer.
        self.rec = []
        self.num_fields = 0

    def _join_append(self, field, quoted, quote_empty):
        """Append one already-stringified field to the row buffer,
        applying the dialect's quoting and escaping rules.

        field       -- the field as a string
        quoted      -- caller's request that the field be quoted
        quote_empty -- True when this is the only field in the row
                       (a lone empty field must be quoted so the row
                       is distinguishable from an empty record)
        """
        dialect = self.dialect
        # If this is not the first field we need a field separator.
        if self.num_fields > 0:
            self.rec.append(dialect.delimiter)
        if dialect.quoting == QUOTE_NONE:
            # Nothing may be quoted, so every special character must be
            # escaped (escapechar itself first so that escapes inserted
            # later are not themselves re-escaped).
            need_escape = tuple(dialect.lineterminator) + (
                dialect.escapechar,  # escapechar always first
                dialect.delimiter, dialect.quotechar)
        else:
            # Quote the field if it contains any character that would
            # otherwise make the output ambiguous.
            for c in tuple(dialect.lineterminator) + (
                    dialect.delimiter, dialect.escapechar):
                if c and c in field:
                    quoted = True
            need_escape = ()
            if dialect.quotechar in field:
                if dialect.doublequote:
                    # doublequote; " represented by ""
                    field = field.replace(dialect.quotechar,
                                          dialect.quotechar * 2)
                    quoted = True
                else:
                    need_escape = (dialect.quotechar,)
        for c in need_escape:
            if c and c in field:
                if not dialect.escapechar:
                    raise Error("need to escape, but no escapechar set")
                field = field.replace(c, dialect.escapechar + c)
        # If field is empty check if it needs to be quoted.
        if field == '' and quote_empty:
            if dialect.quoting == QUOTE_NONE:
                raise Error("single empty field record must be quoted")
            quoted = True  # was `quoted = 1`: keep the type consistent
        if quoted:
            field = dialect.quotechar + field + dialect.quotechar
        self.rec.append(field)
        self.num_fields += 1

    def writerow(self, row):
        """Format *row* (any sequence) as one CSV record and write it
        to the underlying file object.

        Raises Error if *row* is not a sequence."""
        dialect = self.dialect
        try:
            rowlen = len(row)
        except TypeError:
            raise Error("sequence expected")
        # Join all fields in the internal buffer.
        self._join_reset()
        for field in row:
            quoted = False
            if dialect.quoting == QUOTE_NONNUMERIC:
                # Quote anything float() cannot parse.  Narrowed from a
                # bare `except:` so that KeyboardInterrupt/SystemExit
                # raised during conversion are not silently swallowed;
                # float() itself only raises TypeError or ValueError.
                try:
                    float(field)
                except (TypeError, ValueError):
                    quoted = True
                # This changed since 2.5:
                # quoted = not isinstance(field, (int, long, float))
            elif dialect.quoting == QUOTE_ALL:
                quoted = True
            if field is None:
                value = ""
            elif isinstance(field, float):
                # repr() preserves full float precision.
                value = repr(field)
            else:
                value = str(field)
            self._join_append(value, quoted, rowlen == 1)
        # Add line terminator.
        self.rec.append(dialect.lineterminator)
        self.writeline(''.join(self.rec))

    def writerows(self, rows):
        """Write every row in the iterable *rows*."""
        for row in rows:
            self.writerow(row)
def reader(*args, **kwargs):
    """
    csv_reader = reader(iterable [, dialect='excel']
                        [optional keyword args])
    for row in csv_reader:
        process(row)

    *iterable* may be any object yielding one line of input per
    iteration (a file object, a list of strings, ...).  The optional
    dialect and keyword arguments override individual formatting
    settings.  The result is an iterator producing one row (a list of
    fields, possibly spanning several input lines) per iteration."""
    return Reader(*args, **kwargs)
def writer(*args, **kwargs):
    """
    csv_writer = csv.writer(fileobj [, dialect='excel']
                            [optional keyword args])
    for row in sequence:
        csv_writer.writerow(row)

    [or]

    csv_writer = csv.writer(fileobj [, dialect='excel']
                            [optional keyword args])
    csv_writer.writerows(rows)

    *fileobj* may be any object supporting the file write API."""
    return Writer(*args, **kwargs)
# Private sentinel: distinguishes "no argument supplied" from any real
# value the caller might pass (including None).
undefined = object()

def field_size_limit(limit=undefined):
    """Set an upper limit on the size of parsed fields.

    csv.field_size_limit([limit])

    Returns the old limit.  When called without an argument the current
    limit is returned unchanged."""
    global _field_limit
    previous = _field_limit
    if limit is undefined:
        return previous
    if not isinstance(limit, (int, long)):
        raise TypeError("int expected, got %s" %
                        (limit.__class__.__name__,))
    _field_limit = limit
    return previous
# NOTE(review): the lines below are non-Python residue from a web-page
# extraction ("Subsets and Splits" dataset-viewer boilerplate); they are
# preserved here as a comment because as bare text they would be a
# syntax error at module level.
# | Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.